// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
 * Author: Jacob Chen <jacob-chen@iotwrt.com>
 */

#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>

#include "rga-hw.h"
#include "rga.h"

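/*
 * Fill the RGA MMU descriptor table from a scatter-gather table: one
 * descriptor per DMA page, holding the lower 32 bits of the page address.
 * Returns the number of descriptors written, or -EINVAL if @sgt would
 * need more than @max_desc entries.
 */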
static ssize_t fill_descriptors(struct rga_dma_desc *desc, size_t max_desc,
				struct sg_table *sgt)
{
	struct sg_dma_page_iter iter;
	struct rga_dma_desc *tmp = desc;
	size_t n_desc = 0;
	dma_addr_t addr;

	for_each_sgtable_dma_page(sgt, &iter, 0) {
		if (n_desc >= max_desc)
			return -EINVAL;
		addr = sg_page_iter_dma_address(&iter);
		tmp->addr = lower_32_bits(addr);
		tmp++;
		n_desc++;
	}

	return n_desc;
}

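/*
 * Report the plane count and minimum plane sizes of the currently set
 * multiplanar format, or validate the caller-provided values against it.
 */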
static int
rga_queue_setup(struct vb2_queue *vq,
		unsigned int *nbuffers, unsigned int *nplanes,
		unsigned int sizes[], struct device *alloc_devs[])
{
	struct rga_ctx *ctx = vb2_get_drv_priv(vq);
	struct rga_frame *f = rga_get_frame(ctx, vq->type);
	const struct v4l2_pix_format_mplane *pix_fmt;
	int i;

	if (IS_ERR(f))
		return PTR_ERR(f);

	pix_fmt = &f->pix;

	if (*nplanes) {
		if (*nplanes != pix_fmt->num_planes)
			return -EINVAL;

		for (i = 0; i < pix_fmt->num_planes; i++)
			if (sizes[i] < pix_fmt->plane_fmt[i].sizeimage)
				return -EINVAL;

		return 0;
	}

	*nplanes = pix_fmt->num_planes;

	for (i = 0; i < pix_fmt->num_planes; i++)
		sizes[i] = pix_fmt->plane_fmt[i].sizeimage;

	return 0;
}

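/*
 * Allocate a coherent descriptor table big enough to map every page of
 * the frame. It is filled in rga_buf_prepare() and freed in
 * rga_buf_cleanup().
 */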
static int rga_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rockchip_rga *rga = ctx->rga;
	struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
	size_t n_desc = 0;

	n_desc = DIV_ROUND_UP(f->size, PAGE_SIZE);

	rbuf->n_desc = n_desc;
	rbuf->dma_desc = dma_alloc_coherent(rga->dev,
					    rbuf->n_desc * sizeof(*rbuf->dma_desc),
					    &rbuf->dma_desc_pa, GFP_KERNEL);
	if (!rbuf->dma_desc)
		return -ENOMEM;

	return 0;
}

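/*
 * Byte offset of a component plane inside a single contiguous allocation:
 * Y at 0, U after the luma plane, V after the chroma plane (whose size is
 * the luma size divided by the format's uv_factor).
 */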
static int get_plane_offset(struct rga_frame *f, int plane)
{
	if (plane == 0)
		return 0;
	if (plane == 1)
		return f->width * f->height;
	if (plane == 2)
		return f->width * f->height + (f->width * f->height / f->fmt->uv_factor);

	return -EINVAL;
}

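/*
 * Set the plane payloads and build the MMU table for this buffer. Memory
 * planes are mapped through their scatter-gather tables; the remaining
 * component planes of single-allocation formats get offsets derived from
 * the frame geometry.
 */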
static int rga_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
	ssize_t n_desc = 0;
	size_t curr_desc = 0;
	int i;
	const struct v4l2_format_info *info;
	unsigned int offsets[VIDEO_MAX_PLANES];

	if (IS_ERR(f))
		return PTR_ERR(f);

	for (i = 0; i < vb->num_planes; i++) {
		vb2_set_plane_payload(vb, i, f->pix.plane_fmt[i].sizeimage);

		/* Create local MMU table for RGA */
		n_desc = fill_descriptors(&rbuf->dma_desc[curr_desc],
					  rbuf->n_desc - curr_desc,
					  vb2_dma_sg_plane_desc(vb, i));
		if (n_desc < 0) {
			v4l2_err(&ctx->rga->v4l2_dev,
				 "Failed to map video buffer to RGA\n");
			return n_desc;
		}
		offsets[i] = curr_desc << PAGE_SHIFT;
		curr_desc += n_desc;
	}

	/* Fill the remaining planes */
	info = v4l2_format_info(f->fmt->fourcc);
	for (i = info->mem_planes; i < info->comp_planes; i++)
		offsets[i] = get_plane_offset(f, i);

	rbuf->offset.y_off = offsets[0];
	rbuf->offset.u_off = offsets[1];
	rbuf->offset.v_off = offsets[2];

	return 0;
}

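/* Hand the buffer over to the mem2mem framework. */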
static void rga_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

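/* Free the descriptor table allocated in rga_buf_init(). */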
static void rga_buf_cleanup(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rockchip_rga *rga = ctx->rga;

	dma_free_coherent(rga->dev, rbuf->n_desc * sizeof(*rbuf->dma_desc),
			  rbuf->dma_desc, rbuf->dma_desc_pa);
}

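/* Remove all buffers from the mem2mem queues and finish them with @state. */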
static void rga_buf_return_buffers(struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			break;
		v4l2_m2m_buf_done(vbuf, state);
	}
}

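/*
 * Power up the RGA before streaming starts; on failure, give the queued
 * buffers back to vb2 in the QUEUED state.
 */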
static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct rockchip_rga *rga = ctx->rga;
	int ret;

	ret = pm_runtime_resume_and_get(rga->dev);
	if (ret < 0) {
		rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	return 0;
}

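/*
 * Return all pending buffers as errored and drop the runtime PM
 * reference taken in rga_buf_start_streaming().
 */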
static void rga_buf_stop_streaming(struct vb2_queue *q)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(q);
	struct rockchip_rga *rga = ctx->rga;

	rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
	pm_runtime_put(rga->dev);
}

const struct vb2_ops rga_qops = {
	.queue_setup = rga_queue_setup,
	.buf_init = rga_buf_init,
	.buf_prepare = rga_buf_prepare,
	.buf_queue = rga_buf_queue,
	.buf_cleanup = rga_buf_cleanup,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = rga_buf_start_streaming,
	.stop_streaming = rga_buf_stop_streaming,
};