1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Renesas R-Car Fine Display Processor
4  *
5  * Video format converter and frame deinterlacer device.
6  *
7  * Author: Kieran Bingham, <kieran@bingham.xyz>
8  * Copyright (c) 2016 Renesas Electronics Corporation.
9  *
 * This code is based on, and inspired by, the vim2m, rcar_jpu,
 * m2m-deinterlace, and vsp1 drivers.
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/fs.h>
18 #include <linux/interrupt.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <media/rcar-fcp.h>
28 #include <media/v4l2-ctrls.h>
29 #include <media/v4l2-device.h>
30 #include <media/v4l2-event.h>
31 #include <media/v4l2-ioctl.h>
32 #include <media/v4l2-mem2mem.h>
33 #include <media/videobuf2-dma-contig.h>
34 
35 static unsigned int debug;
36 module_param(debug, uint, 0644);
37 MODULE_PARM_DESC(debug, "activate debug info");
38 
39 /* Minimum and maximum frame width/height */
40 #define FDP1_MIN_W		80U
41 #define FDP1_MIN_H		80U
42 
43 #define FDP1_MAX_W		3840U
44 #define FDP1_MAX_H		2160U
45 
46 #define FDP1_MAX_PLANES		3U
47 #define FDP1_MAX_STRIDE		8190U
48 
49 /* Flags that indicate a format can be used for capture/output */
50 #define FDP1_CAPTURE		BIT(0)
51 #define FDP1_OUTPUT		BIT(1)
52 
53 #define DRIVER_NAME		"rcar_fdp1"
54 
/* Number of jobs to have available on the processing queue */
56 #define FDP1_NUMBER_JOBS 8
57 
58 #define dprintk(fdp1, fmt, arg...) \
59 	v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
60 
61 /*
62  * FDP1 registers and bits
63  */
64 
65 /* FDP1 start register - Imm */
66 #define FD1_CTL_CMD			0x0000
67 #define FD1_CTL_CMD_STRCMD		BIT(0)
68 
69 /* Sync generator register - Imm */
70 #define FD1_CTL_SGCMD			0x0004
71 #define FD1_CTL_SGCMD_SGEN		BIT(0)
72 
73 /* Register set end register - Imm */
74 #define FD1_CTL_REGEND			0x0008
75 #define FD1_CTL_REGEND_REGEND		BIT(0)
76 
77 /* Channel activation register - Vupdt */
78 #define FD1_CTL_CHACT			0x000c
79 #define FD1_CTL_CHACT_SMW		BIT(9)
80 #define FD1_CTL_CHACT_WR		BIT(8)
81 #define FD1_CTL_CHACT_SMR		BIT(3)
82 #define FD1_CTL_CHACT_RD2		BIT(2)
83 #define FD1_CTL_CHACT_RD1		BIT(1)
84 #define FD1_CTL_CHACT_RD0		BIT(0)
85 
86 /* Operation Mode Register - Vupdt */
87 #define FD1_CTL_OPMODE			0x0010
88 #define FD1_CTL_OPMODE_PRG		BIT(4)
89 #define FD1_CTL_OPMODE_VIMD_INTERRUPT	(0 << 0)
90 #define FD1_CTL_OPMODE_VIMD_BESTEFFORT	(1 << 0)
91 #define FD1_CTL_OPMODE_VIMD_NOINTERRUPT	(2 << 0)
92 
93 #define FD1_CTL_VPERIOD			0x0014
94 #define FD1_CTL_CLKCTRL			0x0018
95 #define FD1_CTL_CLKCTRL_CSTP_N		BIT(0)
96 
97 /* Software reset register */
98 #define FD1_CTL_SRESET			0x001c
99 #define FD1_CTL_SRESET_SRST		BIT(0)
100 
101 /* Control status register (V-update-status) */
102 #define FD1_CTL_STATUS			0x0024
103 #define FD1_CTL_STATUS_VINT_CNT_MASK	GENMASK(31, 16)
104 #define FD1_CTL_STATUS_VINT_CNT_SHIFT	16
105 #define FD1_CTL_STATUS_SGREGSET		BIT(10)
106 #define FD1_CTL_STATUS_SGVERR		BIT(9)
107 #define FD1_CTL_STATUS_SGFREND		BIT(8)
108 #define FD1_CTL_STATUS_BSY		BIT(0)
109 
110 #define FD1_CTL_VCYCLE_STAT		0x0028
111 
112 /* Interrupt enable register */
113 #define FD1_CTL_IRQENB			0x0038
114 /* Interrupt status register */
115 #define FD1_CTL_IRQSTA			0x003c
116 /* Interrupt control register */
117 #define FD1_CTL_IRQFSET			0x0040
118 
119 /* Common IRQ Bit settings */
120 #define FD1_CTL_IRQ_VERE		BIT(16)
121 #define FD1_CTL_IRQ_VINTE		BIT(4)
122 #define FD1_CTL_IRQ_FREE		BIT(0)
123 #define FD1_CTL_IRQ_MASK		(FD1_CTL_IRQ_VERE | \
124 					 FD1_CTL_IRQ_VINTE | \
125 					 FD1_CTL_IRQ_FREE)
126 
127 /* RPF */
128 #define FD1_RPF_SIZE			0x0060
129 #define FD1_RPF_SIZE_MASK		GENMASK(12, 0)
130 #define FD1_RPF_SIZE_H_SHIFT		16
131 #define FD1_RPF_SIZE_V_SHIFT		0
132 
133 #define FD1_RPF_FORMAT			0x0064
134 #define FD1_RPF_FORMAT_CIPM		BIT(16)
135 #define FD1_RPF_FORMAT_RSPYCS		BIT(13)
136 #define FD1_RPF_FORMAT_RSPUVS		BIT(12)
137 #define FD1_RPF_FORMAT_CF		BIT(8)
138 
139 #define FD1_RPF_PSTRIDE			0x0068
140 #define FD1_RPF_PSTRIDE_Y_SHIFT		16
141 #define FD1_RPF_PSTRIDE_C_SHIFT		0
142 
143 /* RPF0 Source Component Y Address register */
144 #define FD1_RPF0_ADDR_Y			0x006c
145 
146 /* RPF1 Current Picture Registers */
147 #define FD1_RPF1_ADDR_Y			0x0078
148 #define FD1_RPF1_ADDR_C0		0x007c
149 #define FD1_RPF1_ADDR_C1		0x0080
150 
151 /* RPF2 next picture register */
152 #define FD1_RPF2_ADDR_Y			0x0084
153 
154 #define FD1_RPF_SMSK_ADDR		0x0090
155 #define FD1_RPF_SWAP			0x0094
156 
157 /* WPF */
158 #define FD1_WPF_FORMAT			0x00c0
159 #define FD1_WPF_FORMAT_PDV_SHIFT	24
160 #define FD1_WPF_FORMAT_FCNL		BIT(20)
161 #define FD1_WPF_FORMAT_WSPYCS		BIT(15)
162 #define FD1_WPF_FORMAT_WSPUVS		BIT(14)
163 #define FD1_WPF_FORMAT_WRTM_601_16	(0 << 9)
164 #define FD1_WPF_FORMAT_WRTM_601_0	(1 << 9)
165 #define FD1_WPF_FORMAT_WRTM_709_16	(2 << 9)
166 #define FD1_WPF_FORMAT_CSC		BIT(8)
167 
168 #define FD1_WPF_RNDCTL			0x00c4
169 #define FD1_WPF_RNDCTL_CBRM		BIT(28)
170 #define FD1_WPF_RNDCTL_CLMD_NOCLIP	(0 << 12)
171 #define FD1_WPF_RNDCTL_CLMD_CLIP_16_235	(1 << 12)
172 #define FD1_WPF_RNDCTL_CLMD_CLIP_1_254	(2 << 12)
173 
174 #define FD1_WPF_PSTRIDE			0x00c8
175 #define FD1_WPF_PSTRIDE_Y_SHIFT		16
176 #define FD1_WPF_PSTRIDE_C_SHIFT		0
177 
178 /* WPF Destination picture */
179 #define FD1_WPF_ADDR_Y			0x00cc
180 #define FD1_WPF_ADDR_C0			0x00d0
181 #define FD1_WPF_ADDR_C1			0x00d4
182 #define FD1_WPF_SWAP			0x00d8
183 #define FD1_WPF_SWAP_OSWAP_SHIFT	0
184 #define FD1_WPF_SWAP_SSWAP_SHIFT	4
185 
186 /* WPF/RPF Common */
187 #define FD1_RWPF_SWAP_BYTE		BIT(0)
188 #define FD1_RWPF_SWAP_WORD		BIT(1)
189 #define FD1_RWPF_SWAP_LWRD		BIT(2)
190 #define FD1_RWPF_SWAP_LLWD		BIT(3)
191 
192 /* IPC */
193 #define FD1_IPC_MODE			0x0100
194 #define FD1_IPC_MODE_DLI		BIT(8)
195 #define FD1_IPC_MODE_DIM_ADAPT2D3D	(0 << 0)
196 #define FD1_IPC_MODE_DIM_FIXED2D	(1 << 0)
197 #define FD1_IPC_MODE_DIM_FIXED3D	(2 << 0)
198 #define FD1_IPC_MODE_DIM_PREVFIELD	(3 << 0)
199 #define FD1_IPC_MODE_DIM_NEXTFIELD	(4 << 0)
200 
201 #define FD1_IPC_SMSK_THRESH		0x0104
202 #define FD1_IPC_SMSK_THRESH_CONST	0x00010002
203 
204 #define FD1_IPC_COMB_DET		0x0108
205 #define FD1_IPC_COMB_DET_CONST		0x00200040
206 
207 #define FD1_IPC_MOTDEC			0x010c
208 #define FD1_IPC_MOTDEC_CONST		0x00008020
209 
210 /* DLI registers */
211 #define FD1_IPC_DLI_BLEND		0x0120
212 #define FD1_IPC_DLI_BLEND_CONST		0x0080ff02
213 
214 #define FD1_IPC_DLI_HGAIN		0x0124
215 #define FD1_IPC_DLI_HGAIN_CONST		0x001000ff
216 
217 #define FD1_IPC_DLI_SPRS		0x0128
218 #define FD1_IPC_DLI_SPRS_CONST		0x009004ff
219 
220 #define FD1_IPC_DLI_ANGLE		0x012c
221 #define FD1_IPC_DLI_ANGLE_CONST		0x0004080c
222 
223 #define FD1_IPC_DLI_ISOPIX0		0x0130
224 #define FD1_IPC_DLI_ISOPIX0_CONST	0xff10ff10
225 
226 #define FD1_IPC_DLI_ISOPIX1		0x0134
227 #define FD1_IPC_DLI_ISOPIX1_CONST	0x0000ff10
228 
229 /* Sensor registers */
230 #define FD1_IPC_SENSOR_TH0		0x0140
231 #define FD1_IPC_SENSOR_TH0_CONST	0x20208080
232 
233 #define FD1_IPC_SENSOR_TH1		0x0144
234 #define FD1_IPC_SENSOR_TH1_CONST	0
235 
236 #define FD1_IPC_SENSOR_CTL0		0x0170
237 #define FD1_IPC_SENSOR_CTL0_CONST	0x00002201
238 
239 #define FD1_IPC_SENSOR_CTL1		0x0174
240 #define FD1_IPC_SENSOR_CTL1_CONST	0
241 
242 #define FD1_IPC_SENSOR_CTL2		0x0178
243 #define FD1_IPC_SENSOR_CTL2_X_SHIFT	16
244 #define FD1_IPC_SENSOR_CTL2_Y_SHIFT	0
245 
246 #define FD1_IPC_SENSOR_CTL3		0x017c
247 #define FD1_IPC_SENSOR_CTL3_0_SHIFT	16
248 #define FD1_IPC_SENSOR_CTL3_1_SHIFT	0
249 
250 /* Line memory pixel number register */
251 #define FD1_IPC_LMEM			0x01e0
252 #define FD1_IPC_LMEM_LINEAR		1024
253 #define FD1_IPC_LMEM_TILE		960
254 
255 /* Internal Data (HW Version) */
256 #define FD1_IP_INTDATA			0x0800
257 #define FD1_IP_H3_ES1			0x02010101
258 #define FD1_IP_M3W			0x02010202
259 #define FD1_IP_H3			0x02010203
260 #define FD1_IP_M3N			0x02010204
261 #define FD1_IP_E3			0x02010205
262 
263 /* LUTs */
264 #define FD1_LUT_DIF_ADJ			0x1000
265 #define FD1_LUT_SAD_ADJ			0x1400
266 #define FD1_LUT_BLD_GAIN		0x1800
267 #define FD1_LUT_DIF_GAIN		0x1c00
268 #define FD1_LUT_MDET			0x2000
269 
270 /**
271  * struct fdp1_fmt - The FDP1 internal format data
272  * @fourcc: the fourcc code, to match the V4L2 API
273  * @bpp: bits per pixel per plane
274  * @num_planes: number of planes
275  * @hsub: horizontal subsampling factor
276  * @vsub: vertical subsampling factor
277  * @fmt: 7-bit format code for the fdp1 hardware
278  * @swap_yc: the Y and C components are swapped (Y comes before C)
279  * @swap_uv: the U and V components are swapped (V comes before U)
280  * @swap: swap register control
281  * @types: types of queue this format is applicable to
282  */
283 struct fdp1_fmt {
284 	u32	fourcc;
285 	u8	bpp[3];
286 	u8	num_planes;
287 	u8	hsub;
288 	u8	vsub;
289 	u8	fmt;
290 	bool	swap_yc;
291 	bool	swap_uv;
292 	u8	swap;
293 	u8	types;
294 };
295 
296 static const struct fdp1_fmt fdp1_formats[] = {
297 	/* RGB formats are only supported by the Write Pixel Formatter */
298 
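	/*
	 * Each entry lists: fourcc, bpp[] per plane, num_planes, hsub, vsub,
	 * the hardware format code, swap_yc, swap_uv, the swap register value
	 * and the queue types (capture/output) the format may be used on.
	 */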
299 	{ V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
300 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
301 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
302 	  FDP1_CAPTURE },
303 	{ V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
304 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
305 	  FD1_RWPF_SWAP_WORD,
306 	  FDP1_CAPTURE },
307 	{ V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
308 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
309 	  FD1_RWPF_SWAP_WORD,
310 	  FDP1_CAPTURE },
311 	{ V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
312 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
313 	  FD1_RWPF_SWAP_WORD,
314 	  FDP1_CAPTURE },
315 	{ V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
316 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
317 	  FDP1_CAPTURE },
318 	{ V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
319 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
320 	  FDP1_CAPTURE },
321 	{ V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
322 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
323 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
324 	  FDP1_CAPTURE },
325 	{ V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
326 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
327 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
328 	  FDP1_CAPTURE },
329 	{ V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
330 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
331 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
332 	  FDP1_CAPTURE },
333 	{ V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
334 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
335 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
336 	  FDP1_CAPTURE },
337 	{ V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
338 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
339 	  FD1_RWPF_SWAP_WORD,
340 	  FDP1_CAPTURE },
341 	{ V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
342 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
343 	  FD1_RWPF_SWAP_WORD,
344 	  FDP1_CAPTURE },
345 
346 	/* YUV Formats are supported by Read and Write Pixel Formatters */
347 
348 	{ V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
349 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
350 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
351 	  FDP1_CAPTURE | FDP1_OUTPUT },
352 	{ V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
353 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
354 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
355 	  FDP1_CAPTURE | FDP1_OUTPUT },
356 	{ V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
357 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
358 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
359 	  FDP1_CAPTURE | FDP1_OUTPUT },
360 	{ V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
361 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
362 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
363 	  FDP1_CAPTURE | FDP1_OUTPUT },
364 	{ V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
365 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
366 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
367 	  FDP1_CAPTURE | FDP1_OUTPUT },
368 	{ V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
369 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
370 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
371 	  FDP1_CAPTURE | FDP1_OUTPUT },
372 	{ V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
373 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
374 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
375 	  FDP1_CAPTURE | FDP1_OUTPUT },
376 	{ V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
377 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
378 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
379 	  FDP1_CAPTURE | FDP1_OUTPUT },
380 	{ V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
381 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
382 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
383 	  FDP1_CAPTURE | FDP1_OUTPUT },
384 	{ V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
385 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
386 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
387 	  FDP1_CAPTURE | FDP1_OUTPUT },
388 	{ V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
389 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
390 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
391 	  FDP1_CAPTURE | FDP1_OUTPUT },
392 	{ V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
393 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
394 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
395 	  FDP1_CAPTURE | FDP1_OUTPUT },
396 	{ V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
397 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
398 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
399 	  FDP1_CAPTURE | FDP1_OUTPUT },
400 	{ V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
401 	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
402 	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
403 	  FDP1_CAPTURE | FDP1_OUTPUT },
404 };
405 
static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
407 {
408 	return fmt->fmt <= 0x1b; /* Last RGB code */
409 }
410 
411 /*
412  * FDP1 Lookup tables range from 0...255 only
413  *
 * Each table holds at most 256 entries; shorter tables are
 * padded out to 256 entries by duplicating the last value.
416  */
417 static const u8 fdp1_diff_adj[] = {
418 	0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
419 	0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
420 	0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
421 };
422 
423 static const u8 fdp1_sad_adj[] = {
424 	0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
425 	0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
426 	0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
427 };
428 
429 static const u8 fdp1_bld_gain[] = {
430 	0x80,
431 };
432 
433 static const u8 fdp1_dif_gain[] = {
434 	0x80,
435 };
436 
437 static const u8 fdp1_mdet[] = {
438 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
439 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
440 	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
441 	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
442 	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
443 	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
444 	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
445 	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
446 	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
447 	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
448 	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
449 	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
450 	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
451 	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
452 	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
453 	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
454 	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
455 	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
456 	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
457 	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
458 	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
459 	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
460 	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
461 	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
462 	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
463 	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
464 	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
465 	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
466 	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
467 	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
468 	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
469 	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
470 };
471 
472 /* Per-queue, driver-specific private data */
473 struct fdp1_q_data {
474 	const struct fdp1_fmt		*fmt;
475 	struct v4l2_pix_format_mplane	format;
476 
477 	unsigned int			vsize;
478 	unsigned int			stride_y;
479 	unsigned int			stride_c;
480 };
481 
static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
483 {
484 	const struct fdp1_fmt *fmt;
485 	unsigned int i;
486 
487 	for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
488 		fmt = &fdp1_formats[i];
489 		if (fmt->fourcc == pixelformat)
490 			return fmt;
491 	}
492 
493 	return NULL;
494 }
495 
496 enum fdp1_deint_mode {
497 	FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
498 	FDP1_ADAPT2D3D,
499 	FDP1_FIXED2D,
500 	FDP1_FIXED3D,
501 	FDP1_PREVFIELD,
502 	FDP1_NEXTFIELD,
503 };
504 
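/*
 * Modes which interpolate between fields need the next and/or previous
 * field as an additional hardware input (read channels RD2 and RD0).
 */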
505 #define FDP1_DEINT_MODE_USES_NEXT(mode) \
506 	(mode == FDP1_ADAPT2D3D || \
507 	 mode == FDP1_FIXED3D   || \
508 	 mode == FDP1_NEXTFIELD)
509 
510 #define FDP1_DEINT_MODE_USES_PREV(mode) \
511 	(mode == FDP1_ADAPT2D3D || \
512 	 mode == FDP1_FIXED3D   || \
513 	 mode == FDP1_PREVFIELD)
514 
515 /*
516  * FDP1 operates on potentially 3 fields, which are tracked
517  * from the VB buffers using this context structure.
518  * Will always be a field or a full frame, never two fields.
519  */
520 struct fdp1_field_buffer {
521 	struct vb2_v4l2_buffer		*vb;
522 	dma_addr_t			addrs[3];
523 
524 	/* Should be NONE:TOP:BOTTOM only */
525 	enum v4l2_field			field;
526 
527 	/* Flag to indicate this is the last field in the vb */
528 	bool				last_field;
529 
530 	/* Buffer queue lists */
531 	struct list_head		list;
532 };
533 
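/*
 * A queued V4L2 buffer carries either a single field (progressive or
 * alternate field order) or two fields (interlaced field orders), each
 * tracked by its own fdp1_field_buffer.
 */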
534 struct fdp1_buffer {
535 	struct v4l2_m2m_buffer		m2m_buf;
536 	struct fdp1_field_buffer	fields[2];
537 	unsigned int			num_fields;
538 };
539 
static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
541 {
542 	return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
543 }
544 
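/*
 * A job describes one hardware pass: up to three input fields (previous,
 * active and next, read through RPF0/RPF1/RPF2) producing a single
 * destination field through the WPF.
 */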
545 struct fdp1_job {
546 	struct fdp1_field_buffer	*previous;
547 	struct fdp1_field_buffer	*active;
548 	struct fdp1_field_buffer	*next;
549 	struct fdp1_field_buffer	*dst;
550 
551 	/* A job can only be on one list at a time */
552 	struct list_head		list;
553 };
554 
555 struct fdp1_dev {
556 	struct v4l2_device		v4l2_dev;
557 	struct video_device		vfd;
558 
559 	struct mutex			dev_mutex;
560 	spinlock_t			irqlock;
561 	spinlock_t			device_process_lock;
562 
563 	void __iomem			*regs;
564 	unsigned int			irq;
565 	struct device			*dev;
566 
	/* Job queues: jobs move from free to queued to hw and back to free */
568 	struct fdp1_job			jobs[FDP1_NUMBER_JOBS];
569 	struct list_head		free_job_list;
570 	struct list_head		queued_job_list;
571 	struct list_head		hw_job_list;
572 
573 	unsigned int			clk_rate;
574 
575 	struct rcar_fcp_device		*fcp;
576 	struct v4l2_m2m_dev		*m2m_dev;
577 };
578 
579 struct fdp1_ctx {
580 	struct v4l2_fh			fh;
581 	struct fdp1_dev			*fdp1;
582 
583 	struct v4l2_ctrl_handler	hdl;
584 	unsigned int			sequence;
585 
586 	/* Processed buffers in this transaction */
587 	u8				num_processed;
588 
589 	/* Transaction length (i.e. how many buffers per transaction) */
590 	u32				translen;
591 
592 	/* Abort requested by m2m */
593 	int				aborting;
594 
595 	/* Deinterlace processing mode */
596 	enum fdp1_deint_mode		deint_mode;
597 
598 	/*
599 	 * Adaptive 2D/3D mode uses a shared mask
600 	 * This is allocated at streamon, if the ADAPT2D3D mode
601 	 * is requested
602 	 */
603 	unsigned int			smsk_size;
604 	dma_addr_t			smsk_addr[2];
605 	void				*smsk_cpu;
606 
607 	/* Capture pipeline, can specify an alpha value
608 	 * for supported formats. 0-255 only
609 	 */
610 	unsigned char			alpha;
611 
612 	/* Source and destination queue data */
613 	struct fdp1_q_data		out_q; /* HW Source */
614 	struct fdp1_q_data		cap_q; /* HW Destination */
615 
616 	/*
617 	 * Field Queues
	 * Each interlaced field may be referenced up to three times (as the
	 * previous, active and next field), and is tracked in this list.
619 	 *
620 	 * V4L2 Buffers are tracked inside the fdp1_buffer
621 	 * and released when the last 'field' completes
622 	 */
623 	struct list_head		fields_queue;
624 	unsigned int			buffers_queued;
625 
626 	/*
627 	 * For de-interlacing we need to track our previous buffer
628 	 * while preparing our job lists.
629 	 */
630 	struct fdp1_field_buffer	*previous;
631 };
632 
static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
634 {
635 	return container_of(fh, struct fdp1_ctx, fh);
636 }
637 
static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
639 					 enum v4l2_buf_type type)
640 {
641 	if (V4L2_TYPE_IS_OUTPUT(type))
642 		return &ctx->out_q;
643 	else
644 		return &ctx->cap_q;
645 }
646 
647 /*
648  * list_remove_job: Take the first item off the specified job list
649  *
650  * Returns: pointer to a job, or NULL if the list is empty.
651  */
static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
653 					 struct list_head *list)
654 {
655 	struct fdp1_job *job;
656 	unsigned long flags;
657 
658 	spin_lock_irqsave(&fdp1->irqlock, flags);
659 	job = list_first_entry_or_null(list, struct fdp1_job, list);
660 	if (job)
661 		list_del(&job->list);
662 	spin_unlock_irqrestore(&fdp1->irqlock, flags);
663 
664 	return job;
665 }
666 
667 /*
668  * list_add_job: Add a job to the specified job list
669  *
670  * Returns: void - always succeeds
671  */
static void list_add_job(struct fdp1_dev *fdp1,
673 			 struct list_head *list,
674 			 struct fdp1_job *job)
675 {
676 	unsigned long flags;
677 
678 	spin_lock_irqsave(&fdp1->irqlock, flags);
679 	list_add_tail(&job->list, list);
680 	spin_unlock_irqrestore(&fdp1->irqlock, flags);
681 }
682 
static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
684 {
685 	return list_remove_job(fdp1, &fdp1->free_job_list);
686 }
687 
static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
689 {
690 	/* Ensure that all residue from previous jobs is gone */
691 	memset(job, 0, sizeof(struct fdp1_job));
692 
693 	list_add_job(fdp1, &fdp1->free_job_list, job);
694 }
695 
static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
697 {
698 	list_add_job(fdp1, &fdp1->queued_job_list, job);
699 }
700 
static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
702 {
703 	return list_remove_job(fdp1, &fdp1->queued_job_list);
704 }
705 
static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
707 {
708 	list_add_job(fdp1, &fdp1->hw_job_list, job);
709 }
710 
static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
712 {
713 	return list_remove_job(fdp1, &fdp1->hw_job_list);
714 }
715 
716 /*
717  * Buffer lists handling
718  */
static void fdp1_field_complete(struct fdp1_ctx *ctx,
720 				struct fdp1_field_buffer *fbuf)
721 {
722 	/* job->previous may be on the first field */
723 	if (!fbuf)
724 		return;
725 
726 	if (fbuf->last_field)
727 		v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
728 }
729 
static void fdp1_queue_field(struct fdp1_ctx *ctx,
731 			     struct fdp1_field_buffer *fbuf)
732 {
733 	unsigned long flags;
734 
735 	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
736 	list_add_tail(&fbuf->list, &ctx->fields_queue);
737 	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
738 
739 	ctx->buffers_queued++;
740 }
741 
static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
743 {
744 	struct fdp1_field_buffer *fbuf;
745 	unsigned long flags;
746 
747 	ctx->buffers_queued--;
748 
749 	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
750 	fbuf = list_first_entry_or_null(&ctx->fields_queue,
751 					struct fdp1_field_buffer, list);
752 	if (fbuf)
753 		list_del(&fbuf->list);
754 	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
755 
756 	return fbuf;
757 }
758 
759 /*
760  * Return the next field in the queue - or NULL,
761  * without removing the item from the list
762  */
static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
764 {
765 	struct fdp1_field_buffer *fbuf;
766 	unsigned long flags;
767 
768 	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
769 	fbuf = list_first_entry_or_null(&ctx->fields_queue,
770 					struct fdp1_field_buffer, list);
771 	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
772 
773 	return fbuf;
774 }
775 
static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
777 {
778 	u32 value = ioread32(fdp1->regs + reg);
779 
780 	if (debug >= 2)
781 		dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
782 
783 	return value;
784 }
785 
static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
787 {
788 	if (debug >= 2)
789 		dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
790 
791 	iowrite32(val, fdp1->regs + reg);
792 }
793 
794 /* IPC registers are to be programmed with constant values */
static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
796 {
797 	struct fdp1_dev *fdp1 = ctx->fdp1;
798 
799 	fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST,	FD1_IPC_SMSK_THRESH);
800 	fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST,	FD1_IPC_COMB_DET);
801 	fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST,	FD1_IPC_MOTDEC);
802 
803 	fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST,	FD1_IPC_DLI_BLEND);
804 	fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST,	FD1_IPC_DLI_HGAIN);
805 	fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST,	FD1_IPC_DLI_SPRS);
806 	fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST,	FD1_IPC_DLI_ANGLE);
807 	fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST,	FD1_IPC_DLI_ISOPIX0);
808 	fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST,	FD1_IPC_DLI_ISOPIX1);
809 }
810 
811 
static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
813 {
814 	struct fdp1_dev *fdp1 = ctx->fdp1;
815 	struct fdp1_q_data *src_q_data = &ctx->out_q;
816 	unsigned int x0, x1;
817 	unsigned int hsize = src_q_data->format.width;
818 	unsigned int vsize = src_q_data->format.height;
819 
820 	x0 = hsize / 3;
821 	x1 = 2 * hsize / 3;
822 
823 	fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
824 	fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
825 	fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
826 	fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
827 
828 	fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
829 			 ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
830 			 FD1_IPC_SENSOR_CTL2);
831 
832 	fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
833 			 (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
834 			 FD1_IPC_SENSOR_CTL3);
835 }
836 
837 /*
838  * fdp1_write_lut: Write a padded LUT to the hw
839  *
840  * FDP1 uses constant data for de-interlacing processing,
841  * with large tables. These hardware tables are all 256 bytes
842  * long, however they often contain repeated data at the end.
843  *
844  * The last byte of the table is written to all remaining entries.
845  */
static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
847 			   unsigned int len, unsigned int base)
848 {
849 	unsigned int i;
850 	u8 pad;
851 
852 	/* Tables larger than the hw are clipped */
853 	len = min(len, 256u);
854 
855 	for (i = 0; i < len; i++)
856 		fdp1_write(fdp1, lut[i], base + (i*4));
857 
858 	/* Tables are padded with the last entry */
859 	pad = lut[i-1];
860 
861 	for (; i < 256; i++)
862 		fdp1_write(fdp1, pad, base + (i*4));
863 }
864 
static void fdp1_set_lut(struct fdp1_dev *fdp1)
866 {
867 	fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
868 			FD1_LUT_DIF_ADJ);
869 	fdp1_write_lut(fdp1, fdp1_sad_adj,  ARRAY_SIZE(fdp1_sad_adj),
870 			FD1_LUT_SAD_ADJ);
871 	fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
872 			FD1_LUT_BLD_GAIN);
873 	fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
874 			FD1_LUT_DIF_GAIN);
875 	fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
876 			FD1_LUT_MDET);
877 }
878 
static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
880 			       struct fdp1_job *job)
881 {
882 	struct fdp1_dev *fdp1 = ctx->fdp1;
883 	u32 picture_size;
884 	u32 pstride;
885 	u32 format;
886 	u32 smsk_addr;
887 
888 	struct fdp1_q_data *q_data = &ctx->out_q;
889 
890 	/* Picture size is common to Source and Destination frames */
891 	picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
892 		     | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
893 
894 	/* Strides */
895 	pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
896 	if (q_data->format.num_planes > 1)
897 		pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
898 
899 	/* Format control */
900 	format = q_data->fmt->fmt;
901 	if (q_data->fmt->swap_yc)
902 		format |= FD1_RPF_FORMAT_RSPYCS;
903 
904 	if (q_data->fmt->swap_uv)
905 		format |= FD1_RPF_FORMAT_RSPUVS;
906 
907 	if (job->active->field == V4L2_FIELD_BOTTOM) {
908 		format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
909 		smsk_addr = ctx->smsk_addr[0];
910 	} else {
911 		smsk_addr = ctx->smsk_addr[1];
912 	}
913 
914 	/* Deint mode is non-zero when deinterlacing */
915 	if (ctx->deint_mode)
916 		format |= FD1_RPF_FORMAT_CIPM;
917 
918 	fdp1_write(fdp1, format, FD1_RPF_FORMAT);
919 	fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
920 	fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
921 	fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
922 	fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
923 
924 	/* Previous Field Channel (CH0) */
925 	if (job->previous)
926 		fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
927 
928 	/* Current Field Channel (CH1) */
929 	fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
930 	fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
931 	fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
932 
933 	/* Next Field  Channel (CH2) */
934 	if (job->next)
935 		fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
936 }
937 
static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
939 			       struct fdp1_job *job)
940 {
941 	struct fdp1_dev *fdp1 = ctx->fdp1;
942 	struct fdp1_q_data *src_q_data = &ctx->out_q;
943 	struct fdp1_q_data *q_data = &ctx->cap_q;
944 	u32 pstride;
945 	u32 format;
946 	u32 swap;
947 	u32 rndctl;
948 
949 	pstride = q_data->format.plane_fmt[0].bytesperline
950 		<< FD1_WPF_PSTRIDE_Y_SHIFT;
951 
952 	if (q_data->format.num_planes > 1)
953 		pstride |= q_data->format.plane_fmt[1].bytesperline
954 			<< FD1_WPF_PSTRIDE_C_SHIFT;
955 
956 	format = q_data->fmt->fmt; /* Output Format Code */
957 
958 	if (q_data->fmt->swap_yc)
959 		format |= FD1_WPF_FORMAT_WSPYCS;
960 
961 	if (q_data->fmt->swap_uv)
962 		format |= FD1_WPF_FORMAT_WSPUVS;
963 
964 	if (fdp1_fmt_is_rgb(q_data->fmt)) {
965 		/* Enable Colour Space conversion */
966 		format |= FD1_WPF_FORMAT_CSC;
967 
968 		/* Set WRTM */
969 		if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
970 			format |= FD1_WPF_FORMAT_WRTM_709_16;
971 		else if (src_q_data->format.quantization ==
972 				V4L2_QUANTIZATION_FULL_RANGE)
973 			format |= FD1_WPF_FORMAT_WRTM_601_0;
974 		else
975 			format |= FD1_WPF_FORMAT_WRTM_601_16;
976 	}
977 
978 	/* Set an alpha value into the Pad Value */
979 	format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
980 
981 	/* Determine picture rounding and clipping */
982 	rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
983 	rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
984 
985 	/* WPF Swap needs both ISWAP and OSWAP setting */
986 	swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
987 	swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
988 
989 	fdp1_write(fdp1, format, FD1_WPF_FORMAT);
990 	fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
991 	fdp1_write(fdp1, swap, FD1_WPF_SWAP);
992 	fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
993 
994 	fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
995 	fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
996 	fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
997 }
998 
static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
1000 				      struct fdp1_job *job)
1001 {
1002 	struct fdp1_dev *fdp1 = ctx->fdp1;
1003 	u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
1004 	u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
1005 	u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
1006 
1007 	/* De-interlacing Mode */
1008 	switch (ctx->deint_mode) {
1009 	default:
1010 	case FDP1_PROGRESSIVE:
1011 		dprintk(fdp1, "Progressive Mode\n");
1012 		opmode |= FD1_CTL_OPMODE_PRG;
1013 		ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
1014 		break;
1015 	case FDP1_ADAPT2D3D:
1016 		dprintk(fdp1, "Adapt2D3D Mode\n");
1017 		if (ctx->sequence == 0 || ctx->aborting)
1018 			ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
1019 		else
1020 			ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
1021 
1022 		if (ctx->sequence > 1) {
1023 			channels |= FD1_CTL_CHACT_SMW;
1024 			channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
1025 		}
1026 
1027 		if (ctx->sequence > 2)
1028 			channels |= FD1_CTL_CHACT_SMR;
1029 
1030 		break;
1031 	case FDP1_FIXED3D:
1032 		dprintk(fdp1, "Fixed 3D Mode\n");
1033 		ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
1034 		/* Except for first and last frame, enable all channels */
1035 		if (!(ctx->sequence == 0 || ctx->aborting))
1036 			channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
1037 		break;
1038 	case FDP1_FIXED2D:
1039 		dprintk(fdp1, "Fixed 2D Mode\n");
1040 		ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
1041 		/* No extra channels enabled */
1042 		break;
1043 	case FDP1_PREVFIELD:
1044 		dprintk(fdp1, "Previous Field Mode\n");
1045 		ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
1046 		channels |= FD1_CTL_CHACT_RD0; /* Previous */
1047 		break;
1048 	case FDP1_NEXTFIELD:
1049 		dprintk(fdp1, "Next Field Mode\n");
1050 		ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
1051 		channels |= FD1_CTL_CHACT_RD2; /* Next */
1052 		break;
1053 	}
1054 
1055 	fdp1_write(fdp1, channels,	FD1_CTL_CHACT);
1056 	fdp1_write(fdp1, opmode,	FD1_CTL_OPMODE);
1057 	fdp1_write(fdp1, ipcmode,	FD1_IPC_MODE);
1058 }
1059 
1060 /*
1061  * fdp1_device_process() - Run the hardware
1062  *
1063  * Configure and start the hardware to generate a single frame
1064  * of output given our input parameters.
1065  */
static int fdp1_device_process(struct fdp1_ctx *ctx)
1067 
1068 {
1069 	struct fdp1_dev *fdp1 = ctx->fdp1;
1070 	struct fdp1_job *job;
1071 	unsigned long flags;
1072 
1073 	spin_lock_irqsave(&fdp1->device_process_lock, flags);
1074 
1075 	/* Get a job to process */
1076 	job = get_queued_job(fdp1);
1077 	if (!job) {
1078 		/*
1079 		 * VINT can call us to see if we can queue another job.
1080 		 * If we have no work to do, we simply return.
1081 		 */
1082 		spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
1083 		return 0;
1084 	}
1085 
1086 	/* First Frame only? ... */
1087 	fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
1088 
1089 	/* Set the mode, and configuration */
1090 	fdp1_configure_deint_mode(ctx, job);
1091 
1092 	/* DLI Static Configuration */
1093 	fdp1_set_ipc_dli(ctx);
1094 
1095 	/* Sensor Configuration */
1096 	fdp1_set_ipc_sensor(ctx);
1097 
1098 	/* Setup the source picture */
1099 	fdp1_configure_rpf(ctx, job);
1100 
1101 	/* Setup the destination picture */
1102 	fdp1_configure_wpf(ctx, job);
1103 
1104 	/* Line Memory Pixel Number Register for linear access */
1105 	fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
1106 
1107 	/* Enable Interrupts */
1108 	fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
1109 
1110 	/* Finally, the Immediate Registers */
1111 
1112 	/* This job is now in the HW queue */
1113 	queue_hw_job(fdp1, job);
1114 
1115 	/* Start the command */
1116 	fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
1117 
1118 	/* Registers will update to HW at next VINT */
1119 	fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
1120 
1121 	/* Enable VINT Generator */
1122 	fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
1123 
1124 	spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
1125 
1126 	return 0;
1127 }
1128 
1129 /*
1130  * mem2mem callbacks
1131  */
1132 
1133 /*
1134  * job_ready() - check whether an instance is ready to be scheduled to run
1135  */
static int fdp1_m2m_job_ready(void *priv)
1137 {
1138 	struct fdp1_ctx *ctx = priv;
1139 	struct fdp1_q_data *src_q_data = &ctx->out_q;
1140 	int srcbufs = 1;
1141 	int dstbufs = 1;
1142 
1143 	dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
1144 		v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
1145 		v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
1146 
1147 	/* One output buffer is required for each field */
1148 	if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
1149 		dstbufs = 2;
1150 
1151 	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
1152 	    || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
1153 		dprintk(ctx->fdp1, "Not enough buffers available\n");
1154 		return 0;
1155 	}
1156 
1157 	return 1;
1158 }
1159 
static void fdp1_m2m_job_abort(void *priv)
1161 {
1162 	struct fdp1_ctx *ctx = priv;
1163 
1164 	dprintk(ctx->fdp1, "+\n");
1165 
1166 	/* Will cancel the transaction in the next interrupt handler */
1167 	ctx->aborting = 1;
1168 
1169 	/* Immediate abort sequence */
1170 	fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
1171 	fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
1172 }
1173 
1174 /*
1175  * fdp1_prepare_job: Prepare and queue a new job for a single action of work
1176  *
1177  * Prepare the next field, (or frame in progressive) and an output
1178  * buffer for the hardware to perform a single operation.
1179  */
static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
1181 {
1182 	struct vb2_v4l2_buffer *vbuf;
1183 	struct fdp1_buffer *fbuf;
1184 	struct fdp1_dev *fdp1 = ctx->fdp1;
1185 	struct fdp1_job *job;
1186 	unsigned int buffers_required = 1;
1187 
1188 	dprintk(fdp1, "+\n");
1189 
1190 	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
1191 		buffers_required = 2;
1192 
1193 	if (ctx->buffers_queued < buffers_required)
1194 		return NULL;
1195 
1196 	job = fdp1_job_alloc(fdp1);
1197 	if (!job) {
1198 		dprintk(fdp1, "No free jobs currently available\n");
1199 		return NULL;
1200 	}
1201 
1202 	job->active = fdp1_dequeue_field(ctx);
1203 	if (!job->active) {
1204 		/* Buffer check should prevent this ever happening */
1205 		dprintk(fdp1, "No input buffers currently available\n");
1206 
1207 		fdp1_job_free(fdp1, job);
1208 		return NULL;
1209 	}
1210 
1211 	dprintk(fdp1, "+ Buffer en-route...\n");
1212 
1213 	/* Source buffers have been prepared on our buffer_queue
1214 	 * Prepare our Output buffer
1215 	 */
1216 	vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
1217 	fbuf = to_fdp1_buffer(vbuf);
1218 	job->dst = &fbuf->fields[0];
1219 
1220 	job->active->vb->sequence = ctx->sequence;
1221 	job->dst->vb->sequence = ctx->sequence;
1222 	ctx->sequence++;
1223 
1224 	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
1225 		job->previous = ctx->previous;
1226 
1227 		/* Active buffer becomes the next job's previous buffer */
1228 		ctx->previous = job->active;
1229 	}
1230 
1231 	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
1232 		/* Must be called after 'active' is dequeued */
1233 		job->next = fdp1_peek_queued_field(ctx);
1234 	}
1235 
1236 	/* Transfer timestamps and flags from src->dst */
1237 
1238 	job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
1239 
1240 	job->dst->vb->flags = job->active->vb->flags &
1241 				V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1242 
1243 	/* Ideally, the frame-end function will just 'check' to see
1244 	 * if there are more jobs instead
1245 	 */
1246 	ctx->translen++;
1247 
1248 	/* Finally, Put this job on the processing queue */
1249 	queue_job(fdp1, job);
1250 
1251 	dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
1252 
1253 	return job;
1254 }
1255 
1256 /* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
1257  *
1258  * A single input buffer is taken and serialised into our fdp1_buffer
1259  * queue. The queue is then processed to create as many jobs as possible
1260  * from our available input.
1261  */
static void fdp1_m2m_device_run(void *priv)
1263 {
1264 	struct fdp1_ctx *ctx = priv;
1265 	struct fdp1_dev *fdp1 = ctx->fdp1;
1266 	struct vb2_v4l2_buffer *src_vb;
1267 	struct fdp1_buffer *buf;
1268 	unsigned int i;
1269 
1270 	dprintk(fdp1, "+\n");
1271 
1272 	ctx->translen = 0;
1273 
1274 	/* Get our incoming buffer of either one or two fields, or one frame */
1275 	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1276 	buf = to_fdp1_buffer(src_vb);
1277 
1278 	for (i = 0; i < buf->num_fields; i++) {
1279 		struct fdp1_field_buffer *fbuf = &buf->fields[i];
1280 
1281 		fdp1_queue_field(ctx, fbuf);
1282 		dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
1283 			i, fbuf->last_field);
1284 	}
1285 
1286 	/* Queue as many jobs as our data provides for */
1287 	while (fdp1_prepare_job(ctx))
1288 		;
1289 
1290 	if (ctx->translen == 0) {
1291 		dprintk(fdp1, "No jobs were processed. M2M action complete\n");
1292 		v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
1293 		return;
1294 	}
1295 
1296 	/* Kick the job processing action */
1297 	fdp1_device_process(ctx);
1298 }
1299 
1300 /*
1301  * device_frame_end:
1302  *
1303  * Handles the M2M level after a buffer completion event.
1304  */
static void device_frame_end(struct fdp1_dev *fdp1,
1306 			     enum vb2_buffer_state state)
1307 {
1308 	struct fdp1_ctx *ctx;
1309 	unsigned long flags;
1310 	struct fdp1_job *job = get_hw_queued_job(fdp1);
1311 
1312 	dprintk(fdp1, "+\n");
1313 
1314 	ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
1315 
1316 	if (ctx == NULL) {
1317 		v4l2_err(&fdp1->v4l2_dev,
1318 			"Instance released before the end of transaction\n");
1319 		return;
1320 	}
1321 
1322 	ctx->num_processed++;
1323 
1324 	/*
1325 	 * fdp1_field_complete will call buf_done only when the last vb2_buffer
1326 	 * reference is complete
1327 	 */
1328 	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
1329 		fdp1_field_complete(ctx, job->previous);
1330 	else
1331 		fdp1_field_complete(ctx, job->active);
1332 
1333 	spin_lock_irqsave(&fdp1->irqlock, flags);
1334 	v4l2_m2m_buf_done(job->dst->vb, state);
1335 	job->dst = NULL;
1336 	spin_unlock_irqrestore(&fdp1->irqlock, flags);
1337 
1338 	/* Move this job back to the free job list */
1339 	fdp1_job_free(fdp1, job);
1340 
1341 	dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
1342 		ctx->num_processed, ctx->translen);
1343 
1344 	if (ctx->num_processed == ctx->translen ||
1345 			ctx->aborting) {
1346 		dprintk(ctx->fdp1, "Finishing transaction\n");
1347 		ctx->num_processed = 0;
1348 		v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
1349 	} else {
1350 		/*
1351 		 * For pipelined performance support, this would
1352 		 * be called from a VINT handler
1353 		 */
1354 		fdp1_device_process(ctx);
1355 	}
1356 }
1357 
1358 /*
1359  * video ioctls
1360  */
static int fdp1_vidioc_querycap(struct file *file, void *priv,
1362 			   struct v4l2_capability *cap)
1363 {
1364 	strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
1365 	strscpy(cap->card, DRIVER_NAME, sizeof(cap->card));
1366 	snprintf(cap->bus_info, sizeof(cap->bus_info),
1367 		 "platform:%s", DRIVER_NAME);
1368 	return 0;
1369 }
1370 
static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
1372 {
1373 	unsigned int i, num;
1374 
1375 	num = 0;
1376 
1377 	for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
1378 		if (fdp1_formats[i].types & type) {
1379 			if (num == f->index)
1380 				break;
1381 			++num;
1382 		}
1383 	}
1384 
1385 	/* Format not found */
1386 	if (i >= ARRAY_SIZE(fdp1_formats))
1387 		return -EINVAL;
1388 
1389 	/* Format found */
1390 	f->pixelformat = fdp1_formats[i].fourcc;
1391 
1392 	return 0;
1393 }
1394 
static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
1396 				 struct v4l2_fmtdesc *f)
1397 {
1398 	return fdp1_enum_fmt(f, FDP1_CAPTURE);
1399 }
1400 
static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
1402 				   struct v4l2_fmtdesc *f)
1403 {
1404 	return fdp1_enum_fmt(f, FDP1_OUTPUT);
1405 }
1406 
static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1408 {
1409 	struct fdp1_q_data *q_data;
1410 	struct fdp1_ctx *ctx = fh_to_ctx(priv);
1411 
1412 	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
1413 		return -EINVAL;
1414 
1415 	q_data = get_q_data(ctx, f->type);
1416 	f->fmt.pix_mp = q_data->format;
1417 
1418 	return 0;
1419 }
1420 
static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
1422 				const struct fdp1_fmt *fmt)
1423 {
1424 	unsigned int i;
1425 
1426 	/* Compute and clamp the stride and image size. */
1427 	for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
1428 		unsigned int hsub = i > 0 ? fmt->hsub : 1;
1429 		unsigned int vsub = i > 0 ? fmt->vsub : 1;
1430 		 /* From VSP : TODO: Confirm alignment limits for FDP1 */
1431 		unsigned int align = 128;
1432 		unsigned int bpl;
1433 
1434 		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
1435 			      pix->width / hsub * fmt->bpp[i] / 8,
1436 			      round_down(FDP1_MAX_STRIDE, align));
1437 
1438 		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
1439 		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
1440 					    * pix->height / vsub;
1441 
1442 	}
1443 
1444 	if (fmt->num_planes == 3) {
1445 		/* The two chroma planes must have the same stride. */
1446 		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
1447 		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
1448 
1449 	}
1450 }
1451 
static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
1453 				const struct fdp1_fmt **fmtinfo,
1454 				struct v4l2_pix_format_mplane *pix)
1455 {
1456 	const struct fdp1_fmt *fmt;
1457 	unsigned int width;
1458 	unsigned int height;
1459 
1460 	/* Validate the pixel format to ensure the output queue supports it. */
1461 	fmt = fdp1_find_format(pix->pixelformat);
1462 	if (!fmt || !(fmt->types & FDP1_OUTPUT))
1463 		fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
1464 
1465 	if (fmtinfo)
1466 		*fmtinfo = fmt;
1467 
1468 	pix->pixelformat = fmt->fourcc;
1469 	pix->num_planes = fmt->num_planes;
1470 
1471 	/*
1472 	 * Progressive video and all interlaced field orders are acceptable.
1473 	 * Default to V4L2_FIELD_INTERLACED.
1474 	 */
1475 	if (pix->field != V4L2_FIELD_NONE &&
1476 	    pix->field != V4L2_FIELD_ALTERNATE &&
1477 	    !V4L2_FIELD_HAS_BOTH(pix->field))
1478 		pix->field = V4L2_FIELD_INTERLACED;
1479 
1480 	/*
1481 	 * The deinterlacer doesn't care about the colorspace, accept all values
1482 	 * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
1483 	 * at the output of the deinterlacer supports a subset of encodings and
1484 	 * quantization methods and will only be available when the colorspace
1485 	 * allows it.
1486 	 */
1487 	if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
1488 		pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1489 
1490 	/*
1491 	 * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
	 * them to the supported frame size range. The height boundaries are
	 * related to the full frame; divide them by two when the format passes
	 * fields in separate buffers.
1495 	 */
1496 	width = round_down(pix->width, fmt->hsub);
1497 	pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);
1498 
1499 	height = round_down(pix->height, fmt->vsub);
1500 	if (pix->field == V4L2_FIELD_ALTERNATE)
1501 		pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
1502 	else
1503 		pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);
1504 
1505 	fdp1_compute_stride(pix, fmt);
1506 }
1507 
static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
1509 				 const struct fdp1_fmt **fmtinfo,
1510 				 struct v4l2_pix_format_mplane *pix)
1511 {
1512 	struct fdp1_q_data *src_data = &ctx->out_q;
1513 	enum v4l2_colorspace colorspace;
1514 	enum v4l2_ycbcr_encoding ycbcr_enc;
1515 	enum v4l2_quantization quantization;
1516 	const struct fdp1_fmt *fmt;
1517 	bool allow_rgb;
1518 
1519 	/*
1520 	 * Validate the pixel format. We can only accept RGB output formats if
1521 	 * the input encoding and quantization are compatible with the format
1522 	 * conversions supported by the hardware. The supported combinations are
1523 	 *
1524 	 * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
1525 	 * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
1526 	 * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
1527 	 */
1528 	colorspace = src_data->format.colorspace;
1529 
1530 	ycbcr_enc = src_data->format.ycbcr_enc;
1531 	if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
1532 		ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
1533 
1534 	quantization = src_data->format.quantization;
1535 	if (quantization == V4L2_QUANTIZATION_DEFAULT)
1536 		quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
1537 							     ycbcr_enc);
1538 
1539 	allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
1540 		    (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
1541 		     quantization == V4L2_QUANTIZATION_LIM_RANGE);
1542 
1543 	fmt = fdp1_find_format(pix->pixelformat);
1544 	if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
1545 		fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
1546 
1547 	if (fmtinfo)
1548 		*fmtinfo = fmt;
1549 
1550 	pix->pixelformat = fmt->fourcc;
1551 	pix->num_planes = fmt->num_planes;
1552 	pix->field = V4L2_FIELD_NONE;
1553 
1554 	/*
1555 	 * The colorspace on the capture queue is copied from the output queue
1556 	 * as the hardware can't change the colorspace. It can convert YCbCr to
1557 	 * RGB though, in which case the encoding and quantization are set to
1558 	 * default values as anything else wouldn't make sense.
1559 	 */
1560 	pix->colorspace = src_data->format.colorspace;
1561 	pix->xfer_func = src_data->format.xfer_func;
1562 
1563 	if (fdp1_fmt_is_rgb(fmt)) {
1564 		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1565 		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
1566 	} else {
1567 		pix->ycbcr_enc = src_data->format.ycbcr_enc;
1568 		pix->quantization = src_data->format.quantization;
1569 	}
1570 
1571 	/*
1572 	 * The frame width is identical to the output queue, and the height is
1573 	 * either doubled or identical depending on whether the output queue
1574 	 * field order contains one or two fields per frame.
1575 	 */
1576 	pix->width = src_data->format.width;
1577 	if (src_data->format.field == V4L2_FIELD_ALTERNATE)
1578 		pix->height = 2 * src_data->format.height;
1579 	else
1580 		pix->height = src_data->format.height;
1581 
1582 	fdp1_compute_stride(pix, fmt);
1583 }
1584 
static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1586 {
1587 	struct fdp1_ctx *ctx = fh_to_ctx(priv);
1588 
1589 	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1590 		fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
1591 	else
1592 		fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
1593 
1594 	dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
1595 		V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
1596 		(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
1597 		f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
1598 
1599 	return 0;
1600 }
1601 
static void fdp1_set_format(struct fdp1_ctx *ctx,
1603 			    struct v4l2_pix_format_mplane *pix,
1604 			    enum v4l2_buf_type type)
1605 {
1606 	struct fdp1_q_data *q_data = get_q_data(ctx, type);
1607 	const struct fdp1_fmt *fmtinfo;
1608 
1609 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1610 		fdp1_try_fmt_output(ctx, &fmtinfo, pix);
1611 	else
1612 		fdp1_try_fmt_capture(ctx, &fmtinfo, pix);
1613 
1614 	q_data->fmt = fmtinfo;
1615 	q_data->format = *pix;
1616 
1617 	q_data->vsize = pix->height;
1618 	if (pix->field != V4L2_FIELD_NONE)
1619 		q_data->vsize /= 2;
1620 
1621 	q_data->stride_y = pix->plane_fmt[0].bytesperline;
1622 	q_data->stride_c = pix->plane_fmt[1].bytesperline;
1623 
1624 	/* Adjust strides for interleaved buffers */
1625 	if (pix->field == V4L2_FIELD_INTERLACED ||
1626 	    pix->field == V4L2_FIELD_INTERLACED_TB ||
1627 	    pix->field == V4L2_FIELD_INTERLACED_BT) {
1628 		q_data->stride_y *= 2;
1629 		q_data->stride_c *= 2;
1630 	}
1631 
1632 	/* Propagate the format from the output node to the capture node. */
1633 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1634 		struct fdp1_q_data *dst_data = &ctx->cap_q;
1635 
1636 		/*
1637 		 * Copy the format, clear the per-plane bytes per line and image
1638 		 * size, override the field and double the height if needed.
1639 		 */
1640 		dst_data->format = q_data->format;
1641 		memset(dst_data->format.plane_fmt, 0,
1642 		       sizeof(dst_data->format.plane_fmt));
1643 
1644 		dst_data->format.field = V4L2_FIELD_NONE;
1645 		if (pix->field == V4L2_FIELD_ALTERNATE)
1646 			dst_data->format.height *= 2;
1647 
1648 		fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);
1649 
1650 		dst_data->vsize = dst_data->format.height;
1651 		dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
1652 		dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
1653 	}
1654 }
1655 
static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1657 {
1658 	struct fdp1_ctx *ctx = fh_to_ctx(priv);
1659 	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
1660 	struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
1661 
1662 	if (vb2_is_busy(vq)) {
1663 		v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
1664 		return -EBUSY;
1665 	}
1666 
1667 	fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
1668 
1669 	dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
1670 		V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
1671 		(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
1672 		f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
1673 
1674 	return 0;
1675 }
1676 
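/*
 * V4L2_CID_MIN_BUFFERS_FOR_CAPTURE is volatile: report two buffers when
 * the output format carries both fields in a single buffer, one otherwise.
 */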
1677 static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
1678 {
1679 	struct fdp1_ctx *ctx =
1680 		container_of(ctrl->handler, struct fdp1_ctx, hdl);
1681 	struct fdp1_q_data *src_q_data = &ctx->out_q;
1682 
1683 	switch (ctrl->id) {
1684 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
1685 		if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
1686 			ctrl->val = 2;
1687 		else
1688 			ctrl->val = 1;
1689 		return 0;
1690 	}
1691 
1692 	return 1;
1693 }
1694 
1695 static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
1696 {
1697 	struct fdp1_ctx *ctx =
1698 		container_of(ctrl->handler, struct fdp1_ctx, hdl);
1699 
1700 	switch (ctrl->id) {
1701 	case V4L2_CID_ALPHA_COMPONENT:
1702 		ctx->alpha = ctrl->val;
1703 		break;
1704 
1705 	case V4L2_CID_DEINTERLACING_MODE:
1706 		ctx->deint_mode = ctrl->val;
1707 		break;
1708 	}
1709 
1710 	return 0;
1711 }
1712 
1713 static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
1714 	.s_ctrl = fdp1_s_ctrl,
1715 	.g_volatile_ctrl = fdp1_g_ctrl,
1716 };
1717 
1718 static const char * const fdp1_ctrl_deint_menu[] = {
1719 	"Progressive",
1720 	"Adaptive 2D/3D",
1721 	"Fixed 2D",
1722 	"Fixed 3D",
1723 	"Previous field",
1724 	"Next field",
1725 	NULL
1726 };
1727 
1728 static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
1729 	.vidioc_querycap	= fdp1_vidioc_querycap,
1730 
1731 	.vidioc_enum_fmt_vid_cap	= fdp1_enum_fmt_vid_cap,
1732 	.vidioc_enum_fmt_vid_out	= fdp1_enum_fmt_vid_out,
1733 	.vidioc_g_fmt_vid_cap_mplane	= fdp1_g_fmt,
1734 	.vidioc_g_fmt_vid_out_mplane	= fdp1_g_fmt,
1735 	.vidioc_try_fmt_vid_cap_mplane	= fdp1_try_fmt,
1736 	.vidioc_try_fmt_vid_out_mplane	= fdp1_try_fmt,
1737 	.vidioc_s_fmt_vid_cap_mplane	= fdp1_s_fmt,
1738 	.vidioc_s_fmt_vid_out_mplane	= fdp1_s_fmt,
1739 
1740 	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
1741 	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
1742 	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
1743 	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
1744 	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
1745 	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
1746 	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
1747 
1748 	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
1749 	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
1750 
1751 	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1752 	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1753 };
1754 
1755 /*
1756  * Queue operations
1757  */
1758 
1759 static int fdp1_queue_setup(struct vb2_queue *vq,
1760 				unsigned int *nbuffers, unsigned int *nplanes,
1761 				unsigned int sizes[],
1762 				struct device *alloc_ctxs[])
1763 {
1764 	struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
1765 	struct fdp1_q_data *q_data;
1766 	unsigned int i;
1767 
1768 	q_data = get_q_data(ctx, vq->type);
1769 
1770 	if (*nplanes) {
1771 		if (*nplanes > FDP1_MAX_PLANES)
1772 			return -EINVAL;
1773 
1774 		return 0;
1775 	}
1776 
1777 	*nplanes = q_data->format.num_planes;
1778 
1779 	for (i = 0; i < *nplanes; i++)
1780 		sizes[i] = q_data->format.plane_fmt[i].sizeimage;
1781 
1782 	return 0;
1783 }
1784 
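/*
 * Prepare one field of a buffer: record the per-plane DMA addresses,
 * derive the field identity (top or bottom) from the buffer field order,
 * and offset the addresses of the second field for interleaved and
 * sequential formats.
 */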
1785 static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
1786 				   struct vb2_v4l2_buffer *vbuf,
1787 				   unsigned int field_num)
1788 {
1789 	struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
1790 	struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
1791 	unsigned int num_fields;
1792 	unsigned int i;
1793 
1794 	num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
1795 
1796 	fbuf->vb = vbuf;
1797 	fbuf->last_field = (field_num + 1) == num_fields;
1798 
1799 	for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
1800 		fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);
1801 
1802 	switch (vbuf->field) {
1803 	case V4L2_FIELD_INTERLACED:
1804 		/*
1805 		 * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
1806 		 * top-bottom for 50Hz. As TV standards are not applicable to
1807 		 * the mem-to-mem API, use the height as a heuristic.
1808 		 */
1809 		fbuf->field = (q_data->format.height < 576) == field_num
1810 			    ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
1811 		break;
1812 	case V4L2_FIELD_INTERLACED_TB:
1813 	case V4L2_FIELD_SEQ_TB:
1814 		fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
1815 		break;
1816 	case V4L2_FIELD_INTERLACED_BT:
1817 	case V4L2_FIELD_SEQ_BT:
1818 		fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
1819 		break;
1820 	default:
1821 		fbuf->field = vbuf->field;
1822 		break;
1823 	}
1824 
1825 	/* The first field uses the plane base addresses unmodified. */
1826 	if (!field_num)
1827 		return;
1828 
1829 	/* Adjust buffer addresses for second field */
1830 	switch (vbuf->field) {
1831 	case V4L2_FIELD_INTERLACED:
1832 	case V4L2_FIELD_INTERLACED_TB:
1833 	case V4L2_FIELD_INTERLACED_BT:
1834 		for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
1835 			fbuf->addrs[i] +=
1836 				(i == 0 ? q_data->stride_y : q_data->stride_c);
1837 		break;
1838 	case V4L2_FIELD_SEQ_TB:
1839 	case V4L2_FIELD_SEQ_BT:
1840 		for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
1841 			fbuf->addrs[i] += q_data->vsize *
1842 				(i == 0 ? q_data->stride_y : q_data->stride_c);
1843 		break;
1844 	}
1845 }
1846 
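/*
 * Validate the buffer field against the queue format and the plane sizes
 * against the format image sizes, then prepare one field descriptor per
 * field contained in the buffer.
 */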
1847 static int fdp1_buf_prepare(struct vb2_buffer *vb)
1848 {
1849 	struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1850 	struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
1851 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1852 	struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
1853 	unsigned int i;
1854 
1855 	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
1856 		bool field_valid = true;
1857 
1858 		/* Validate the buffer field. */
1859 		switch (q_data->format.field) {
1860 		case V4L2_FIELD_NONE:
1861 			if (vbuf->field != V4L2_FIELD_NONE)
1862 				field_valid = false;
1863 			break;
1864 
1865 		case V4L2_FIELD_ALTERNATE:
1866 			if (vbuf->field != V4L2_FIELD_TOP &&
1867 			    vbuf->field != V4L2_FIELD_BOTTOM)
1868 				field_valid = false;
1869 			break;
1870 
1871 		case V4L2_FIELD_INTERLACED:
1872 		case V4L2_FIELD_SEQ_TB:
1873 		case V4L2_FIELD_SEQ_BT:
1874 		case V4L2_FIELD_INTERLACED_TB:
1875 		case V4L2_FIELD_INTERLACED_BT:
1876 			if (vbuf->field != q_data->format.field)
1877 				field_valid = false;
1878 			break;
1879 		}
1880 
1881 		if (!field_valid) {
1882 			dprintk(ctx->fdp1,
1883 				"buffer field %u invalid for format field %u\n",
1884 				vbuf->field, q_data->format.field);
1885 			return -EINVAL;
1886 		}
1887 	} else {
1888 		vbuf->field = V4L2_FIELD_NONE;
1889 	}
1890 
1891 	/* Validate the planes sizes. */
1892 	for (i = 0; i < q_data->format.num_planes; i++) {
1893 		unsigned long size = q_data->format.plane_fmt[i].sizeimage;
1894 
1895 		if (vb2_plane_size(vb, i) < size) {
1896 			dprintk(ctx->fdp1,
1897 				"data will not fit into plane [%u/%u] (%lu < %lu)\n",
1898 				i, q_data->format.num_planes,
1899 				vb2_plane_size(vb, i), size);
1900 			return -EINVAL;
1901 		}
1902 
1903 		/* All supported formats have a fixed, known plane size. */
1904 		vb2_set_plane_payload(vb, i, size);
1905 	}
1906 
1907 	buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
1908 	for (i = 0; i < buf->num_fields; ++i)
1909 		fdp1_buf_prepare_field(q_data, vbuf, i);
1910 
1911 	return 0;
1912 }
1913 
1914 static void fdp1_buf_queue(struct vb2_buffer *vb)
1915 {
1916 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1917 	struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1918 
1919 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1920 }
1921 
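/*
 * On the output queue, lock in the deinterlacing mode and allocate the
 * SMSK buffer used by the hardware for adaptive 2D/3D deinterlacing.
 */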
1922 static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
1923 {
1924 	struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
1925 	struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
1926 
1927 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1928 		/*
1929 		 * When the output format is progressive, force the
1930 		 * progressive deinterlacing mode and ignore the mode set by
1931 		 * the user. Otherwise, lock in the requested mode.
1932 		 */
1933 		if (q_data->format.field == V4L2_FIELD_NONE)
1934 			ctx->deint_mode = FDP1_PROGRESSIVE;
1935 
1936 		if (ctx->deint_mode == FDP1_ADAPT2D3D) {
1937 			u32 stride;
1938 			dma_addr_t smsk_base;
1939 			const u32 bpp = 2; /* bytes per pixel */
1940 
1941 			stride = round_up(q_data->format.width, 8);
1942 
1943 			ctx->smsk_size = bpp * stride * q_data->vsize;
1944 
1945 			ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
1946 				ctx->smsk_size, &smsk_base, GFP_KERNEL);
1947 
1948 			if (ctx->smsk_cpu == NULL) {
1949 				dprintk(ctx->fdp1, "Failed to alloc smsk\n");
1950 				return -ENOMEM;
1951 			}
1952 
1953 			ctx->smsk_addr[0] = smsk_base;
1954 			ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
1955 		}
1956 	}
1957 
1958 	return 0;
1959 }
1960 
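/*
 * Return all queued buffers with an error state and release the internal
 * field queue, the SMSK buffer and any jobs still held for the context.
 */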
1961 static void fdp1_stop_streaming(struct vb2_queue *q)
1962 {
1963 	struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
1964 	struct vb2_v4l2_buffer *vbuf;
1965 	unsigned long flags;
1966 
1967 	while (1) {
1968 		if (V4L2_TYPE_IS_OUTPUT(q->type))
1969 			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1970 		else
1971 			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
1972 		if (vbuf == NULL)
1973 			break;
1974 		spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
1975 		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
1976 		spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
1977 	}
1978 
1979 	/* Empty Output queues */
1980 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1981 		/* Empty our internal queues */
1982 		struct fdp1_field_buffer *fbuf;
1983 
1984 		/* Free any queued buffers */
1985 		fbuf = fdp1_dequeue_field(ctx);
1986 		while (fbuf != NULL) {
1987 			fdp1_field_complete(ctx, fbuf);
1988 			fbuf = fdp1_dequeue_field(ctx);
1989 		}
1990 
1991 		/* Free smsk_data */
1992 		if (ctx->smsk_cpu) {
1993 			dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
1994 					  ctx->smsk_cpu, ctx->smsk_addr[0]);
1995 			ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
1996 			ctx->smsk_cpu = NULL;
1997 		}
1998 
1999 		WARN(!list_empty(&ctx->fields_queue),
2000 		     "Buffer queue not empty");
2001 	} else {
2002 		/* Empty Capture queues (Jobs) */
2003 		struct fdp1_job *job;
2004 
2005 		job = get_queued_job(ctx->fdp1);
2006 		while (job) {
2007 			if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
2008 				fdp1_field_complete(ctx, job->previous);
2009 			else
2010 				fdp1_field_complete(ctx, job->active);
2011 
2012 			v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
2013 			job->dst = NULL;
2014 
2015 			job = get_queued_job(ctx->fdp1);
2016 		}
2017 
2018 		/* Free any held buffer in the ctx */
2019 		fdp1_field_complete(ctx, ctx->previous);
2020 
2021 		WARN(!list_empty(&ctx->fdp1->queued_job_list),
2022 		     "Queued Job List not empty");
2023 
2024 		WARN(!list_empty(&ctx->fdp1->hw_job_list),
2025 		     "HW Job list not empty");
2026 	}
2027 }
2028 
2029 static const struct vb2_ops fdp1_qops = {
2030 	.queue_setup	 = fdp1_queue_setup,
2031 	.buf_prepare	 = fdp1_buf_prepare,
2032 	.buf_queue	 = fdp1_buf_queue,
2033 	.start_streaming = fdp1_start_streaming,
2034 	.stop_streaming  = fdp1_stop_streaming,
2035 	.wait_prepare	 = vb2_ops_wait_prepare,
2036 	.wait_finish	 = vb2_ops_wait_finish,
2037 };
2038 
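/* Initialise the source (output) and destination (capture) vb2 queues. */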
2039 static int queue_init(void *priv, struct vb2_queue *src_vq,
2040 		      struct vb2_queue *dst_vq)
2041 {
2042 	struct fdp1_ctx *ctx = priv;
2043 	int ret;
2044 
2045 	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
2046 	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
2047 	src_vq->drv_priv = ctx;
2048 	src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
2049 	src_vq->ops = &fdp1_qops;
2050 	src_vq->mem_ops = &vb2_dma_contig_memops;
2051 	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
2052 	src_vq->lock = &ctx->fdp1->dev_mutex;
2053 	src_vq->dev = ctx->fdp1->dev;
2054 
2055 	ret = vb2_queue_init(src_vq);
2056 	if (ret)
2057 		return ret;
2058 
2059 	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2060 	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
2061 	dst_vq->drv_priv = ctx;
2062 	dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
2063 	dst_vq->ops = &fdp1_qops;
2064 	dst_vq->mem_ops = &vb2_dma_contig_memops;
2065 	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
2066 	dst_vq->lock = &ctx->fdp1->dev_mutex;
2067 	dst_vq->dev = ctx->fdp1->dev;
2068 
2069 	return vb2_queue_init(dst_vq);
2070 }
2071 
2072 /*
2073  * File operations
2074  */
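/*
 * Open a new context: initialise the controls, set a default output
 * format and create the mem2mem context.
 */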
2075 static int fdp1_open(struct file *file)
2076 {
2077 	struct fdp1_dev *fdp1 = video_drvdata(file);
2078 	struct v4l2_pix_format_mplane format;
2079 	struct fdp1_ctx *ctx = NULL;
2080 	struct v4l2_ctrl *ctrl;
2081 	int ret = 0;
2082 
2083 	if (mutex_lock_interruptible(&fdp1->dev_mutex))
2084 		return -ERESTARTSYS;
2085 
2086 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2087 	if (!ctx) {
2088 		ret = -ENOMEM;
2089 		goto done;
2090 	}
2091 
2092 	v4l2_fh_init(&ctx->fh, video_devdata(file));
2093 	file->private_data = &ctx->fh;
2094 	ctx->fdp1 = fdp1;
2095 
2096 	/* Initialise Queues */
2097 	INIT_LIST_HEAD(&ctx->fields_queue);
2098 
2099 	ctx->translen = 1;
2100 	ctx->sequence = 0;
2101 
2102 	/* Initialise controls */
2103 
2104 	v4l2_ctrl_handler_init(&ctx->hdl, 3);
2105 	v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
2106 				     V4L2_CID_DEINTERLACING_MODE,
2107 				     FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
2108 				     fdp1_ctrl_deint_menu);
2109 
2110 	ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
2111 				 V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
2112 	if (ctrl)
2113 		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
2114 
2115 	v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
2116 			  V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
2117 
2118 	if (ctx->hdl.error) {
2119 		ret = ctx->hdl.error;
2120 		v4l2_ctrl_handler_free(&ctx->hdl);
2121 		kfree(ctx);
2122 		goto done;
2123 	}
2124 
2125 	ctx->fh.ctrl_handler = &ctx->hdl;
2126 	v4l2_ctrl_handler_setup(&ctx->hdl);
2127 
2128 	/* Configure default parameters. */
2129 	memset(&format, 0, sizeof(format));
2130 	fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
2131 
2132 	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
2133 
2134 	if (IS_ERR(ctx->fh.m2m_ctx)) {
2135 		ret = PTR_ERR(ctx->fh.m2m_ctx);
2136 
2137 		v4l2_ctrl_handler_free(&ctx->hdl);
2138 		kfree(ctx);
2139 		goto done;
2140 	}
2141 
2142 	/* Perform any power management required */
2143 	pm_runtime_get_sync(fdp1->dev);
2144 
2145 	v4l2_fh_add(&ctx->fh);
2146 
2147 	dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
2148 		ctx, ctx->fh.m2m_ctx);
2149 
2150 done:
2151 	mutex_unlock(&fdp1->dev_mutex);
2152 	return ret;
2153 }
2154 
2155 static int fdp1_release(struct file *file)
2156 {
2157 	struct fdp1_dev *fdp1 = video_drvdata(file);
2158 	struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
2159 
2160 	dprintk(fdp1, "Releasing instance %p\n", ctx);
2161 
2162 	v4l2_fh_del(&ctx->fh);
2163 	v4l2_fh_exit(&ctx->fh);
2164 	v4l2_ctrl_handler_free(&ctx->hdl);
2165 	mutex_lock(&fdp1->dev_mutex);
2166 	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
2167 	mutex_unlock(&fdp1->dev_mutex);
2168 	kfree(ctx);
2169 
2170 	pm_runtime_put(fdp1->dev);
2171 
2172 	return 0;
2173 }
2174 
2175 static const struct v4l2_file_operations fdp1_fops = {
2176 	.owner		= THIS_MODULE,
2177 	.open		= fdp1_open,
2178 	.release	= fdp1_release,
2179 	.poll		= v4l2_m2m_fop_poll,
2180 	.unlocked_ioctl	= video_ioctl2,
2181 	.mmap		= v4l2_m2m_fop_mmap,
2182 };
2183 
2184 static const struct video_device fdp1_videodev = {
2185 	.name		= DRIVER_NAME,
2186 	.vfl_dir	= VFL_DIR_M2M,
2187 	.fops		= &fdp1_fops,
2188 	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
2189 	.ioctl_ops	= &fdp1_ioctl_ops,
2190 	.minor		= -1,
2191 	.release	= video_device_release_empty,
2192 };
2193 
2194 static const struct v4l2_m2m_ops m2m_ops = {
2195 	.device_run	= fdp1_m2m_device_run,
2196 	.job_ready	= fdp1_m2m_job_ready,
2197 	.job_abort	= fdp1_m2m_job_abort,
2198 };
2199 
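/*
 * Interrupt handler: clear the reported interrupt sources and complete the
 * current frame, with an error state on a vsync error and a done state on
 * frame end.
 */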
2200 static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
2201 {
2202 	struct fdp1_dev *fdp1 = dev_id;
2203 	u32 int_status;
2204 	u32 ctl_status;
2205 	u32 vint_cnt;
2206 	u32 cycles;
2207 
2208 	int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
2209 	cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
2210 	ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
2211 	vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
2212 			FD1_CTL_STATUS_VINT_CNT_SHIFT;
2213 
2214 	/* Clear interrupts */
2215 	fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);
2216 
2217 	if (debug >= 2) {
2218 		dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
2219 			int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
2220 			int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
2221 			int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");
2222 
2223 		dprintk(fdp1, "CycleStatus = %d (%dms)\n",
2224 			cycles, cycles/(fdp1->clk_rate/1000));
2225 
2226 		dprintk(fdp1,
2227 			"Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
2228 			ctl_status, vint_cnt,
2229 			ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
2230 			ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
2231 			ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
2232 			ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
2233 		dprintk(fdp1, "***********************************\n");
2234 	}
2235 
2236 	/* Spurious interrupt */
2237 	if (!(FD1_CTL_IRQ_MASK & int_status))
2238 		return IRQ_NONE;
2239 
2240 	/* Work completed, release the frame */
2241 	if (FD1_CTL_IRQ_VERE & int_status)
2242 		device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
2243 	else if (FD1_CTL_IRQ_FREE & int_status)
2244 		device_frame_end(fdp1, VB2_BUF_STATE_DONE);
2245 
2246 	return IRQ_HANDLED;
2247 }
2248 
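/*
 * Probe: map the registers, request the interrupt, acquire the optional
 * FCP, register the V4L2 and mem2mem devices, and identify the hardware
 * version.
 */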
2249 static int fdp1_probe(struct platform_device *pdev)
2250 {
2251 	struct fdp1_dev *fdp1;
2252 	struct video_device *vfd;
2253 	struct device_node *fcp_node;
2254 	struct resource *res;
2255 	struct clk *clk;
2256 	unsigned int i;
2257 
2258 	int ret;
2259 	int hw_version;
2260 
2261 	fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
2262 	if (!fdp1)
2263 		return -ENOMEM;
2264 
2265 	INIT_LIST_HEAD(&fdp1->free_job_list);
2266 	INIT_LIST_HEAD(&fdp1->queued_job_list);
2267 	INIT_LIST_HEAD(&fdp1->hw_job_list);
2268 
2269 	/* Initialise the jobs on the free list */
2270 	for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
2271 		list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
2272 
2273 	mutex_init(&fdp1->dev_mutex);
2274 
2275 	spin_lock_init(&fdp1->irqlock);
2276 	spin_lock_init(&fdp1->device_process_lock);
2277 	fdp1->dev = &pdev->dev;
2278 	platform_set_drvdata(pdev, fdp1);
2279 
2280 	/* Memory-mapped registers */
2281 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2282 	fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
2283 	if (IS_ERR(fdp1->regs))
2284 		return PTR_ERR(fdp1->regs);
2285 
2286 	/* Interrupt service routine registration */
2287 	fdp1->irq = ret = platform_get_irq(pdev, 0);
2288 	if (ret < 0) {
2289 		dev_err(&pdev->dev, "cannot find IRQ\n");
2290 		return ret;
2291 	}
2292 
2293 	ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
2294 			       dev_name(&pdev->dev), fdp1);
2295 	if (ret) {
2296 		dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
2297 		return ret;
2298 	}
2299 
2300 	/* FCP */
2301 	fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
2302 	if (fcp_node) {
2303 		fdp1->fcp = rcar_fcp_get(fcp_node);
2304 		of_node_put(fcp_node);
2305 		if (IS_ERR(fdp1->fcp)) {
2306 			dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
2307 				PTR_ERR(fdp1->fcp));
2308 			return PTR_ERR(fdp1->fcp);
2309 		}
2310 	}
2311 
2312 	/* Determine our clock rate */
2313 	clk = clk_get(&pdev->dev, NULL);
2314 	if (IS_ERR(clk))
2315 		return PTR_ERR(clk);
2316 
2317 	fdp1->clk_rate = clk_get_rate(clk);
2318 	clk_put(clk);
2319 
2320 	/* V4L2 device registration */
2321 	ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
2322 	if (ret) {
2323 		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
2324 		return ret;
2325 	}
2326 
2327 	/* M2M registration */
2328 	fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
2329 	if (IS_ERR(fdp1->m2m_dev)) {
2330 		v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
2331 		ret = PTR_ERR(fdp1->m2m_dev);
2332 		goto unreg_dev;
2333 	}
2334 
2335 	/* Video registration */
2336 	fdp1->vfd = fdp1_videodev;
2337 	vfd = &fdp1->vfd;
2338 	vfd->lock = &fdp1->dev_mutex;
2339 	vfd->v4l2_dev = &fdp1->v4l2_dev;
2340 	video_set_drvdata(vfd, fdp1);
2341 	strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
2342 
2343 	ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
2344 	if (ret) {
2345 		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
2346 		goto release_m2m;
2347 	}
2348 
2349 	v4l2_info(&fdp1->v4l2_dev, "Device registered as /dev/video%d\n",
2350 		  vfd->num);
2351 
2352 	/* Power up the device to read the hardware version */
2353 	pm_runtime_enable(&pdev->dev);
2354 	pm_runtime_get_sync(fdp1->dev);
2355 
2356 	hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
2357 	switch (hw_version) {
2358 	case FD1_IP_H3_ES1:
2359 		dprintk(fdp1, "FDP1 Version R-Car H3 ES1\n");
2360 		break;
2361 	case FD1_IP_M3W:
2362 		dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
2363 		break;
2364 	case FD1_IP_H3:
2365 		dprintk(fdp1, "FDP1 Version R-Car H3\n");
2366 		break;
2367 	case FD1_IP_M3N:
2368 		dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
2369 		break;
2370 	case FD1_IP_E3:
2371 		dprintk(fdp1, "FDP1 Version R-Car E3\n");
2372 		break;
2373 	default:
2374 		dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
2375 			hw_version);
2376 	}
2377 
2378 	/* Allow the hw to sleep until an open call puts it to use */
2379 	pm_runtime_put(fdp1->dev);
2380 
2381 	return 0;
2382 
2383 release_m2m:
2384 	v4l2_m2m_release(fdp1->m2m_dev);
2385 
2386 unreg_dev:
2387 	v4l2_device_unregister(&fdp1->v4l2_dev);
2388 
2389 	return ret;
2390 }
2391 
2392 static int fdp1_remove(struct platform_device *pdev)
2393 {
2394 	struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
2395 
2396 	v4l2_m2m_release(fdp1->m2m_dev);
2397 	video_unregister_device(&fdp1->vfd);
2398 	v4l2_device_unregister(&fdp1->v4l2_dev);
2399 	pm_runtime_disable(&pdev->dev);
2400 
2401 	return 0;
2402 }
2403 
2404 static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
2405 {
2406 	struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
2407 
2408 	rcar_fcp_disable(fdp1->fcp);
2409 
2410 	return 0;
2411 }
2412 
2413 static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
2414 {
2415 	struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
2416 
2417 	/* Program in the static LUTs */
2418 	fdp1_set_lut(fdp1);
2419 
2420 	return rcar_fcp_enable(fdp1->fcp);
2421 }
2422 
2423 static const struct dev_pm_ops fdp1_pm_ops = {
2424 	SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
2425 			   fdp1_pm_runtime_resume,
2426 			   NULL)
2427 };
2428 
2429 static const struct of_device_id fdp1_dt_ids[] = {
2430 	{ .compatible = "renesas,fdp1" },
2431 	{ },
2432 };
2433 MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
2434 
2435 static struct platform_driver fdp1_pdrv = {
2436 	.probe		= fdp1_probe,
2437 	.remove		= fdp1_remove,
2438 	.driver		= {
2439 		.name	= DRIVER_NAME,
2440 		.of_match_table = fdp1_dt_ids,
2441 		.pm	= &fdp1_pm_ops,
2442 	},
2443 };
2444 
2445 module_platform_driver(fdp1_pdrv);
2446 
2447 MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
2448 MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
2449 MODULE_LICENSE("GPL");
2450 MODULE_ALIAS("platform:" DRIVER_NAME);
2451