/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include <kunit/test-bug.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

extern const struct drm_driver vc4_drm_driver;
extern const struct drm_driver vc5_drm_driver;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon-related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	struct vc4_dev *dev;

	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
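
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * refcount above means every lookup must be paired with a put. Using the
 * perfmon helpers declared at the bottom of this file, and assuming
 * vc4_perfmon_find() returns with a reference held on behalf of the
 * caller:
 *
 *	struct vc4_perfmon *perfmon = vc4_perfmon_find(vc4file, id);
 *
 *	if (perfmon) {
 *		... read perfmon->counters[] / perfmon->events[] ...
 *		vc4_perfmon_put(perfmon);
 *	}
 *
 * vc4_perfmon_put() drops the reference; the perfmon is destroyed when
 * refcnt reaches zero.
 */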

struct vc4_dev {
	struct drm_device base;
	struct device *dev;

	bool is_vc5;

	unsigned int irq;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happens after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to
	 * guess at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(const struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}
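
/*
 * Note (illustrative, not from the original header): struct vc4_dev embeds
 * its struct drm_device as "base", so the two convert without any extra
 * allocation or back-pointer:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(drm);   downcast via container_of()
 *	struct drm_device *drm = &vc4->base;     upcast is a plain field access
 *
 * Every to_vc4_*() helper in this file follows the same container_of()
 * pattern.
 */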

struct vc4_bo {
	struct drm_gem_dma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};
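
/*
 * Usage sketch (illustrative only): code that needs a BO's backing memory
 * to stay resident brackets the access with the usecnt helpers declared
 * below; while usecnt is non-zero the BO cannot be purged. Assuming the
 * helper refuses BOs whose storage has already been purged:
 *
 *	int ret = vc4_bo_inc_usecnt(bo);
 *
 *	if (ret)
 *		return ret;	the BO may already have been purged
 *	... the BO's memory is guaranteed to stick around here ...
 *	vc4_bo_dec_usecnt(bo);
 */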

static inline struct vc4_bo *
to_vc4_bo(const struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(const struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
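
/*
 * Usage sketch (illustrative only): a vc4_seqno_cb is registered through
 * vc4_queue_seqno_cb() (declared below); once the GPU's finished seqno
 * passes the requested value, func is called from a workqueue. The names
 * here are hypothetical:
 *
 *	static void my_done_cb(struct vc4_seqno_cb *cb)
 *	{
 *		... runs in workqueue context after the seqno has passed ...
 *	}
 *
 *	ret = vc4_queue_seqno_cb(dev, &my_cb, seqno, my_done_cb);
 */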

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	unsigned long max_core_rate;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;

	/*
	 * Even though HDMI0 on the RPi4 can output modes requiring a pixel
	 * rate higher than 297MHz, it needs some adjustments in the
	 * config.txt file to be able to do so and thus won't always be
	 * available.
	 */
	bool vc5_hdmi_enable_hdmi_20;

	/*
	 * 4096x2160@60 requires a core overclock to work, so record here
	 * whether the configured core clock is sufficient.
	 */
	bool vc5_hdmi_enable_4096by2160;
};

#define HVS_NUM_CHANNELS 3

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static inline struct vc4_hvs_state *
to_vc4_hvs_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_hvs_state *vc4_hvs_get_global_state(struct drm_atomic_state *state);
struct vc4_hvs_state *vc4_hvs_get_old_global_state(const struct drm_atomic_state *state);
struct vc4_hvs_state *vc4_hvs_get_new_global_state(const struct drm_atomic_state *state);

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(const struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(const struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
	VC4_ENCODER_TYPE_TXP,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(const struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

static inline
struct drm_encoder *vc4_find_encoder_by_type(struct drm_device *drm,
					     enum vc4_encoder_type type)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, drm) {
		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);

		if (vc4_encoder->type == type)
			return encoder;
	}

	return NULL;
}
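
/*
 * Usage sketch (illustrative only): looking up the encoder driving a fixed
 * output, e.g. the first HDMI controller:
 *
 *	struct drm_encoder *encoder;
 *
 *	encoder = vc4_find_encoder_by_type(drm, VC4_ENCODER_TYPE_HDMI0);
 *	if (!encoder)
 *		return -ENODEV;	that output isn't registered on this SoC
 */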

struct vc4_crtc_data {
	const char *name;

	const char *debugfs_name;

	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

extern const struct vc4_crtc_data vc4_txp_crtc_data;

struct vc4_pv_data {
	struct vc4_crtc_data	base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
};

extern const struct vc4_pv_data bcm2835_pv0_data;
extern const struct vc4_pv_data bcm2835_pv1_data;
extern const struct vc4_pv_data bcm2835_pv2_data;
extern const struct vc4_pv_data bcm2711_pv0_data;
extern const struct vc4_pv_data bcm2711_pv1_data;
extern const struct vc4_pv_data bcm2711_pv2_data;
extern const struct vc4_pv_data bcm2711_pv3_data;
extern const struct vc4_pv_data bcm2711_pv4_data;

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(const struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(const struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

#define V3D_READ(offset)								\
	({										\
		kunit_fail_current_test("Accessing a register in a unit test!\n");	\
		readl(vc4->v3d->regs + (offset));					\
	})

#define V3D_WRITE(offset, val)								\
	do {										\
		kunit_fail_current_test("Accessing a register in a unit test!\n");	\
		writel(val, vc4->v3d->regs + (offset));					\
	} while (0)

#define HVS_READ(offset)								\
	({										\
		kunit_fail_current_test("Accessing a register in a unit test!\n");	\
		readl(hvs->regs + (offset));						\
	})

#define HVS_WRITE(offset, val)								\
	do {										\
		kunit_fail_current_test("Accessing a register in a unit test!\n");	\
		writel(val, hvs->regs + (offset));					\
	} while (0)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
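
/*
 * Usage sketch (illustrative only): VC4_REG32() builds one entry of a
 * struct debugfs_reg32 array from a register define, which the
 * debugfs_regset32 members above then expose through debugfs. Assuming
 * register offsets such as V3D_IDENT0/V3D_IDENT1 from vc4_regs.h:
 *
 *	static const struct debugfs_reg32 v3d_regs[] = {
 *		VC4_REG32(V3D_IDENT0),
 *		VC4_REG32(V3D_IDENT1),
 *	};
 *
 * Each entry expands to { .name = "V3D_IDENT0", .offset = V3D_IDENT0 }.
 */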

struct vc4_exec_info {
	struct vc4_dev *dev;

	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_dma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_dma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr (p) gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and the size decremented as the shader
	 * recs themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct vc4_dev *dev;

	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
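
/*
 * Locking sketch (illustrative only): the job lists and seqnos these
 * helpers read are protected by vc4->job_lock, which is also taken from
 * the IRQ handler, so callers peek at the lists with interrupts disabled:
 *
 *	unsigned long irqflags;
 *	struct vc4_exec_info *exec;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	... inspect exec while the lock is held ...
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */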

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
	int ret__;							\
	might_sleep();							\
	for (;;) {							\
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP;							\
		/* Guarantee COND check prior to timeout */		\
		barrier();						\
		if (COND) {						\
			ret__ = 0;					\
			break;						\
		}							\
		if (expired__) {					\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(wait__, wait__ * 2);			\
		if (wait__ < (Wmax))					\
			wait__ <<= 1;					\
	}								\
	ret__;								\
})

#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
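
/*
 * Usage sketch (illustrative only): wait_for() polls COND with exponential
 * backoff (starting at 10us, doubling up to 1ms between checks) until the
 * condition holds or the timeout in milliseconds expires. A hypothetical
 * poll of a register until it reads back zero, assuming a vc4_dev pointer
 * named vc4 is in scope as V3D_READ() requires:
 *
 *	int ret = wait_for(V3D_READ(V3D_PCS) == 0, 1000);
 *
 *	if (ret == -ETIMEDOUT)
 *		... the register never cleared within one second ...
 */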

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
int vc4_bo_debugfs_init(struct drm_minor *minor);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int __vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
		    struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
		    struct drm_plane *primary_plane,
		    const struct drm_crtc_funcs *crtc_funcs,
		    const struct drm_crtc_helper_funcs *crtc_helper_funcs,
		    bool feeds_txp);
int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
		  struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs,
		  bool feeds_txp);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
int vc4_crtc_atomic_check(struct drm_crtc *crtc,
			  struct drm_atomic_state *state);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_send_vblank(struct drm_crtc *crtc);
int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev);
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
int vc4_hvs_debugfs_init(struct drm_minor *minor);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type,
				 uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
int vc4_v3d_debugfs_init(struct drm_minor *minor);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_dma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_dma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */