xref: /netbsd/sys/arch/arm/rockchip/rk_drm.c (revision f77b3dc0)
1*f77b3dc0Sjmcneill /* $NetBSD: rk_drm.c,v 1.21 2022/10/30 23:10:43 jmcneill Exp $ */
2276fc83aSjmcneill 
3276fc83aSjmcneill /*-
4276fc83aSjmcneill  * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
5276fc83aSjmcneill  * All rights reserved.
6276fc83aSjmcneill  *
7276fc83aSjmcneill  * Redistribution and use in source and binary forms, with or without
8276fc83aSjmcneill  * modification, are permitted provided that the following conditions
9276fc83aSjmcneill  * are met:
10276fc83aSjmcneill  * 1. Redistributions of source code must retain the above copyright
11276fc83aSjmcneill  *    notice, this list of conditions and the following disclaimer.
12276fc83aSjmcneill  * 2. Redistributions in binary form must reproduce the above copyright
13276fc83aSjmcneill  *    notice, this list of conditions and the following disclaimer in the
14276fc83aSjmcneill  *    documentation and/or other materials provided with the distribution.
15276fc83aSjmcneill  *
16276fc83aSjmcneill  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17276fc83aSjmcneill  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18276fc83aSjmcneill  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19276fc83aSjmcneill  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20276fc83aSjmcneill  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21276fc83aSjmcneill  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22276fc83aSjmcneill  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23276fc83aSjmcneill  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24276fc83aSjmcneill  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25276fc83aSjmcneill  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26276fc83aSjmcneill  * SUCH DAMAGE.
27276fc83aSjmcneill  */
28276fc83aSjmcneill 
29276fc83aSjmcneill #include <sys/cdefs.h>
30*f77b3dc0Sjmcneill __KERNEL_RCSID(0, "$NetBSD: rk_drm.c,v 1.21 2022/10/30 23:10:43 jmcneill Exp $");
31276fc83aSjmcneill 
32276fc83aSjmcneill #include <sys/param.h>
33276fc83aSjmcneill #include <sys/bus.h>
3467b4eb14Sriastradh #include <sys/conf.h>
35276fc83aSjmcneill #include <sys/device.h>
36276fc83aSjmcneill #include <sys/intr.h>
37276fc83aSjmcneill #include <sys/kernel.h>
3867b4eb14Sriastradh #include <sys/systm.h>
39276fc83aSjmcneill 
4067b4eb14Sriastradh #include <uvm/uvm_device.h>
41276fc83aSjmcneill #include <uvm/uvm_extern.h>
42276fc83aSjmcneill #include <uvm/uvm_object.h>
4367b4eb14Sriastradh 
4467b4eb14Sriastradh #include <dev/fdt/fdt_port.h>
4567b4eb14Sriastradh #include <dev/fdt/fdtvar.h>
4667b4eb14Sriastradh 
4767b4eb14Sriastradh #include <arm/rockchip/rk_drm.h>
48276fc83aSjmcneill 
498b06e185Sriastradh #include <drm/drm_atomic_helper.h>
50c7fa00edSriastradh #include <drm/drm_auth.h>
51276fc83aSjmcneill #include <drm/drm_crtc_helper.h>
528b06e185Sriastradh #include <drm/drm_damage_helper.h>
5367b4eb14Sriastradh #include <drm/drm_drv.h>
54276fc83aSjmcneill #include <drm/drm_fb_helper.h>
55c7fa00edSriastradh #include <drm/drm_fourcc.h>
56c7fa00edSriastradh #include <drm/drm_vblank.h>
57276fc83aSjmcneill 
/* Hard limits advertised to DRM for display mode sizes (4K UHD). */
#define	RK_DRM_MAX_WIDTH	3840
#define	RK_DRM_MAX_HEIGHT	2160

/*
 * Global list of display ports registered by CRTC drivers via
 * rk_drm_register_port(); matched against this node's "ports" property
 * phandles in rk_drm_load().
 */
static TAILQ_HEAD(, rk_drm_ports) rk_drm_ports =
    TAILQ_HEAD_INITIALIZER(rk_drm_ports);

/* FDT compatible strings handled by this driver. */
static const struct device_compatible_entry compat_data[] = {
	{ .compat = "rockchip,display-subsystem" },
	DEVICE_COMPAT_EOL
};

/*
 * Firmware-provided framebuffer nodes removed from the device tree in
 * rk_drm_attach() (fdt_remove_bycompat) once this driver takes over.
 */
static const char * fb_compatible[] = {
	"simple-framebuffer",
	NULL
};

static int	rk_drm_match(device_t, cfdata_t, void *);
static void	rk_drm_attach(device_t, device_t, void *);

static void	rk_drm_init(device_t);
static vmem_t	*rk_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	rk_drm_load(struct drm_device *, unsigned long);
static void	rk_drm_unload(struct drm_device *);

static void	rk_drm_task_work(struct work *, void *);

/*
 * DRM driver description: atomic modesetting with CMA-backed GEM
 * objects (no per-device private data beyond rk_drm_softc).
 */
static struct drm_driver rk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = rk_drm_load,
	.unload = rk_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(rk_drm, sizeof(struct rk_drm_softc),
	rk_drm_match, rk_drm_attach, NULL, NULL);
108276fc83aSjmcneill 
109276fc83aSjmcneill static int
rk_drm_match(device_t parent,cfdata_t cf,void * aux)110276fc83aSjmcneill rk_drm_match(device_t parent, cfdata_t cf, void *aux)
111276fc83aSjmcneill {
112276fc83aSjmcneill 	struct fdt_attach_args * const faa = aux;
113276fc83aSjmcneill 
1148e90f9edSthorpej 	return of_compatible_match(faa->faa_phandle, compat_data);
115276fc83aSjmcneill }
116276fc83aSjmcneill 
/*
 * Autoconf attach: honor the "disabled" device property, record the
 * FDT/bus handles in the softc, create the deferred-task workqueue,
 * allocate the DRM device, evict any firmware framebuffer node, and
 * defer the rest of initialization until siblings have attached.
 */
static void
rk_drm_attach(device_t parent, device_t self, void *aux)
{
	struct rk_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &rk_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	aprint_naive("\n");

	/* Allow the platform to disable this instance via properties. */
	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) &&
	    is_disabled) {
		aprint_normal(": (disabled)\n");
		return;
	}

	aprint_normal("\n");

#ifdef WSDISPLAY_MULTICONS
	/* With multi-console support, always offer this device as console. */
	const bool is_console = true;
	prop_dictionary_set_bool(dict, "is_console", is_console);
#endif

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	/*
	 * sc_task_thread == NULL means tasks go to the workqueue; it is
	 * pointed at curlwp during rk_drm_init() so tasks issued during
	 * attach run synchronously (see rk_task_schedule()).
	 */
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "rkdrm",
	    &rk_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	/* Drop any firmware "simple-framebuffer" node; we own the display. */
	fdt_remove_bycompat(fb_compatible);

	/*
	 * Wait until rk_vop is attached as a sibling to this device --
	 * we need that to actually display our framebuffer.
	 */
	config_defer(self, rk_drm_init);
}
173276fc83aSjmcneill 
/*
 * Deferred initialization, run by config_defer() after sibling devices
 * (the rk_vop CRTCs) have attached: register the DRM device and drain
 * any tasks queued synchronously during attach.
 */
static void
rk_drm_init(device_t dev)
{
	struct rk_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &rk_drm_driver;
	int error;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	/* drm_dev_register returns a negative errno; flip the sign. */
	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct rk_drm_task *const task = SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, rdt_u.queue);
		(*task->rdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}
216276fc83aSjmcneill 
217276fc83aSjmcneill static vmem_t *
rk_drm_alloc_cma_pool(struct drm_device * ddev,size_t cma_size)218276fc83aSjmcneill rk_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
219276fc83aSjmcneill {
220276fc83aSjmcneill 	struct rk_drm_softc * const sc = rk_drm_private(ddev);
221276fc83aSjmcneill 	bus_dma_segment_t segs[1];
222276fc83aSjmcneill 	int nsegs;
223276fc83aSjmcneill 	int error;
224276fc83aSjmcneill 
225276fc83aSjmcneill 	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
226276fc83aSjmcneill 	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
227276fc83aSjmcneill 	if (error) {
228276fc83aSjmcneill 		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
229276fc83aSjmcneill 		return NULL;
230276fc83aSjmcneill 	}
231276fc83aSjmcneill 
232276fc83aSjmcneill 	return vmem_create("rkdrm", segs[0].ds_addr, segs[0].ds_len,
233276fc83aSjmcneill 	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
234276fc83aSjmcneill }
235276fc83aSjmcneill 
236276fc83aSjmcneill static int
rk_drm_fb_create_handle(struct drm_framebuffer * fb,struct drm_file * file,unsigned int * handle)237276fc83aSjmcneill rk_drm_fb_create_handle(struct drm_framebuffer *fb,
238276fc83aSjmcneill     struct drm_file *file, unsigned int *handle)
239276fc83aSjmcneill {
240276fc83aSjmcneill 	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);
241276fc83aSjmcneill 
242276fc83aSjmcneill 	return drm_gem_handle_create(file, &sfb->obj->base, handle);
243276fc83aSjmcneill }
244276fc83aSjmcneill 
245276fc83aSjmcneill static void
rk_drm_fb_destroy(struct drm_framebuffer * fb)246276fc83aSjmcneill rk_drm_fb_destroy(struct drm_framebuffer *fb)
247276fc83aSjmcneill {
248276fc83aSjmcneill 	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);
249276fc83aSjmcneill 
250276fc83aSjmcneill 	drm_framebuffer_cleanup(fb);
251c7fa00edSriastradh 	drm_gem_object_put_unlocked(&sfb->obj->base);
252276fc83aSjmcneill 	kmem_free(sfb, sizeof(*sfb));
253276fc83aSjmcneill }
254276fc83aSjmcneill 
/* Operations for framebuffers wrapped in struct rk_drm_framebuffer. */
static const struct drm_framebuffer_funcs rk_drm_framebuffer_funcs = {
	.create_handle = rk_drm_fb_create_handle,
	.destroy = rk_drm_fb_destroy,
};
259276fc83aSjmcneill 
260276fc83aSjmcneill static struct drm_framebuffer *
rk_drm_fb_create(struct drm_device * ddev,struct drm_file * file,const struct drm_mode_fb_cmd2 * cmd)261276fc83aSjmcneill rk_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
262c7fa00edSriastradh     const struct drm_mode_fb_cmd2 *cmd)
263276fc83aSjmcneill {
264276fc83aSjmcneill 	struct rk_drm_framebuffer *fb;
265276fc83aSjmcneill 	struct drm_gem_object *gem_obj;
266276fc83aSjmcneill 	int error;
267276fc83aSjmcneill 
268276fc83aSjmcneill 	if (cmd->flags)
269276fc83aSjmcneill 		return NULL;
270276fc83aSjmcneill 
271c7fa00edSriastradh 	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
272276fc83aSjmcneill 	if (gem_obj == NULL)
273276fc83aSjmcneill 		return NULL;
274276fc83aSjmcneill 
275276fc83aSjmcneill 	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
2767819df54Sriastradh 	drm_helper_mode_fill_fb_struct(ddev, &fb->base, cmd);
277276fc83aSjmcneill 	fb->obj = to_drm_gem_cma_obj(gem_obj);
278276fc83aSjmcneill 
279276fc83aSjmcneill 	error = drm_framebuffer_init(ddev, &fb->base, &rk_drm_framebuffer_funcs);
280276fc83aSjmcneill 	if (error != 0)
281276fc83aSjmcneill 		goto dealloc;
282276fc83aSjmcneill 
283276fc83aSjmcneill 	return &fb->base;
284276fc83aSjmcneill 
285276fc83aSjmcneill dealloc:
286276fc83aSjmcneill 	drm_framebuffer_cleanup(&fb->base);
287276fc83aSjmcneill 	kmem_free(fb, sizeof(*fb));
288c7fa00edSriastradh 	drm_gem_object_put_unlocked(gem_obj);
289276fc83aSjmcneill 
290276fc83aSjmcneill 	return NULL;
291276fc83aSjmcneill }
292276fc83aSjmcneill 
/* Mode-config operations: custom fb_create, stock atomic helpers. */
static struct drm_mode_config_funcs rk_drm_mode_config_funcs = {
	.fb_create = rk_drm_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* Commit tail variant that handles runtime-PM ordering. */
static struct drm_mode_config_helper_funcs rk_drm_mode_config_helper_funcs = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
302276fc83aSjmcneill 
/*
 * fb-helper probe callback: reserve the CMA pool, allocate a CMA GEM
 * object for the console framebuffer, fill in the drm_framebuffer
 * fields by hand, and attach the wsdisplay frontend on "rkfbbus".
 * Returns 0 on success or a negative errno.
 */
static int
rk_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct rk_drm_softc * const sc = rk_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct rk_drmfb_attach_args sfa;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);	/* 32 bits per pixel */

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	/* Reserve enough memory for the FB console plus a 4K plane, rounded to 1MB */
	cma_size = size;
	cma_size += (RK_DRM_MAX_WIDTH * RK_DRM_MAX_HEIGHT * 4);
	cma_size = roundup(cma_size, 1024 * 1024);
	/*
	 * NOTE(review): a NULL cma_pool is not treated as fatal here;
	 * presumably drm_gem_cma_create() below fails in that case --
	 * confirm against the CMA GEM implementation.
	 */
	sc->sc_ddev->cma_pool = rk_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL)
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
		    (u_int)(cma_size / (1024 * 1024)));

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	/* similar to drm_helper_mode_fill_fb_struct(), but we have no cmd */
	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->modifier = 0;
	fb->flags = 0;
#ifdef __ARM_BIG_ENDIAN
	fb->format = drm_format_info(DRM_FORMAT_BGRX8888);
#else
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
#endif
	fb->dev = ddev;

	error = drm_framebuffer_init(ddev, fb, &rk_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	/* Hand everything the wsdisplay frontend needs to map the FB. */
	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "rkfbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}
372276fc83aSjmcneill 
/* fb-helper operations: only the console framebuffer probe. */
static struct drm_fb_helper_funcs rk_drm_fb_helper_funcs = {
	.fb_probe = rk_drm_fb_probe,
};
376276fc83aSjmcneill 
/*
 * DRM load callback: configure mode limits, bind every registered CRTC
 * port referenced by this node's "ports" phandle list, then bring up
 * the fbdev helper (which triggers rk_drm_fb_probe) and vblank support.
 * Returns 0 or a positive errno.
 */
static int
rk_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	struct rk_drm_ports *sport;
	struct rk_drm_fbdev *fbdev;
	struct fdt_endpoint *ep;
	const u_int *data;
	int datalen, error, num_crtc, ep_index;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = RK_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = RK_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &rk_drm_mode_config_funcs;
	ddev->mode_config.helper_private = &rk_drm_mode_config_helper_funcs;

	/*
	 * Walk the "ports" property: each cell is a phandle of a CRTC
	 * node.  Claim every matching registered port that is not yet
	 * bound to a drm_device and activate all of its endpoints.
	 * NOTE(review): assumes fdtbus_get_prop() zeroes/sets datalen
	 * when the property is absent -- confirm its contract.
	 */
	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "ports", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sport, &rk_drm_ports, entries)
			if (sport->phandle == crtc_phandle && sport->ddev == NULL) {
				sport->ddev = ddev;
				for (ep_index = 0; (ep = fdt_endpoint_get_from_index(sport->port, 0, ep_index)) != NULL; ep_index++) {
					error = fdt_endpoint_activate_direct(ep, true);
					if (error != 0)
						aprint_debug_dev(sc->sc_dev,
						    "failed to activate endpoint %d: %d\n",
						    ep_index, error);
				}
				num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no display interface ports configured\n");
		error = ENXIO;
		goto drmerr;
	}

	drm_mode_config_reset(ddev);

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &rk_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	/* Placeholder fb filled in by rk_drm_fb_probe(). */
	fbdev->helper.fb = kmem_zalloc(sizeof(struct rk_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX Delegate this to rk_vop.c?  */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}
452276fc83aSjmcneill 
/*
 * DRM unload callback: tear down the mode configuration created in
 * rk_drm_load().
 */
static void
rk_drm_unload(struct drm_device *ddev)
{

	drm_mode_config_cleanup(ddev);
}
458276fc83aSjmcneill 
459276fc83aSjmcneill int
rk_drm_register_port(int phandle,struct fdt_device_ports * port)460276fc83aSjmcneill rk_drm_register_port(int phandle, struct fdt_device_ports *port)
461276fc83aSjmcneill {
462276fc83aSjmcneill 	struct rk_drm_ports *sport;
463276fc83aSjmcneill 
464276fc83aSjmcneill 	sport = kmem_zalloc(sizeof(*sport), KM_SLEEP);
465276fc83aSjmcneill 	sport->phandle = phandle;
466276fc83aSjmcneill 	sport->port = port;
467276fc83aSjmcneill 	sport->ddev = NULL;
468276fc83aSjmcneill 	TAILQ_INSERT_TAIL(&rk_drm_ports, sport, entries);
469276fc83aSjmcneill 
470276fc83aSjmcneill 	return 0;
471276fc83aSjmcneill }
472276fc83aSjmcneill 
473276fc83aSjmcneill struct drm_device *
rk_drm_port_device(struct fdt_device_ports * port)474276fc83aSjmcneill rk_drm_port_device(struct fdt_device_ports *port)
475276fc83aSjmcneill {
476276fc83aSjmcneill 	struct rk_drm_ports *sport;
477276fc83aSjmcneill 
478276fc83aSjmcneill 	TAILQ_FOREACH(sport, &rk_drm_ports, entries)
479276fc83aSjmcneill 		if (sport->port == port)
480276fc83aSjmcneill 			return sport->ddev;
481276fc83aSjmcneill 
482276fc83aSjmcneill 	return NULL;
483276fc83aSjmcneill }
48456649a44Sriastradh 
48556649a44Sriastradh static void
rk_drm_task_work(struct work * work,void * cookie)48656649a44Sriastradh rk_drm_task_work(struct work *work, void *cookie)
48756649a44Sriastradh {
48856649a44Sriastradh 	struct rk_drm_task *task = container_of(work, struct rk_drm_task,
48956649a44Sriastradh 	    rdt_u.work);
49056649a44Sriastradh 
49156649a44Sriastradh 	(*task->rdt_fn)(task);
49256649a44Sriastradh }
49356649a44Sriastradh 
49456649a44Sriastradh void
rk_task_init(struct rk_drm_task * task,void (* fn)(struct rk_drm_task *))49556649a44Sriastradh rk_task_init(struct rk_drm_task *task,
49656649a44Sriastradh     void (*fn)(struct rk_drm_task *))
49756649a44Sriastradh {
49856649a44Sriastradh 
49956649a44Sriastradh 	task->rdt_fn = fn;
50056649a44Sriastradh }
50156649a44Sriastradh 
50256649a44Sriastradh void
rk_task_schedule(device_t self,struct rk_drm_task * task)50356649a44Sriastradh rk_task_schedule(device_t self, struct rk_drm_task *task)
50456649a44Sriastradh {
50556649a44Sriastradh 	struct rk_drm_softc *sc = device_private(self);
50656649a44Sriastradh 
50756649a44Sriastradh 	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
50856649a44Sriastradh 		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, rdt_u.queue);
50956649a44Sriastradh 	else
51056649a44Sriastradh 		workqueue_enqueue(sc->sc_task_wq, &task->rdt_u.work, NULL);
51156649a44Sriastradh }
512