xref: /linux/drivers/gpu/drm/msm/msm_fbdev.c (revision 44f57d78)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>

#include "msm_drv.h"
#include "msm_kms.h"

extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
					struct vm_area_struct *vma);
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);

/*
 * fbdev funcs, to implement legacy fbdev interface on top of drm driver
 */

#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)

struct msm_fbdev {
	struct drm_fb_helper base;
	struct drm_framebuffer *fb;
};

static struct fb_ops msm_fb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,

	/* Note: to properly handle manual update displays, we wrap the
	 * basic fbdev ops which write to the framebuffer
	 */
	.fb_read = drm_fb_helper_sys_read,
	.fb_write = drm_fb_helper_sys_write,
	.fb_fillrect = drm_fb_helper_sys_fillrect,
	.fb_copyarea = drm_fb_helper_sys_copyarea,
	.fb_imageblit = drm_fb_helper_sys_imageblit,
	.fb_mmap = msm_fbdev_mmap,
};

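/*
 * fb_mmap handler: map the GEM object backing the fbdev framebuffer into
 * userspace.  drm_gem_mmap_obj() sets up the VMA for the object, then
 * msm_gem_mmap_obj() applies the msm-specific mapping attributes.
 */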
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
	int ret = 0;

	ret = drm_gem_mmap_obj(bo, bo->size, vma);
	if (ret) {
		pr_err("%s: drm_gem_mmap_obj failed\n", __func__);
		return ret;
	}

	return msm_gem_mmap_obj(bo, vma);
}

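/*
 * .fb_probe handler: allocate a framebuffer (and backing GEM object) big
 * enough for the requested surface, pin and map it, and hook it up to the
 * fb_info used by the fbdev core.
 */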
static int msm_fbdev_create(struct drm_fb_helper *helper,
		struct drm_fb_helper_surface_size *sizes)
{
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_device *dev = helper->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb = NULL;
	struct drm_gem_object *bo;
	struct fb_info *fbi = NULL;
	uint64_t paddr;
	uint32_t format;
	int ret, pitch;

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);

	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
			sizes->surface_height, sizes->surface_bpp,
			sizes->fb_width, sizes->fb_height);

	pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
	fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
			sizes->surface_height, pitch, format);

	if (IS_ERR(fb)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
		return PTR_ERR(fb);
	}

	bo = msm_framebuffer_bo(fb, 0);

	mutex_lock(&dev->struct_mutex);

	/*
	 * NOTE: if we can be guaranteed to be able to map buffer
	 * in panic (ie. lock-safe, etc) we could avoid pinning the
	 * buffer now:
	 */
	ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
		goto fail_unlock;
	}

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
		ret = PTR_ERR(fbi);
		goto fail_unlock;
	}

	DBG("fbi=%p, dev=%p", fbi, dev);

	fbdev->fb = fb;
	helper->fb = fb;

	fbi->fbops = &msm_fb_ops;

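	/* Let the helper fill in the remaining fbi fix/var fields from the
	 * framebuffer and the requested surface size.
	 */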
	drm_fb_helper_fill_info(fbi, helper, sizes);

	dev->mode_config.fb_base = paddr;

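	/* Map the buffer into the kernel so the fbdev core (and the
	 * drm_fb_helper_sys_* drawing ops above) can write to it via
	 * screen_base.
	 */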
	fbi->screen_base = msm_gem_get_vaddr(bo);
	if (IS_ERR(fbi->screen_base)) {
		ret = PTR_ERR(fbi->screen_base);
		goto fail_unlock;
	}
	fbi->screen_size = bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = bo->size;

	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);

	mutex_unlock(&dev->struct_mutex);

	return 0;

fail_unlock:
	mutex_unlock(&dev->struct_mutex);
	drm_framebuffer_remove(fb);
	return ret;
}

static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
	.fb_probe = msm_fbdev_create,
};

/* initialize fbdev helper */
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_fbdev *fbdev = NULL;
	struct drm_fb_helper *helper;
	int ret;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		goto fail;

	helper = &fbdev->base;

	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
		goto fail;
	}

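	/* Attach all currently registered connectors to the helper and set
	 * up the initial 32bpp configuration, which ends up calling the
	 * .fb_probe hook (msm_fbdev_create) above.
	 */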
	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto fini;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto fini;

	priv->fbdev = helper;

	return helper;

fini:
	drm_fb_helper_fini(helper);
fail:
	kfree(fbdev);
	return NULL;
}

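/* tear down the fbdev helper: unregister the fb_info from the fbdev core,
 * drop the kernel mapping of the backing object, and remove the framebuffer
 */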
void msm_fbdev_free(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_fb_helper *helper = priv->fbdev;
	struct msm_fbdev *fbdev;

	DBG();

	drm_fb_helper_unregister_fbi(helper);

	drm_fb_helper_fini(helper);

	fbdev = to_msm_fbdev(priv->fbdev);

	/* this will free the backing object */
	if (fbdev->fb) {
		struct drm_gem_object *bo =
			msm_framebuffer_bo(fbdev->fb, 0);
		msm_gem_put_vaddr(bo);
		drm_framebuffer_remove(fbdev->fb);
	}

	kfree(fbdev);

	priv->fbdev = NULL;
}
213