xref: /linux/drivers/gpu/drm/vboxvideo/vbox_main.c (revision 44f57d78)
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com>,
 *          Hans de Goede <hdegoede@redhat.com>
 */

#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"

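/* Drop the GEM object reference, then clean up and free the framebuffer. */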
static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);

	if (vbox_fb->obj)
		drm_gem_object_put_unlocked(vbox_fb->obj);

	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

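/* Report the guest driver's VBVA capabilities to the host. */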
void vbox_report_caps(struct vbox_private *vbox)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
		   VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;

	/* The host only accepts VIDEO_MODE_HINTS if it is sent separately. */
	hgsmi_send_caps_info(vbox->guest_pool, caps);
	caps |= VBVACAPS_VIDEO_MODE_HINTS;
	hgsmi_send_caps_info(vbox->guest_pool, caps);
}

/* Send information about dirty rectangles to VBVA. */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_display_mode *mode;
	struct drm_crtc *crtc;
	int crtc_x, crtc_y;
	unsigned int i;

	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		if (crtc->primary->state->fb != fb)
			continue;

		mode = &crtc->state->mode;
		crtc_x = crtc->primary->state->src_x >> 16;
		crtc_y = crtc->primary->state->src_y >> 16;

		for (i = 0; i < num_rects; ++i) {
			struct vbva_cmd_hdr cmd_hdr;
			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

			if (rects[i].x1 > crtc_x + mode->hdisplay ||
			    rects[i].y1 > crtc_y + mode->vdisplay ||
			    rects[i].x2 < crtc_x ||
			    rects[i].y2 < crtc_y)
				continue;

			cmd_hdr.x = (s16)rects[i].x1;
			cmd_hdr.y = (s16)rects[i].y1;
			cmd_hdr.w = (u16)(rects[i].x2 - rects[i].x1);
			cmd_hdr.h = (u16)(rects[i].y2 - rects[i].y1);

			if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
						      vbox->guest_pool))
				continue;

			vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				   &cmd_hdr, sizeof(cmd_hdr));
			vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}

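/* The DIRTYFB ioctl path: forward the dirty rectangles to the host via VBVA. */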
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}

static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};

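/* Wrap a GEM object in a vbox_framebuffer and register it with DRM. */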
int vbox_framebuffer_init(struct vbox_private *vbox,
			  struct vbox_framebuffer *vbox_fb,
			  const struct drm_mode_fb_cmd2 *mode_cmd,
			  struct drm_gem_object *obj)
{
	int ret;

	drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
	vbox_fb->obj = obj;
	ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

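/*
 * Carve a VBVA command buffer for each screen out of the top of usable
 * VRAM and enable VBVA on all of them.
 */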
static int vbox_accel_init(struct vbox_private *vbox)
{
	struct vbva_buffer *vbva;
	unsigned int i;

	vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i) {
		vbva_setup_buffer_context(&vbox->vbva_info[i],
					  vbox->available_vram_size +
					  i * VBVA_MIN_BUFFER_SIZE,
					  VBVA_MIN_BUFFER_SIZE);
		vbva = (void __force *)vbox->vbva_buffers +
			i * VBVA_MIN_BUFFER_SIZE;
		if (!vbva_enable(&vbox->vbva_info[i],
				 vbox->guest_pool, vbva, i)) {
			/* very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
		}
	}

	return 0;
}

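/* Disable VBVA on each screen and unmap the command buffers. */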
static void vbox_accel_fini(struct vbox_private *vbox)
{
	unsigned int i;

	for (i = 0; i < vbox->num_crtcs; ++i)
		vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);

	pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
}

/* Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
	u32 have_hints, have_cursor;
	int ret;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
			       &have_hints);
	if (ret)
		return false;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
			       &have_cursor);
	if (ret)
		return false;

	return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
}

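/*
 * Check whether the host supports the given VBE DISPI interface ID:
 * write it to the ID register and see if the same value reads back.
 */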
bool vbox_check_supported(u16 id)
{
	u16 dispi_id;

	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
	dispi_id = inw(VBE_DISPI_IOPORT_DATA);

	return dispi_id == id;
}

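/*
 * One-time hardware setup: read the VRAM size, map the guest heap at the
 * end of VRAM, create the HGSMI guest pool, query the screen count and
 * set up VBVA acceleration.
 */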
int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map the guest heap at the end of VRAM. */
	vbox->guest_heap =
	    pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
			    GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create the guest-heap memory pool, using 2^4 = 16 byte chunks. */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	ret = hgsmi_test_query_conf(vbox->guest_pool);
	if (ret) {
		DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
		goto err_destroy_guest_pool;
	}

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux DRM represents monitors as a 32-bit array. */
	hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			 &vbox->num_crtcs);
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
					     sizeof(struct vbva_modehint),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
	return ret;
}

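/*
 * Undo vbox_hw_init(): disable acceleration, destroy the guest pool and
 * unmap the guest heap.
 */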
void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}

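/* Allocate a TTM-backed buffer object and return it as a GEM object. */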
int vbox_gem_create(struct vbox_private *vbox,
		    u32 size, bool iskernel, struct drm_gem_object **obj)
{
	struct vbox_bo *vboxbo;
	int ret;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object\n");
		return ret;
	}

	*obj = &vboxbo->gem;

	return 0;
}

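/* Create a "dumb" buffer: compute pitch and size, allocate it and return a handle. */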
int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	struct vbox_private *vbox =
		container_of(dev, struct vbox_private, ddev);
	struct drm_gem_object *gobj;
	u32 handle;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = vbox_gem_create(vbox, args->size, false, &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_put_unlocked(gobj);
	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}

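/* Called when the last GEM reference is dropped; releases the TTM object. */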
void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);

	ttm_bo_put(&vbox_bo->bo);
}

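/* The fake offset at which user-space can mmap() this buffer object. */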
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}

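/* Look up a dumb-buffer handle and return the fake offset for mmap()ing it. */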
int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;
	struct vbox_bo *bo;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_vbox_bo(obj);
	*offset = vbox_bo_mmap_offset(bo);

	drm_gem_object_put(obj);
	ret = 0;

out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
362