xref: /netbsd/sys/external/bsd/drm2/dist/drm/i915/gvt/gvt.c (revision 677dec6e)
1 /*	$NetBSD: gvt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
2 
3 /*
4  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Kevin Tian <kevin.tian@intel.com>
27  *    Eddie Dong <eddie.dong@intel.com>
28  *
29  * Contributors:
30  *    Niu Bing <bing.niu@intel.com>
31  *    Zhi Wang <zhi.a.wang@intel.com>
32  *
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: gvt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
37 
38 #include <linux/types.h>
39 #include <xen/xen.h>
40 #include <linux/kthread.h>
41 
42 #include "i915_drv.h"
43 #include "gvt.h"
44 #include <linux/vfio.h>
45 #include <linux/mdev.h>
46 
/* Global GVT host state shared between the device model and the
 * hypervisor-specific MPT module (set up in intel_gvt_init_device() and
 * intel_gvt_register_hypervisor()). */
struct intel_gvt_host intel_gvt_host;

/* Human-readable hypervisor names, indexed by INTEL_GVT_HYPERVISOR_*,
 * used only in log messages. */
static const char * const supported_hypervisors[] = {
	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
53 
intel_gvt_find_vgpu_type(struct intel_gvt * gvt,const char * name)54 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
55 		const char *name)
56 {
57 	int i;
58 	struct intel_vgpu_type *t;
59 	const char *driver_name = dev_driver_string(
60 			&gvt->dev_priv->drm.pdev->dev);
61 
62 	for (i = 0; i < gvt->num_types; i++) {
63 		t = &gvt->types[i];
64 		if (!strncmp(t->name, name + strlen(driver_name) + 1,
65 			sizeof(t->name)))
66 			return t;
67 	}
68 
69 	return NULL;
70 }
71 
available_instances_show(struct kobject * kobj,struct device * dev,char * buf)72 static ssize_t available_instances_show(struct kobject *kobj,
73 					struct device *dev, char *buf)
74 {
75 	struct intel_vgpu_type *type;
76 	unsigned int num = 0;
77 	void *gvt = kdev_to_i915(dev)->gvt;
78 
79 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
80 	if (!type)
81 		num = 0;
82 	else
83 		num = type->avail_instance;
84 
85 	return sprintf(buf, "%u\n", num);
86 }
87 
/* sysfs "device_api" attribute: every GVT mdev exposes the VFIO PCI API. */
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
93 
description_show(struct kobject * kobj,struct device * dev,char * buf)94 static ssize_t description_show(struct kobject *kobj, struct device *dev,
95 		char *buf)
96 {
97 	struct intel_vgpu_type *type;
98 	void *gvt = kdev_to_i915(dev)->gvt;
99 
100 	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
101 	if (!type)
102 		return 0;
103 
104 	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
105 		       "fence: %d\nresolution: %s\n"
106 		       "weight: %d\n",
107 		       BYTES_TO_MB(type->low_gm_size),
108 		       BYTES_TO_MB(type->high_gm_size),
109 		       type->fence, vgpu_edid_str(type->resolution),
110 		       type->weight);
111 }
112 
/* Read-only mdev type attributes backed by the *_show() callbacks above. */
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

/* Attribute set shared by every vGPU type's sysfs group. */
static struct attribute *gvt_type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};

/* Per-type attribute groups, populated by intel_gvt_init_vgpu_type_groups()
 * and handed to the mdev layer via intel_get_gvt_attrs(); unused slots
 * stay NULL. */
static struct attribute_group *gvt_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
127 
intel_get_gvt_attrs(struct attribute *** type_attrs,struct attribute_group *** intel_vgpu_type_groups)128 static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
129 		struct attribute_group ***intel_vgpu_type_groups)
130 {
131 	*type_attrs = gvt_type_attrs;
132 	*intel_vgpu_type_groups = gvt_vgpu_type_groups;
133 	return true;
134 }
135 
intel_gvt_init_vgpu_type_groups(struct intel_gvt * gvt)136 static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
137 {
138 	int i, j;
139 	struct intel_vgpu_type *type;
140 	struct attribute_group *group;
141 
142 	for (i = 0; i < gvt->num_types; i++) {
143 		type = &gvt->types[i];
144 
145 		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
146 		if (WARN_ON(!group))
147 			goto unwind;
148 
149 		group->name = type->name;
150 		group->attrs = gvt_type_attrs;
151 		gvt_vgpu_type_groups[i] = group;
152 	}
153 
154 	return true;
155 
156 unwind:
157 	for (j = 0; j < i; j++) {
158 		group = gvt_vgpu_type_groups[j];
159 		kfree(group);
160 	}
161 
162 	return false;
163 }
164 
intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt * gvt)165 static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
166 {
167 	int i;
168 	struct attribute_group *group;
169 
170 	for (i = 0; i < gvt->num_types; i++) {
171 		group = gvt_vgpu_type_groups[i];
172 		gvt_vgpu_type_groups[i] = NULL;
173 		kfree(group);
174 	}
175 }
176 
/*
 * Operation table handed to the hypervisor MPT module via
 * intel_gvt_hypervisor_host_init(); it routes guest config-space, MMIO,
 * page-track and vGPU life-cycle requests back into the device model.
 */
static const struct intel_gvt_ops intel_gvt_ops = {
	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
	.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
	.vgpu_create = intel_gvt_create_vgpu,
	.vgpu_destroy = intel_gvt_destroy_vgpu,
	.vgpu_release = intel_gvt_release_vgpu,
	.vgpu_reset = intel_gvt_reset_vgpu,
	.vgpu_activate = intel_gvt_activate_vgpu,
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
	.get_gvt_attrs = intel_get_gvt_attrs,
	.vgpu_query_plane = intel_vgpu_query_plane,
	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
	.write_protect_handler = intel_vgpu_page_track_handler,
	.emulate_hotplug = intel_vgpu_emulate_hotplug,
};
195 
/*
 * Fill in the fixed parameters of the emulated device that the rest of GVT
 * reads from gvt->device_info.  All values except msi_cap_offset are
 * constants describing the virtual device presented to guests
 * (NOTE(review): presumed gen8+ layout — confirm per-platform limits).
 */
static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;

	info->max_support_vgpus = 8;
	/* Expose the full extended (PCIe) configuration space. */
	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
	info->mmio_size = 2 * 1024 * 1024;
	info->mmio_bar = 0;
	info->gtt_start_offset = 8 * 1024 * 1024;
	info->gtt_entry_size = 8;
	info->gtt_entry_size_shift = 3;
	info->gmadr_bytes_in_cmd = 8;
	info->max_surface_size = 36 * 1024 * 1024;
	/* The MSI capability offset is taken from the real PCI device. */
	info->msi_cap_offset = pdev->msi_cap;
}
212 
/*
 * Main loop of the GVT service thread.  Sleeps on service_thread_wq until a
 * request bit is set in gvt->service_request (or the thread is asked to
 * stop), then dispatches the pending work: vblank emulation and/or a
 * scheduling pass.  Returns 0 when stopped via kthread_stop().
 */
static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	gvt_dbg_core("service thread start\n");

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);

		/* Re-check after waking: stop takes priority over requests. */
		if (kthread_should_stop())
			break;

		/* Sleep was interrupted by a signal: nothing to service. */
		if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
			continue;

		/* Consume the vblank request bit before emulating. */
		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
					(void *)&gvt->service_request))
			intel_gvt_emulate_vblank(gvt);

		/* Scheduling bits are only tested (not cleared) here;
		 * NOTE(review): presumably intel_gvt_schedule() clears them
		 * itself, given the asymmetry with the vblank bit — confirm. */
		if (test_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request) ||
			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
					(void *)&gvt->service_request)) {
			intel_gvt_schedule(gvt);
		}
	}

	return 0;
}
244 
/* Stop and reap the service thread started by init_service_thread(). */
static void clean_service_thread(struct intel_gvt *gvt)
{
	kthread_stop(gvt->service_thread);
}
249 
init_service_thread(struct intel_gvt * gvt)250 static int init_service_thread(struct intel_gvt *gvt)
251 {
252 	init_waitqueue_head(&gvt->service_thread_wq);
253 
254 	gvt->service_thread = kthread_run(gvt_service_thread,
255 			gvt, "gvt_service_thread");
256 	if (IS_ERR(gvt->service_thread)) {
257 		gvt_err("fail to start service thread.\n");
258 		return PTR_ERR(gvt->service_thread);
259 	}
260 	return 0;
261 }
262 
/**
 * intel_gvt_clean_device - clean a GVT device
 * @dev_priv: i915 private
 *
 * This function is called at the driver unloading stage, to free the
 * resources owned by a GVT device.
 *
 */
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
	struct intel_gvt *gvt = to_gvt(dev_priv);

	/* Nothing to do if GVT was never initialized on this device. */
	if (WARN_ON(!gvt))
		return;

	/* Tear down in (roughly) reverse order of intel_gvt_init_device(). */
	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
	intel_gvt_cleanup_vgpu_type_groups(gvt);
	intel_gvt_clean_vgpu_types(gvt);

	intel_gvt_debugfs_clean(gvt);
	/* Stop the service thread before dismantling what it operates on. */
	clean_service_thread(gvt);
	intel_gvt_clean_cmd_parser(gvt);
	intel_gvt_clean_sched_policy(gvt);
	intel_gvt_clean_workload_scheduler(gvt);
	intel_gvt_clean_gtt(gvt);
	intel_gvt_clean_irq(gvt);
	intel_gvt_free_firmware(gvt);
	intel_gvt_clean_mmio_info(gvt);
	idr_destroy(&gvt->vgpu_idr);

	kfree(dev_priv->gvt);
	dev_priv->gvt = NULL;
}
296 
297 /**
298  * intel_gvt_init_device - initialize a GVT device
299  * @dev_priv: drm i915 private data
300  *
301  * This function is called at the initialization stage, to initialize
302  * necessary GVT components.
303  *
304  * Returns:
305  * Zero on success, negative error code if failed.
306  *
307  */
intel_gvt_init_device(struct drm_i915_private * dev_priv)308 int intel_gvt_init_device(struct drm_i915_private *dev_priv)
309 {
310 	struct intel_gvt *gvt;
311 	struct intel_vgpu *vgpu;
312 	int ret;
313 
314 	if (WARN_ON(dev_priv->gvt))
315 		return -EEXIST;
316 
317 	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
318 	if (!gvt)
319 		return -ENOMEM;
320 
321 	gvt_dbg_core("init gvt device\n");
322 
323 	idr_init(&gvt->vgpu_idr);
324 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
325 	mutex_init(&gvt->lock);
326 	mutex_init(&gvt->sched_lock);
327 	gvt->dev_priv = dev_priv;
328 
329 	init_device_info(gvt);
330 
331 	ret = intel_gvt_setup_mmio_info(gvt);
332 	if (ret)
333 		goto out_clean_idr;
334 
335 	intel_gvt_init_engine_mmio_context(gvt);
336 
337 	ret = intel_gvt_load_firmware(gvt);
338 	if (ret)
339 		goto out_clean_mmio_info;
340 
341 	ret = intel_gvt_init_irq(gvt);
342 	if (ret)
343 		goto out_free_firmware;
344 
345 	ret = intel_gvt_init_gtt(gvt);
346 	if (ret)
347 		goto out_clean_irq;
348 
349 	ret = intel_gvt_init_workload_scheduler(gvt);
350 	if (ret)
351 		goto out_clean_gtt;
352 
353 	ret = intel_gvt_init_sched_policy(gvt);
354 	if (ret)
355 		goto out_clean_workload_scheduler;
356 
357 	ret = intel_gvt_init_cmd_parser(gvt);
358 	if (ret)
359 		goto out_clean_sched_policy;
360 
361 	ret = init_service_thread(gvt);
362 	if (ret)
363 		goto out_clean_cmd_parser;
364 
365 	ret = intel_gvt_init_vgpu_types(gvt);
366 	if (ret)
367 		goto out_clean_thread;
368 
369 	ret = intel_gvt_init_vgpu_type_groups(gvt);
370 	if (ret == false) {
371 		gvt_err("failed to init vgpu type groups: %d\n", ret);
372 		goto out_clean_types;
373 	}
374 
375 	vgpu = intel_gvt_create_idle_vgpu(gvt);
376 	if (IS_ERR(vgpu)) {
377 		ret = PTR_ERR(vgpu);
378 		gvt_err("failed to create idle vgpu\n");
379 		goto out_clean_types;
380 	}
381 	gvt->idle_vgpu = vgpu;
382 
383 	intel_gvt_debugfs_init(gvt);
384 
385 	gvt_dbg_core("gvt device initialization is done\n");
386 	dev_priv->gvt = gvt;
387 	intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
388 	intel_gvt_host.initialized = true;
389 	return 0;
390 
391 out_clean_types:
392 	intel_gvt_clean_vgpu_types(gvt);
393 out_clean_thread:
394 	clean_service_thread(gvt);
395 out_clean_cmd_parser:
396 	intel_gvt_clean_cmd_parser(gvt);
397 out_clean_sched_policy:
398 	intel_gvt_clean_sched_policy(gvt);
399 out_clean_workload_scheduler:
400 	intel_gvt_clean_workload_scheduler(gvt);
401 out_clean_gtt:
402 	intel_gvt_clean_gtt(gvt);
403 out_clean_irq:
404 	intel_gvt_clean_irq(gvt);
405 out_free_firmware:
406 	intel_gvt_free_firmware(gvt);
407 out_clean_mmio_info:
408 	intel_gvt_clean_mmio_info(gvt);
409 out_clean_idr:
410 	idr_destroy(&gvt->vgpu_idr);
411 	kfree(gvt);
412 	return ret;
413 }
414 
/*
 * intel_gvt_register_hypervisor - attach a hypervisor MPT module to GVT
 * @m: hypervisor-specific mediate pass-through operations
 *
 * Records @m as the active hypervisor backend and hands the GVT ops table
 * to it via intel_gvt_hypervisor_host_init().  Takes a module reference
 * that is dropped on failure here, or later in
 * intel_gvt_unregister_hypervisor().
 *
 * Returns 0 on success, -ENODEV or -EINVAL on failure.
 */
int
intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
{
	int ret;
	void *gvt;

	/* GVT core must have completed intel_gvt_init_device() first. */
	if (!intel_gvt_host.initialized)
		return -ENODEV;

	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
	    m->type != INTEL_GVT_HYPERVISOR_XEN)
		return -EINVAL;

	/* Get a reference for device model module */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	intel_gvt_host.mpt = m;
	intel_gvt_host.hypervisor_type = m->type;
	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
					     &intel_gvt_ops);
	if (ret < 0) {
		gvt_err("Failed to init %s hypervisor module\n",
			supported_hypervisors[intel_gvt_host.hypervisor_type]);
		/* Drop the reference taken above on failure. */
		module_put(THIS_MODULE);
		return -ENODEV;
	}
	gvt_dbg_core("Running with hypervisor %s in host mode\n",
		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
	return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
449 
450 void
intel_gvt_unregister_hypervisor(void)451 intel_gvt_unregister_hypervisor(void)
452 {
453 	intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
454 	module_put(THIS_MODULE);
455 }
456 EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
457