// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
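
/*
 * Instances are created through the regular mdev sysfs interface by
 * writing a UUID to the "create" attribute of one of the supported
 * types (sketch only; exact sysfs paths depend on the running kernel):
 *
 *   UUID=$(uuidgen)
 *   echo "$UUID" > /sys/class/mdpy/mdpy/mdev_supported_types/mdpy-vga/create
 *
 * The resulting device can then be handed to QEMU, e.g. with
 * "-device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID,display=on".
 */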
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

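/*
 * Store little-endian values into the virtual config space.  The plain
 * cast-and-assign assumes a little-endian host, which is good enough
 * for this sample.
 */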
#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

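/*
 * Supported display types: one fixed mode each, all using a 32 bpp
 * XRGB8888 framebuffer.
 */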
static struct mdpy_type {
	struct mdev_type type;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.type.sysfs_name	= MDPY_TYPE_1,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.type.sysfs_name	= MDPY_TYPE_2,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.type.sysfs_name	= MDPY_TYPE_3,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

static struct mdev_type *mdpy_mdev_types[] = {
	&mdpy_types[0].type,
	&mdpy_types[1].type,
	&mdpy_types[2].type,
};

static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

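/*
 * Build the virtual PCI config space: device/vendor IDs, a 32-bit
 * prefetchable memory BAR0 sized to the framebuffer, and a vendor
 * capability advertising the framebuffer format and resolution.
 */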
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] =  0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

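/*
 * Emulate config space writes.  Only BAR0 programming is handled:
 * writing 0xffffffff returns the size mask, any other value is stored
 * as the guest-assigned base address.
 */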
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

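/*
 * Common access path for reads and writes: offsets below
 * MDPY_CONFIG_SPACE_SIZE hit the virtual config space, offsets inside
 * the memory BAR window hit the framebuffer, everything else fails.
 */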
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

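/*
 * Per-device init: allocate the virtual config space and the
 * framebuffer (rounded up to a power of two so it can be mmap'ed),
 * then populate config space and draw the initial test pattern.
 */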
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		container_of(mdev->type, struct mdpy_type, type);
	u32 fbsize;
	int ret = -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
		 type->width, type->height);
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

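/* Allocate the vfio_device and register it as an emulated IOMMU device. */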
static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
	vfio_free_device(vdev);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}

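/*
 * read()/write() split user accesses into naturally aligned 4-, 2- and
 * 1-byte chunks, which are forwarded to mdev_access().
 */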
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

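/*
 * mmap() of the framebuffer BAR: only shared mappings starting at the
 * fixed BAR offset and no larger than the framebuffer are accepted.
 */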
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

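/*
 * Region layout: config space at offset 0, the framebuffer reported
 * both as BAR0 and as the extra MDPY_DISPLAY_REGION (used by the gfx
 * plane interface), everything else empty.
 */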
static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

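/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: mdpy only supports region-type planes,
 * so report the fixed-mode framebuffer and point at MDPY_DISPLAY_REGION.
 */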
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

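/* VFIO device ioctls: device/region/irq info, gfx plane query and reset. */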
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

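/* Per-device sysfs attributes, grouped under a "vendor" directory. */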
static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
	struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}

static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
};

static struct mdev_driver mdpy_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.max_instances = 4,
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
	.show_description = mdpy_show_description,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

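/*
 * Module init: set up a chardev region and class, register the mdev
 * driver, create the parent device and register it with the supported
 * types.  Teardown happens in reverse order.
 */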
static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto err_driver;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_class;

	ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
				   mdpy_mdev_types,
				   ARRAY_SIZE(mdpy_mdev_types));
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_unregister(&mdpy_dev);
err_class:
	class_destroy(mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_parent(&mdpy_parent);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)