xref: /linux/drivers/s390/cio/vfio_ccw_chp.c (revision 0be3ff0c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Channel path related status regions for vfio_ccw
 *
 * Copyright IBM Corp. 2020
 *
 * Author(s): Farhan Ali <alifm@linux.ibm.com>
 *            Eric Farman <farman@linux.ibm.com>
 */

#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"

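/*
 * Read handler for the schib region: refresh the subchannel information
 * block (SCHIB) from the hardware via cio_update_schib(), stage it in the
 * region buffer, and copy the requested range to userspace.  Accesses are
 * serialized by io_mutex.
 */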
static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
					  char __user *buf, size_t count,
					  loff_t *ppos)
{
	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_schib_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->region[i].data;

	if (cio_update_schib(private->sch)) {
		ret = -ENODEV;
		goto out;
	}

	memcpy(region, &private->sch->schib, sizeof(*region));

	if (copy_to_user(buf, (void *)region + pos, count)) {
		ret = -EFAULT;
		goto out;
	}

	ret = count;

out:
	mutex_unlock(&private->io_mutex);
	return ret;
}

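/* The schib region is read-only; reject any write attempts. */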
static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
					   const char __user *buf, size_t count,
					   loff_t *ppos)
{
	return -EINVAL;
}

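/* Nothing to clean up when the schib region is released. */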
static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
					  struct vfio_ccw_region *region)
{

}

static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
	.read = vfio_ccw_schib_region_read,
	.write = vfio_ccw_schib_region_write,
	.release = vfio_ccw_schib_region_release,
};

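/*
 * Register the read-only schib region with the vfio-ccw device, giving
 * userspace a window into the current subchannel information block.
 */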
int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
{
	return vfio_ccw_register_dev_region(private,
					    VFIO_REGION_SUBTYPE_CCW_SCHIB,
					    &vfio_ccw_schib_region_ops,
					    sizeof(struct ccw_schib_region),
					    VFIO_REGION_INFO_FLAG_READ,
					    private->schib_region);
}

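/*
 * Read handler for the CRW region: dequeue the oldest channel report
 * word (if any) from the private->crw list, expose it to userspace via
 * the region buffer, and clear the buffer afterwards.  If further CRWs
 * remain queued, the crw_trigger eventfd is signalled so userspace
 * knows to read again.
 */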
static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
					char __user *buf, size_t count,
					loff_t *ppos)
{
	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_crw_region *region;
	struct vfio_ccw_crw *crw;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	crw = list_first_entry_or_null(&private->crw,
				       struct vfio_ccw_crw, next);

	if (crw)
		list_del(&crw->next);

	mutex_lock(&private->io_mutex);
	region = private->region[i].data;

	if (crw)
		memcpy(&region->crw, &crw->crw, sizeof(region->crw));

	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;

	region->crw = 0;

	mutex_unlock(&private->io_mutex);

	kfree(crw);

	/* Notify the guest if more CRWs are on our queue */
	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);

	return ret;
}

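/* The CRW region is read-only; reject any write attempts. */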
static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
					 const char __user *buf, size_t count,
					 loff_t *ppos)
{
	return -EINVAL;
}

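/* Nothing to clean up when the CRW region is released. */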
static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
					struct vfio_ccw_region *region)
{

}

static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
	.read = vfio_ccw_crw_region_read,
	.write = vfio_ccw_crw_region_write,
	.release = vfio_ccw_crw_region_release,
};

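/*
 * Register the read-only CRW region with the vfio-ccw device, allowing
 * userspace to retrieve queued channel report words.
 */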
int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
{
	return vfio_ccw_register_dev_region(private,
					    VFIO_REGION_SUBTYPE_CCW_CRW,
					    &vfio_ccw_crw_region_ops,
					    sizeof(struct ccw_crw_region),
					    VFIO_REGION_INFO_FLAG_READ,
					    private->crw_region);
}