xref: /dragonfly/sys/dev/drm/i915/gvt/mpt.h (revision 9317c2d0)
1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Eddie Dong <eddie.dong@intel.com>
25  *    Dexuan Cui
26  *    Jike Song <jike.song@intel.com>
27  *
28  * Contributors:
29  *    Zhi Wang <zhi.a.wang@intel.com>
30  *
31  */
32 
33 #ifndef _GVT_MPT_H_
34 #define _GVT_MPT_H_
35 
36 /**
37  * DOC: Hypervisor Service APIs for GVT-g Core Logic
38  *
39  * This is the glue layer between specific hypervisor MPT modules and GVT-g core
40  * logic. Each kind of hypervisor MPT module provides a collection of function
41  * callbacks and will be attached to GVT host when the driver is loading.
42  * GVT-g core logic will call these APIs to request specific services from
43  * hypervisor.
44  */
45 
46 /**
47  * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
48  * hypervisor host/privilged domain
49  *
50  * Returns:
51  * Zero on success, -ENODEV if current kernel is running inside a VM
52  */
53 static inline int intel_gvt_hypervisor_detect_host(void)
54 {
55 	return intel_gvt_host.mpt->detect_host();
56 }
57 
58 /**
59  * intel_gvt_hypervisor_host_init - init GVT-g host side
60  *
61  * Returns:
62  * Zero on success, negative error code if failed
63  */
64 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
65 			void *gvt, const void *ops)
66 {
67 	/* optional to provide */
68 	if (!intel_gvt_host.mpt->host_init)
69 		return 0;
70 
71 	return intel_gvt_host.mpt->host_init(dev, gvt, ops);
72 }
73 
74 /**
75  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
76  */
77 static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
78 			void *gvt)
79 {
80 	/* optional to provide */
81 	if (!intel_gvt_host.mpt->host_exit)
82 		return;
83 
84 	intel_gvt_host.mpt->host_exit(dev, gvt);
85 }
86 
87 /**
88  * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
89  * related stuffs inside hypervisor.
90  *
91  * Returns:
92  * Zero on success, negative error code if failed.
93  */
94 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
95 {
96 	/* optional to provide */
97 	if (!intel_gvt_host.mpt->attach_vgpu)
98 		return 0;
99 
100 	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
101 }
102 
/**
 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
 * related stuffs inside hypervisor.
 * @vgpu: a vGPU
 */
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->detach_vgpu)
		return;

	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}
118 
/*
 * Byte offsets of the MSI capability fields, relative to the capability
 * base offset in config space (Message Control at +2, Message Address
 * at +4, Message Data at +8 — TODO confirm the +8 data offset assumes the
 * 32-bit address format). Arguments are fully parenthesized so expression
 * arguments (e.g. a conditional) expand correctly.
 */
#define MSI_CAP_CONTROL(offset) ((offset) + 2)
#define MSI_CAP_ADDRESS(offset) ((offset) + 4)
#define MSI_CAP_DATA(offset) ((offset) + 8)
#define MSI_CAP_EN 0x1
123 
124 /**
125  * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
126  *
127  * Returns:
128  * Zero on success, negative error code if failed.
129  */
130 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
131 {
132 	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
133 	u16 control, data;
134 	u32 addr;
135 	int ret;
136 
137 	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
138 	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
139 	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
140 
141 	/* Do not generate MSI if MSIEN is disable */
142 	if (!(control & MSI_CAP_EN))
143 		return 0;
144 
145 	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
146 		return -EINVAL;
147 
148 	gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
149 		    data);
150 
151 	ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
152 	if (ret)
153 		return ret;
154 	return 0;
155 }
156 
/**
 * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
 * @p: host kernel virtual address
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
	return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
168 
169 /**
170  * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
171  * @vgpu: a vGPU
172  * @p: intel_vgpu_guest_page
173  *
174  * Returns:
175  * Zero on success, negative error code if failed.
176  */
177 static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
178 		struct intel_vgpu_guest_page *p)
179 {
180 	int ret;
181 
182 	if (p->writeprotection)
183 		return 0;
184 
185 	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
186 	if (ret)
187 		return ret;
188 	p->writeprotection = true;
189 	atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
190 	return 0;
191 }
192 
193 /**
194  * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
195  * guest page
196  * @vgpu: a vGPU
197  * @p: intel_vgpu_guest_page
198  *
199  * Returns:
200  * Zero on success, negative error code if failed.
201  */
202 static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
203 		struct intel_vgpu_guest_page *p)
204 {
205 	int ret;
206 
207 	if (!p->writeprotection)
208 		return 0;
209 
210 	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
211 	if (ret)
212 		return ret;
213 	p->writeprotection = false;
214 	atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
215 	return 0;
216 }
217 
218 /**
219  * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
220  * @vgpu: a vGPU
221  * @gpa: guest physical address
222  * @buf: host data buffer
223  * @len: data length
224  *
225  * Returns:
226  * Zero on success, negative error code if failed.
227  */
228 static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
229 		unsigned long gpa, void *buf, unsigned long len)
230 {
231 	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
232 }
233 
234 /**
235  * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
236  * @vgpu: a vGPU
237  * @gpa: guest physical address
238  * @buf: host data buffer
239  * @len: data length
240  *
241  * Returns:
242  * Zero on success, negative error code if failed.
243  */
244 static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
245 		unsigned long gpa, void *buf, unsigned long len)
246 {
247 	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
248 }
249 
250 /**
251  * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
252  * @vgpu: a vGPU
253  * @gpfn: guest pfn
254  *
255  * Returns:
256  * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
257  */
258 static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
259 		struct intel_vgpu *vgpu, unsigned long gfn)
260 {
261 	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
262 }
263 
264 /**
265  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
266  * @vgpu: a vGPU
267  * @gfn: guest PFN
268  * @mfn: host PFN
269  * @nr: amount of PFNs
270  * @map: map or unmap
271  *
272  * Returns:
273  * Zero on success, negative error code if failed.
274  */
275 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
276 		struct intel_vgpu *vgpu, unsigned long gfn,
277 		unsigned long mfn, unsigned int nr,
278 		bool map)
279 {
280 	/* a MPT implementation could have MMIO mapped elsewhere */
281 	if (!intel_gvt_host.mpt->map_gfn_to_mfn)
282 		return 0;
283 
284 	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
285 						  map);
286 }
287 
288 /**
289  * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
290  * @vgpu: a vGPU
291  * @start: the beginning of the guest physical address region
292  * @end: the end of the guest physical address region
293  * @map: map or unmap
294  *
295  * Returns:
296  * Zero on success, negative error code if failed.
297  */
298 static inline int intel_gvt_hypervisor_set_trap_area(
299 		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
300 {
301 	/* a MPT implementation could have MMIO trapped elsewhere */
302 	if (!intel_gvt_host.mpt->set_trap_area)
303 		return 0;
304 
305 	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
306 }
307 
308 #endif /* _GVT_MPT_H_ */
309