xref: /dragonfly/sys/dev/drm/i915/gvt/mpt.h (revision 5ca0a96d)
1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Eddie Dong <eddie.dong@intel.com>
25  *    Dexuan Cui
26  *    Jike Song <jike.song@intel.com>
27  *
28  * Contributors:
29  *    Zhi Wang <zhi.a.wang@intel.com>
30  *
31  */
32 
33 #ifndef _GVT_MPT_H_
34 #define _GVT_MPT_H_
35 
36 /**
37  * DOC: Hypervisor Service APIs for GVT-g Core Logic
38  *
 * This is the glue layer between specific hypervisor MPT modules and GVT-g core
 * logic. Each kind of hypervisor MPT module provides a collection of function
 * callbacks and is attached to the GVT host while the driver is loading.
 * GVT-g core logic calls these APIs to request specific services from the
 * hypervisor.
 */
45 
46 /**
47  * intel_gvt_hypervisor_host_init - init GVT-g host side
48  *
49  * Returns:
50  * Zero on success, negative error code if failed
51  */
52 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53 			void *gvt, const void *ops)
54 {
55 	/* optional to provide */
56 	if (!intel_gvt_host.mpt->host_init)
57 		return 0;
58 
59 	return intel_gvt_host.mpt->host_init(dev, gvt, ops);
60 }
61 
62 /**
63  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
64  */
65 static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
66 			void *gvt)
67 {
68 	/* optional to provide */
69 	if (!intel_gvt_host.mpt->host_exit)
70 		return;
71 
72 	intel_gvt_host.mpt->host_exit(dev, gvt);
73 }
74 
75 /**
76  * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
77  * related stuffs inside hypervisor.
78  *
79  * Returns:
80  * Zero on success, negative error code if failed.
81  */
82 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
83 {
84 	/* optional to provide */
85 	if (!intel_gvt_host.mpt->attach_vgpu)
86 		return 0;
87 
88 	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
89 }
90 
91 /**
92  * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
93  * related stuffs inside hypervisor.
94  *
95  * Returns:
96  * Zero on success, negative error code if failed.
97  */
98 static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
99 {
100 	/* optional to provide */
101 	if (!intel_gvt_host.mpt->detach_vgpu)
102 		return;
103 
104 	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
105 }
106 
/*
 * MSI capability register offsets, relative to the capability base
 * (PCI MSI capability structure layout). Arguments are fully
 * parenthesized so expressions can be passed safely.
 */
#define MSI_CAP_CONTROL(offset)	((offset) + 2)	/* Message Control */
#define MSI_CAP_ADDRESS(offset)	((offset) + 4)	/* Message Address */
#define MSI_CAP_DATA(offset)	((offset) + 8)	/* Message Data */
#define MSI_CAP_EN 0x1				/* MSI Enable bit */
111 
112 /**
113  * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
114  *
115  * Returns:
116  * Zero on success, negative error code if failed.
117  */
118 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
119 {
120 	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
121 	u16 control, data;
122 	u32 addr;
123 	int ret;
124 
125 	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
126 	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
127 	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
128 
129 	/* Do not generate MSI if MSIEN is disable */
130 	if (!(control & MSI_CAP_EN))
131 		return 0;
132 
133 	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
134 		return -EINVAL;
135 
136 	trace_inject_msi(vgpu->id, addr, data);
137 
138 	ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
139 	if (ret)
140 		return ret;
141 	return 0;
142 }
143 
144 /**
145  * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
146  * @p: host kernel virtual address
147  *
148  * Returns:
149  * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
150  */
151 static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
152 {
153 	return intel_gvt_host.mpt->from_virt_to_mfn(p);
154 }
155 
156 /**
157  * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
158  * @vgpu: a vGPU
159  * @p: intel_vgpu_guest_page
160  *
161  * Returns:
162  * Zero on success, negative error code if failed.
163  */
164 static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
165 		struct intel_vgpu_guest_page *p)
166 {
167 	int ret;
168 
169 	if (p->writeprotection)
170 		return 0;
171 
172 	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
173 	if (ret)
174 		return ret;
175 	p->writeprotection = true;
176 	atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
177 	return 0;
178 }
179 
180 /**
181  * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
182  * guest page
183  * @vgpu: a vGPU
184  * @p: intel_vgpu_guest_page
185  *
186  * Returns:
187  * Zero on success, negative error code if failed.
188  */
189 static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
190 		struct intel_vgpu_guest_page *p)
191 {
192 	int ret;
193 
194 	if (!p->writeprotection)
195 		return 0;
196 
197 	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
198 	if (ret)
199 		return ret;
200 	p->writeprotection = false;
201 	atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
202 	return 0;
203 }
204 
205 /**
206  * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
207  * @vgpu: a vGPU
208  * @gpa: guest physical address
209  * @buf: host data buffer
210  * @len: data length
211  *
212  * Returns:
213  * Zero on success, negative error code if failed.
214  */
215 static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
216 		unsigned long gpa, void *buf, unsigned long len)
217 {
218 	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
219 }
220 
221 /**
222  * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
223  * @vgpu: a vGPU
224  * @gpa: guest physical address
225  * @buf: host data buffer
226  * @len: data length
227  *
228  * Returns:
229  * Zero on success, negative error code if failed.
230  */
231 static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
232 		unsigned long gpa, void *buf, unsigned long len)
233 {
234 	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
235 }
236 
237 /**
238  * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
239  * @vgpu: a vGPU
240  * @gpfn: guest pfn
241  *
242  * Returns:
243  * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
244  */
245 static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
246 		struct intel_vgpu *vgpu, unsigned long gfn)
247 {
248 	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
249 }
250 
251 /**
252  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
253  * @vgpu: a vGPU
254  * @gfn: guest PFN
255  * @mfn: host PFN
256  * @nr: amount of PFNs
257  * @map: map or unmap
258  *
259  * Returns:
260  * Zero on success, negative error code if failed.
261  */
262 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
263 		struct intel_vgpu *vgpu, unsigned long gfn,
264 		unsigned long mfn, unsigned int nr,
265 		bool map)
266 {
267 	/* a MPT implementation could have MMIO mapped elsewhere */
268 	if (!intel_gvt_host.mpt->map_gfn_to_mfn)
269 		return 0;
270 
271 	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
272 						  map);
273 }
274 
275 /**
276  * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
277  * @vgpu: a vGPU
278  * @start: the beginning of the guest physical address region
279  * @end: the end of the guest physical address region
280  * @map: map or unmap
281  *
282  * Returns:
283  * Zero on success, negative error code if failed.
284  */
285 static inline int intel_gvt_hypervisor_set_trap_area(
286 		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
287 {
288 	/* a MPT implementation could have MMIO trapped elsewhere */
289 	if (!intel_gvt_host.mpt->set_trap_area)
290 		return 0;
291 
292 	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
293 }
294 
295 #endif /* _GVT_MPT_H_ */
296