xref: /dragonfly/sys/dev/drm/i915/gvt/mpt.h (revision 26720ae0)
1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Eddie Dong <eddie.dong@intel.com>
25  *    Dexuan Cui
26  *    Jike Song <jike.song@intel.com>
27  *
28  * Contributors:
29  *    Zhi Wang <zhi.a.wang@intel.com>
30  *
31  */
32 
33 #ifndef _GVT_MPT_H_
34 #define _GVT_MPT_H_
35 
/**
 * DOC: Hypervisor Service APIs for GVT-g Core Logic
 *
 * This is the glue layer between specific hypervisor MPT modules and the
 * GVT-g core logic. Each kind of hypervisor MPT module provides a collection
 * of function callbacks and is attached to the GVT host while the driver is
 * loading. The GVT-g core logic calls these APIs to request specific services
 * from the hypervisor.
 */
45 
46 /**
47  * intel_gvt_hypervisor_host_init - init GVT-g host side
48  *
49  * Returns:
50  * Zero on success, negative error code if failed
51  */
52 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
53 			void *gvt, const void *ops)
54 {
55 	/* optional to provide */
56 	if (!intel_gvt_host.mpt->host_init)
57 		return 0;
58 
59 	return intel_gvt_host.mpt->host_init(dev, gvt, ops);
60 }
61 
62 /**
63  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
64  */
65 static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
66 			void *gvt)
67 {
68 	/* optional to provide */
69 	if (!intel_gvt_host.mpt->host_exit)
70 		return;
71 
72 	intel_gvt_host.mpt->host_exit(dev, gvt);
73 }
74 
75 /**
76  * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
77  * related stuffs inside hypervisor.
78  *
79  * Returns:
80  * Zero on success, negative error code if failed.
81  */
82 static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
83 {
84 	/* optional to provide */
85 	if (!intel_gvt_host.mpt->attach_vgpu)
86 		return 0;
87 
88 	return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
89 }
90 
/**
 * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
 * related stuffs inside hypervisor.
 * @vgpu: a vGPU whose hypervisor-side state (looked up via vgpu->handle)
 *        is to be released
 */
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
	/* optional to provide */
	if (!intel_gvt_host.mpt->detach_vgpu)
		return;

	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}
106 
/*
 * Byte offsets of MSI capability registers relative to the start of the
 * MSI capability structure in PCI config space. The macro parameter is
 * parenthesized so that low-precedence argument expressions (e.g. a
 * conditional expression) expand correctly.
 */
#define MSI_CAP_CONTROL(offset) ((offset) + 2)
#define MSI_CAP_ADDRESS(offset) ((offset) + 4)
#define MSI_CAP_DATA(offset) ((offset) + 8)
#define MSI_CAP_EN 0x1	/* MSI enable bit in the Message Control register */
111 
112 /**
113  * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
114  *
115  * Returns:
116  * Zero on success, negative error code if failed.
117  */
118 static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
119 {
120 	unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
121 	u16 control, data;
122 	u32 addr;
123 	int ret;
124 
125 	control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
126 	addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
127 	data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
128 
129 	/* Do not generate MSI if MSIEN is disable */
130 	if (!(control & MSI_CAP_EN))
131 		return 0;
132 
133 	if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
134 		return -EINVAL;
135 
136 	gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
137 		    data);
138 
139 	ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
140 	if (ret)
141 		return ret;
142 	return 0;
143 }
144 
/**
 * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN
 * @p: host kernel virtual address
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
{
	return intel_gvt_host.mpt->from_virt_to_mfn(p);
}
156 
157 /**
158  * intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
159  * @vgpu: a vGPU
160  * @p: intel_vgpu_guest_page
161  *
162  * Returns:
163  * Zero on success, negative error code if failed.
164  */
165 static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
166 		struct intel_vgpu_guest_page *p)
167 {
168 	int ret;
169 
170 	if (p->writeprotection)
171 		return 0;
172 
173 	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
174 	if (ret)
175 		return ret;
176 	p->writeprotection = true;
177 	atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
178 	return 0;
179 }
180 
181 /**
182  * intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
183  * guest page
184  * @vgpu: a vGPU
185  * @p: intel_vgpu_guest_page
186  *
187  * Returns:
188  * Zero on success, negative error code if failed.
189  */
190 static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
191 		struct intel_vgpu_guest_page *p)
192 {
193 	int ret;
194 
195 	if (!p->writeprotection)
196 		return 0;
197 
198 	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
199 	if (ret)
200 		return ret;
201 	p->writeprotection = false;
202 	atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
203 	return 0;
204 }
205 
/**
 * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
 * @vgpu: a vGPU
 * @gpa: guest physical address to read from
 * @buf: host data buffer to fill
 * @len: data length in bytes
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}
221 
/**
 * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
 * @vgpu: a vGPU
 * @gpa: guest physical address to write to
 * @buf: host data buffer to copy from
 * @len: data length in bytes
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
}
237 
/**
 * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
 * @vgpu: a vGPU
 * @gfn: guest pfn
 *
 * Returns:
 * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
 */
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}
251 
252 /**
253  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
254  * @vgpu: a vGPU
255  * @gfn: guest PFN
256  * @mfn: host PFN
257  * @nr: amount of PFNs
258  * @map: map or unmap
259  *
260  * Returns:
261  * Zero on success, negative error code if failed.
262  */
263 static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
264 		struct intel_vgpu *vgpu, unsigned long gfn,
265 		unsigned long mfn, unsigned int nr,
266 		bool map)
267 {
268 	/* a MPT implementation could have MMIO mapped elsewhere */
269 	if (!intel_gvt_host.mpt->map_gfn_to_mfn)
270 		return 0;
271 
272 	return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
273 						  map);
274 }
275 
276 /**
277  * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
278  * @vgpu: a vGPU
279  * @start: the beginning of the guest physical address region
280  * @end: the end of the guest physical address region
281  * @map: map or unmap
282  *
283  * Returns:
284  * Zero on success, negative error code if failed.
285  */
286 static inline int intel_gvt_hypervisor_set_trap_area(
287 		struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
288 {
289 	/* a MPT implementation could have MMIO trapped elsewhere */
290 	if (!intel_gvt_host.mpt->set_trap_area)
291 		return 0;
292 
293 	return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
294 }
295 
296 #endif /* _GVT_MPT_H_ */
297