xref: /openbsd/sys/arch/sparc64/include/hypervisor.h (revision 771fbea0)
1 /*	$OpenBSD: hypervisor.h,v 1.20 2019/10/20 16:27:19 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 2008 Mark Kettenis
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * UltraSPARC Hypervisor API.
21  */
22 
/*
 * API versioning
 */

/*
 * Retrieve the major/minor version numbers currently in effect for
 * the given API group; returns one of the H_* codes below.
 */
int64_t	hv_api_get_version(uint64_t api_group,
	    uint64_t *major_number, uint64_t *minor_number);
/*
 * Domain services
 */

/*
 * Copy the machine description (hv_mach_desc) or physical resource
 * inventory (hv_mach_pri) into the buffer at real address "buffer".
 * On input *length is the buffer size; on output it is the actual
 * size of the data.
 */
int64_t hv_mach_desc(paddr_t buffer, psize_t *length);
int64_t hv_mach_pri(paddr_t buffer, psize_t *length);
/*
 * CPU services
 */

/* Cede the calling virtual CPU's cycles back to the hypervisor. */
void	hv_cpu_yield(void);
/* Configure one of the per-CPU queues below; "base" is the queue's
 * real address and "nentries" its size in entries. */
int64_t	hv_cpu_qconf(uint64_t queue, uint64_t base, uint64_t nentries);

/* "queue" values for hv_cpu_qconf(). */
#define CPU_MONDO_QUEUE			0x3c
#define DEVICE_MONDO_QUEUE		0x3d
#define RESUMABLE_ERROR_QUEUE		0x3e
#define NONRESUMABLE_ERROR_QUEUE	0x3f

/* Send a CPU mondo with payload "data" to the ncpus listed at cpulist. */
int64_t	hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
/* Return the virtual CPU id of the calling CPU in *cpuid. */
int64_t	hv_cpu_myid(uint64_t *cpuid);
/*
 * MMU services
 */

/* Demap a single page, all mappings of a context, or everything. */
int64_t	hv_mmu_demap_page(vaddr_t vaddr, uint64_t context, uint64_t flags);
int64_t	hv_mmu_demap_ctx(uint64_t context, uint64_t flags);
int64_t	hv_mmu_demap_all(uint64_t flags);
/* Install/remove a permanent mapping (survives context demaps). */
int64_t	hv_mmu_map_perm_addr(vaddr_t vaddr, uint64_t tte, uint64_t flags);
int64_t	hv_mmu_unmap_perm_addr(vaddr_t vaddr, uint64_t flags);
/* Install/remove a (non-permanent) mapping for the given context. */
int64_t	hv_mmu_map_addr(vaddr_t vaddr, uint64_t context, uint64_t tte,
	    uint64_t flags);
int64_t	hv_mmu_unmap_addr(vaddr_t vaddr, uint64_t context, uint64_t flags);

/* "flags" values for the MMU services above; may be OR'ed together. */
#define MAP_DTLB	0x1
#define MAP_ITLB	0x2

/*
 * TSB descriptor handed to the hypervisor by hv_mmu_tsb_ctx0() and
 * hv_mmu_tsb_ctxnon0().  The layout is dictated by the sun4v API;
 * do not reorder or resize the fields.
 */
struct tsb_desc {
	uint16_t	td_idxpgsz;	/* page size used to index the TSB */
	uint16_t	td_assoc;	/* associativity of the TSB */
	uint32_t	td_size;	/* TSB size, in entries */
	uint32_t	td_ctxidx;	/* context index */
	uint32_t	td_pgsz;	/* supported page size bitmask */
	paddr_t		td_pa;		/* real address of the TSB */
	uint64_t	td_reserved;	/* reserved; must be zero */
};

/* Install "ntsb" TSB descriptors (array at real address tsbptr) for
 * context zero and for non-zero contexts, respectively. */
int64_t	hv_mmu_tsb_ctx0(uint64_t ntsb, paddr_t tsbptr);
int64_t	hv_mmu_tsb_ctxnon0(uint64_t ntsb, paddr_t tsbptr);
/*
 * Cache and memory services
 */

/* Scrub (zero) or sync (flush caches for) the real address range
 * [raddr, raddr + length). */
int64_t	hv_mem_scrub(paddr_t raddr, psize_t length);
int64_t	hv_mem_sync(paddr_t raddr, psize_t length);
/*
 * Device interrupt services
 */

/* Translate a (devhandle, devino) pair into a system interrupt number. */
int64_t	hv_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino,
	    uint64_t *sysino);
/* Get/set whether the given sysino is enabled (INTR_DISABLED/ENABLED). */
int64_t	hv_intr_getenabled(uint64_t sysino, uint64_t *intr_enabled);
int64_t	hv_intr_setenabled(uint64_t sysino, uint64_t intr_enabled);
/* Get/set the delivery state of the given sysino (INTR_IDLE/...). */
int64_t	hv_intr_getstate(uint64_t sysino, uint64_t *intr_state);
int64_t	hv_intr_setstate(uint64_t sysino, uint64_t intr_state);
/* Get/set the CPU the given sysino is targeted at. */
int64_t	hv_intr_gettarget(uint64_t sysino, uint64_t *cpuid);
int64_t	hv_intr_settarget(uint64_t sysino, uint64_t cpuid);

/* intr_enabled values. */
#define INTR_DISABLED	0
#define INTR_ENABLED	1

/* intr_state values. */
#define INTR_IDLE	0
#define INTR_RECEIVED	1
#define INTR_DELIVERED	2
/*
 * Cookie-based ("virtual") interrupt services.  These identify an
 * interrupt by its (devhandle, devino) pair instead of a sysino and
 * mirror the hv_intr_* calls above; the INTR_* values above apply.
 */
int64_t	hv_vintr_getcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t *cookie_value);
int64_t	hv_vintr_setcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t cookie_value);
int64_t	hv_vintr_getenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_enabled);
int64_t	hv_vintr_setenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_enabled);
int64_t	hv_vintr_getstate(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_state);
int64_t	hv_vintr_setstate(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_state);
int64_t	hv_vintr_gettarget(uint64_t devhandle, uint64_t devino,
	    uint64_t *cpuid);
int64_t	hv_vintr_settarget(uint64_t devhandle, uint64_t devino,
	    uint64_t cpuid);
/*
 * Time of day services
 */

/* Get/set the domain's time of day, in seconds.
 * NOTE(review): presumably seconds since the Unix epoch per the sun4v
 * API -- confirm against the callers in the clock code. */
int64_t	hv_tod_get(uint64_t *tod);
int64_t	hv_tod_set(uint64_t tod);
/*
 * Console services
 */

/* Read one character; *ch may also be CONS_BREAK or CONS_HUP below. */
int64_t	hv_cons_getchar(int64_t *ch);
int64_t	hv_cons_putchar(int64_t ch);
/* NOTE(review): alternate putchar entry point; see the hypervisor call
 * stubs for how it differs from hv_cons_putchar -- confirm. */
int64_t	hv_api_putchar(int64_t ch);

/* Special values returned through *ch by hv_cons_getchar(). */
#define CONS_BREAK	-1
#define CONS_HUP	-2
/*
 * Domain state services
 */

/* Report the guest's software state (SIS_* below) together with a
 * description string at real address software_description_ptr. */
int64_t	hv_soft_state_set(uint64_t software_state,
	    paddr_t software_description_ptr);

/* software_state values. */
#define SIS_NORMAL	0x1
#define SIS_TRANSITION	0x2
153 /*
154  * PCI I/O services
155  */
156 
157 int64_t	hv_pci_iommu_map(uint64_t devhandle, uint64_t tsbid,
158 	    uint64_t nttes, uint64_t io_attributes, paddr_t io_page_list_p,
159 	    uint64_t *nttes_mapped);
160 int64_t	hv_pci_iommu_demap(uint64_t devhandle, uint64_t tsbid,
161 	    uint64_t nttes, uint64_t *nttes_demapped);
162 int64_t	hv_pci_iommu_getmap(uint64_t devhandle, uint64_t tsbid,
163 	    uint64_t *io_attributes, paddr_t *r_addr);
164 int64_t	hv_pci_iommu_getbypass(uint64_t devhandle, paddr_t r_addr,
165 	    uint64_t io_attributes, uint64_t *io_addr);
166 
167 int64_t	hv_pci_config_get(uint64_t devhandle, uint64_t pci_device,
168             uint64_t pci_config_offset, uint64_t size,
169 	    uint64_t *error_flag, uint64_t *data);
170 int64_t	hv_pci_config_put(uint64_t devhandle, uint64_t pci_device,
171             uint64_t pci_config_offset, uint64_t size, uint64_t data,
172 	    uint64_t *error_flag);
173 
174 #define PCI_MAP_ATTR_READ  0x01		/* From memory */
175 #define PCI_MAP_ATTR_WRITE 0x02		/* To memory */
176 
/*
 * PCI MSI services
 */

/* Configure/query an MSI event queue: r_addr is the queue's real
 * address, nentries its size in entries. */
int64_t hv_pci_msiq_conf(uint64_t devhandle, uint64_t msiqid,
	    uint64_t r_addr, uint64_t nentries);
int64_t hv_pci_msiq_info(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *r_addr, uint64_t *nentries);

/* Get/set the validity of an MSI event queue (PCI_MSIQ_* below). */
int64_t hv_pci_msiq_getvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqvalid);
int64_t hv_pci_msiq_setvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqvalid);

#define PCI_MSIQ_INVALID	0
#define PCI_MSIQ_VALID		1

/* Get/set the state of an MSI event queue (PCI_MSIQSTATE_* below). */
int64_t hv_pci_msiq_getstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqstate);
int64_t hv_pci_msiq_setstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqstate);

#define PCI_MSIQSTATE_IDLE	0
#define PCI_MSIQSTATE_ERROR	1

/* Get/set the head and tail offsets of an MSI event queue. */
int64_t hv_pci_msiq_gethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqhead);
int64_t hv_pci_msiq_sethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqhead);
int64_t hv_pci_msiq_gettail(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqtail);
/* Get/set the validity of an individual MSI (PCI_MSI_* below). */
int64_t hv_pci_msi_getvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msivalidstate);
int64_t hv_pci_msi_setvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t msivalidstate);

#define PCI_MSI_INVALID		0
#define PCI_MSI_VALID		1

/* Get/set the event queue an MSI is bound to. */
int64_t hv_pci_msi_getmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msiqid);
int64_t hv_pci_msi_setmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t msitype, uint64_t msiqid);

/* Get/set the delivery state of an MSI (PCI_MSISTATE_* below). */
int64_t hv_pci_msi_getstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msistate);
int64_t hv_pci_msi_setstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t msistate);

#define PCI_MSISTATE_IDLE	0
#define PCI_MSISTATE_DELIVERED	1

/* Get/set the event queue a PCIe message (PCIE_*_MSG below) is
 * bound to. */
int64_t hv_pci_msg_getmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t *msiqid);
int64_t hv_pci_msg_setmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t msiqid);

/* Get/set the validity of a PCIe message (PCIE_MSG_* below). */
int64_t hv_pci_msg_getvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t *msgvalidstate);
int64_t hv_pci_msg_setvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t msgvalidstate);

#define PCIE_MSG_INVALID	0
#define PCIE_MSG_VALID		1

/* PCIe message codes. */
#define PCIE_PME_MSG		0x18
#define PCIE_PME_ACK_MSG	0x1b
#define PCIE_CORR_MSG		0x30
#define PCIE_NONFATAL_MSG	0x31
#define PCIE_FATAL_MSG		0x32
/*
 * Logical Domain Channel services
 */

/* Configure/query/drive the transmit queue of channel ldc_id;
 * base_raddr is the queue's real address, nentries its size. */
int64_t hv_ldc_tx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_tx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_tx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_tx_set_qtail(uint64_t ldc_id, uint64_t tail_offset);
/* Receive-side counterparts of the calls above. */
int64_t hv_ldc_rx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_rx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_rx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_rx_set_qhead(uint64_t ldc_id, uint64_t head_offset);

/* channel_state values. */
#define LDC_CHANNEL_DOWN	0
#define LDC_CHANNEL_UP		1
#define LDC_CHANNEL_RESET	2

/* Used by drivers only, not part of the hypervisor API. */
#define LDC_CHANNEL_INIT	((uint64_t)-1)

/* Set/get the map table used to export memory over the channel. */
int64_t	hv_ldc_set_map_table(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t	hv_ldc_get_map_table(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
/* Copy data in or out (per "flags", LDC_COPY_* below) of memory
 * exported under "cookie". */
int64_t hv_ldc_copy(uint64_t ldc_id, uint64_t flags, uint64_t cookie,
	    paddr_t raddr, psize_t length, psize_t *ret_length);

/* "flags" values for hv_ldc_copy(). */
#define LDC_COPY_IN		0
#define LDC_COPY_OUT		1

/* Map in/unmap memory exported by the peer under "cookie". */
int64_t hv_ldc_mapin(uint64_t ldc_id, uint64_t cookie, paddr_t *raddr,
	    uint64_t *perms);
int64_t hv_ldc_unmap(paddr_t raddr, uint64_t *perms);
/*
 * Static Direct I/O services
 */

int64_t hv_pci_iov_root_configured(uint64_t devhandle);
/* Like hv_pci_config_get/put() above, but operate on the real
 * (unvirtualized) configuration space; same argument conventions. */
int64_t	hv_pci_real_config_get(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size,
	    uint64_t *error_flag, uint64_t *data);
int64_t	hv_pci_real_config_put(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size, uint64_t data,
	    uint64_t *error_flag);
/* Forward an error interrupt for pci_device to (devhandle, devino). */
int64_t hv_pci_error_send(uint64_t devhandle, uint64_t devino,
	    uint64_t pci_device);
303 /*
304  * Cryptographic services
305  */
306 
307 int64_t	hv_rng_get_diag_control(void);
308 int64_t	hv_rng_ctl_read(paddr_t raddr, uint64_t *state, uint64_t *delta);
309 int64_t	hv_rng_ctl_write(paddr_t raddr, uint64_t state, uint64_t timeout,
310 	uint64_t *delta);
311 
312 #define RNG_STATE_UNCONFIGURED	0
313 #define RNG_STATE_CONFIGURED	1
314 #define RNG_STATE_HEALTHCHECK	2
315 #define RNG_STATE_ERROR		3
316 
317 int64_t	hv_rng_data_read_diag(paddr_t raddr, uint64_t size, uint64_t *delta);
318 int64_t	hv_rng_data_read(paddr_t raddr, uint64_t *delta);
319 
/*
 * Error codes returned by the hypervisor services above.
 */

#define H_EOK		0	/* Successful return */
#define H_ENOCPU	1	/* Invalid CPU id */
#define H_ENORADDR	2	/* Invalid real address */
#define H_ENOINTR	3	/* Invalid interrupt id */
#define H_EBADPGSZ	4	/* Invalid page size */
#define H_EBADTSB	5	/* Invalid TSB description */
#define H_EINVAL	6	/* Invalid argument */
#define H_EBADTRAP	7	/* Invalid function number */
#define H_EBADALIGN	8	/* Invalid address alignment */
#define H_EWOULDBLOCK	9	/* Cannot complete without blocking */
#define H_ENOACCESS	10	/* No access to specified resource */
#define H_EIO		11	/* I/O error */
#define H_ECPUERROR	12	/* CPU is in error state */
#define H_ENOTSUPPORTED	13	/* Function not supported */
#define H_ENOMAP	14	/* No mapping found */
#define H_ETOOMANY	15	/* Too many items specified */
#define H_ECHANNEL	16	/* Invalid LDC channel */
/* Negotiated major version numbers of selected API groups; defined
 * and filled in elsewhere during hypervisor API negotiation. */
extern uint64_t sun4v_group_interrupt_major;
extern uint64_t sun4v_group_sdio_major;

/* NOTE(review): wrappers around the interrupt services; presumably
 * they dispatch to the sysino-based (hv_intr_*) or cookie-based
 * (hv_vintr_*) calls depending on sun4v_group_interrupt_major --
 * confirm in the implementation. */
int64_t sun4v_intr_devino_to_sysino(uint64_t, uint64_t, uint64_t *);
int64_t sun4v_intr_setcookie(uint64_t, uint64_t, uint64_t);
int64_t sun4v_intr_setenabled(uint64_t, uint64_t, uint64_t);
int64_t	sun4v_intr_setstate(uint64_t, uint64_t, uint64_t);
int64_t	sun4v_intr_settarget(uint64_t, uint64_t, uint64_t);
350