/*	$OpenBSD: hypervisor.h,v 1.21 2024/04/08 20:00:27 miod Exp $	*/

/*
 * Copyright (c) 2008 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * UltraSPARC Hypervisor API.
 */

/*
 * API versioning
 */

int64_t	hv_api_get_version(uint64_t api_group,
	    uint64_t *major_number, uint64_t *minor_number);

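/*
 * Illustrative sketch, not part of the original header: a caller could
 * probe whether an API group is available, and which major version was
 * negotiated, roughly as below.  "group" is assumed to be one of the
 * sun4v API group numbers; H_EOK is defined at the end of this file.
 *
 *	uint64_t major, minor;
 *
 *	if (hv_api_get_version(group, &major, &minor) != H_EOK)
 *		return (0);
 *	return (major);
 */
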
/*
 * Domain services
 */

int64_t hv_mach_desc(paddr_t buffer, psize_t *length);
int64_t hv_mach_pri(paddr_t buffer, psize_t *length);

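/*
 * Illustrative sketch, not part of the original header: hv_mach_desc()
 * copies the machine description into the given buffer and updates
 * *length; when the buffer is too small the required size is reported
 * back, so callers typically retry with a larger buffer.  "buf_pa" and
 * "buf_len" are hypothetical names here.
 *
 *	psize_t len = buf_len;
 *
 *	if (hv_mach_desc(buf_pa, &len) != H_EOK) {
 *		... grow the buffer to len bytes and retry ...
 *	}
 */
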
/*
 * CPU services
 */

void	hv_cpu_yield(void);
int64_t	hv_cpu_qconf(uint64_t queue, uint64_t base, uint64_t nentries);

#define CPU_MONDO_QUEUE			0x3c
#define DEVICE_MONDO_QUEUE		0x3d
#define RESUMABLE_ERROR_QUEUE		0x3e
#define NONRESUMABLE_ERROR_QUEUE	0x3f

int64_t	hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
int64_t	hv_cpu_myid(uint64_t *cpuid);

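/*
 * Illustrative sketch, not part of the original header: per-CPU mondo
 * and error queues are registered with hv_cpu_qconf() using the queue
 * identifiers above; "queue_pa" and "NENTRIES" are assumed to describe
 * an appropriately aligned, physically contiguous buffer set up by the
 * caller.
 *
 *	if (hv_cpu_qconf(CPU_MONDO_QUEUE, queue_pa, NENTRIES) != H_EOK)
 *		panic("hv_cpu_qconf");
 */
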
/*
 * MMU services
 */

int64_t	hv_mmu_demap_page(vaddr_t vaddr, uint64_t context, uint64_t flags);
int64_t	hv_mmu_demap_ctx(uint64_t context, uint64_t flags);
int64_t	hv_mmu_map_perm_addr(vaddr_t vaddr, uint64_t tte, uint64_t flags);
int64_t	hv_mmu_unmap_perm_addr(vaddr_t vaddr, uint64_t flags);
int64_t	hv_mmu_map_addr(vaddr_t vaddr, uint64_t context, uint64_t tte,
	    uint64_t flags);
int64_t	hv_mmu_unmap_addr(vaddr_t vaddr, uint64_t context, uint64_t flags);

#define MAP_DTLB	0x1
#define MAP_ITLB	0x2

struct tsb_desc {
	uint16_t	td_idxpgsz;
	uint16_t	td_assoc;
	uint32_t	td_size;
	uint32_t	td_ctxidx;
	uint32_t	td_pgsz;
	paddr_t		td_pa;
	uint64_t	td_reserved;
};

int64_t	hv_mmu_tsb_ctx0(uint64_t ntsb, paddr_t tsbptr);
int64_t	hv_mmu_tsb_ctxnon0(uint64_t ntsb, paddr_t tsbptr);

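/*
 * Illustrative sketch, not part of the original header: a TSB is
 * registered by filling in a tsb_desc and passing the real address of
 * the descriptor (not of the TSB itself) to hv_mmu_tsb_ctx0() or
 * hv_mmu_tsb_ctxnon0().  "tsb_pa", "desc_pa" and "ntsbents" are
 * hypothetical; the exact field encodings follow the sun4v TSB
 * descriptor definition.
 *
 *	struct tsb_desc td;
 *
 *	memset(&td, 0, sizeof(td));
 *	td.td_assoc = 1;
 *	td.td_size = ntsbents;
 *	td.td_pa = tsb_pa;
 *	if (hv_mmu_tsb_ctxnon0(1, desc_pa) != H_EOK)
 *		panic("hv_mmu_tsb_ctxnon0");
 */
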
/*
 * Cache and memory services
 */

int64_t	hv_mem_scrub(paddr_t raddr, psize_t length);
int64_t	hv_mem_sync(paddr_t raddr, psize_t length);

/*
 * Device interrupt services
 */

int64_t	hv_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino,
	    uint64_t *sysino);
int64_t	hv_intr_getenabled(uint64_t sysino, uint64_t *intr_enabled);
int64_t	hv_intr_setenabled(uint64_t sysino, uint64_t intr_enabled);
int64_t	hv_intr_getstate(uint64_t sysino, uint64_t *intr_state);
int64_t	hv_intr_setstate(uint64_t sysino, uint64_t intr_state);
int64_t	hv_intr_gettarget(uint64_t sysino, uint64_t *cpuid);
int64_t	hv_intr_settarget(uint64_t sysino, uint64_t cpuid);

#define INTR_DISABLED	0
#define INTR_ENABLED	1

#define INTR_IDLE	0
#define INTR_RECEIVED	1
#define INTR_DELIVERED	2

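/*
 * Illustrative sketch, not part of the original header: the usual
 * sysino-based setup converts a device INO into a system interrupt
 * number, targets it at a CPU and enables it.  "dh", "ino" and "ci"
 * are hypothetical stand-ins for a device handle, an INO taken from
 * the machine description and the chosen CPU id.
 *
 *	uint64_t sysino;
 *
 *	if (hv_intr_devino_to_sysino(dh, ino, &sysino) != H_EOK)
 *		return;
 *	hv_intr_settarget(sysino, ci);
 *	hv_intr_setstate(sysino, INTR_IDLE);
 *	hv_intr_setenabled(sysino, INTR_ENABLED);
 */
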
int64_t	hv_vintr_getcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t *cookie_value);
int64_t	hv_vintr_setcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t cookie_value);
int64_t	hv_vintr_getenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_enabled);
int64_t	hv_vintr_setenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_enabled);
int64_t	hv_vintr_getstate(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_state);
int64_t	hv_vintr_setstate(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_state);
int64_t	hv_vintr_gettarget(uint64_t devhandle, uint64_t devino,
	    uint64_t *cpuid);
int64_t	hv_vintr_settarget(uint64_t devhandle, uint64_t devino,
	    uint64_t cpuid);

/*
 * Time of day services
 */

int64_t	hv_tod_get(uint64_t *tod);
int64_t	hv_tod_set(uint64_t tod);

/*
 * Console services
 */

int64_t	hv_cons_getchar(int64_t *ch);
int64_t	hv_cons_putchar(int64_t ch);
int64_t	hv_api_putchar(int64_t ch);

#define CONS_BREAK	-1
#define CONS_HUP	-2

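/*
 * Illustrative sketch, not part of the original header: hv_cons_getchar()
 * is assumed to report "no character pending" through its status value
 * (H_EWOULDBLOCK), so a polled read loops until a character arrives and
 * also watches for the special CONS_BREAK/CONS_HUP values delivered
 * through *ch.
 *
 *	int64_t c;
 *
 *	while (hv_cons_getchar(&c) != H_EOK)
 *		;
 *	if (c == CONS_BREAK)
 *		... handle the break ...
 */
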
/*
 * Domain state services
 */

int64_t	hv_soft_state_set(uint64_t software_state,
	    paddr_t software_description_ptr);

#define SIS_NORMAL	0x1
#define SIS_TRANSITION	0x2

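/*
 * Illustrative sketch, not part of the original header: the guest
 * reports its state with hv_soft_state_set(), passing the real address
 * of a short NUL-terminated description string; "desc_pa" is a
 * hypothetical name for that address.
 *
 *	hv_soft_state_set(SIS_NORMAL, desc_pa);
 */
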
/*
 * PCI I/O services
 */

int64_t	hv_pci_iommu_map(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t io_attributes, paddr_t io_page_list_p,
	    uint64_t *nttes_mapped);
int64_t	hv_pci_iommu_demap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t *nttes_demapped);
int64_t	hv_pci_iommu_getmap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t *io_attributes, paddr_t *r_addr);
int64_t	hv_pci_iommu_getbypass(uint64_t devhandle, paddr_t r_addr,
	    uint64_t io_attributes, uint64_t *io_addr);

int64_t	hv_pci_config_get(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size,
	    uint64_t *error_flag, uint64_t *data);
int64_t	hv_pci_config_put(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size, uint64_t data,
	    uint64_t *error_flag);

#define PCI_MAP_ATTR_READ  0x01		/* From memory */
#define PCI_MAP_ATTR_WRITE 0x02		/* To memory */

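/*
 * Illustrative sketch, not part of the original header: loading DMA
 * translations hands the hypervisor a list of page real addresses for
 * a range of IOTTE entries; "list_pa" is assumed to be the real address
 * of an array of page addresses, "tsbid" the first entry to fill and
 * "npages" the count.  Partial mappings are possible, so the returned
 * count should be checked.
 *
 *	uint64_t mapped;
 *	int64_t err;
 *
 *	err = hv_pci_iommu_map(devhandle, tsbid, npages,
 *	    PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE, list_pa, &mapped);
 *	if (err != H_EOK || mapped != npages)
 *		... undo with hv_pci_iommu_demap() ...
 */
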
/*
 * PCI MSI services
 */

int64_t hv_pci_msiq_conf(uint64_t devhandle, uint64_t msiqid,
	    uint64_t r_addr, uint64_t nentries);
int64_t hv_pci_msiq_info(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *r_addr, uint64_t *nentries);

int64_t hv_pci_msiq_getvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqvalid);
int64_t hv_pci_msiq_setvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqvalid);

#define PCI_MSIQ_INVALID	0
#define PCI_MSIQ_VALID		1

int64_t hv_pci_msiq_getstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqstate);
int64_t hv_pci_msiq_setstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqstate);

#define PCI_MSIQSTATE_IDLE	0
#define PCI_MSIQSTATE_ERROR	1

int64_t hv_pci_msiq_gethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqhead);
int64_t hv_pci_msiq_sethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqhead);
int64_t hv_pci_msiq_gettail(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqtail);

int64_t hv_pci_msi_getvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msivalidstate);
int64_t hv_pci_msi_setvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t msivalidstate);

#define PCI_MSI_INVALID		0
#define PCI_MSI_VALID		1

int64_t hv_pci_msi_getmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msiqid);
int64_t hv_pci_msi_setmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t msitype, uint64_t msiqid);

int64_t hv_pci_msi_getstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msistate);
int64_t hv_pci_msi_setstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t msistate);

#define PCI_MSISTATE_IDLE	0
#define PCI_MSISTATE_DELIVERED	1

int64_t hv_pci_msg_getmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t *msiqid);
int64_t hv_pci_msg_setmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t msiqid);

int64_t hv_pci_msg_getvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t *msgvalidstate);
int64_t hv_pci_msg_setvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t msgvalidstate);

#define PCIE_MSG_INVALID	0
#define PCIE_MSG_VALID		1

#define PCIE_PME_MSG		0x18
#define PCIE_PME_ACK_MSG	0x1b
#define PCIE_CORR_MSG		0x30
#define PCIE_NONFATAL_MSG	0x31
#define PCIE_FATAL_MSG		0x32

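/*
 * Illustrative sketch, not part of the original header: bringing up an
 * MSI roughly amounts to configuring an event queue and then binding
 * the MSI to it.  The ordering below mirrors the declarations above;
 * "devhandle", "msiqid", "msinum", "msitype", "eq_pa" and "nentries"
 * are placeholders supplied by the caller.
 *
 *	hv_pci_msiq_conf(devhandle, msiqid, eq_pa, nentries);
 *	hv_pci_msiq_setvalid(devhandle, msiqid, PCI_MSIQ_VALID);
 *	hv_pci_msi_setmsiq(devhandle, msinum, msitype, msiqid);
 *	hv_pci_msi_setstate(devhandle, msinum, PCI_MSISTATE_IDLE);
 *	hv_pci_msi_setvalid(devhandle, msinum, PCI_MSI_VALID);
 */
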
/*
 * Logical Domain Channel services
 */

int64_t hv_ldc_tx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_tx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_tx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_tx_set_qtail(uint64_t ldc_id, uint64_t tail_offset);
int64_t hv_ldc_rx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_rx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_rx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_rx_set_qhead(uint64_t ldc_id, uint64_t head_offset);

#define LDC_CHANNEL_DOWN	0
#define LDC_CHANNEL_UP		1
#define LDC_CHANNEL_RESET	2

/* Used by drivers only, not part of the hypervisor API. */
#define LDC_CHANNEL_INIT	((uint64_t)-1)

int64_t	hv_ldc_set_map_table(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t	hv_ldc_get_map_table(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_copy(uint64_t ldc_id, uint64_t flags, uint64_t cookie,
	    paddr_t raddr, psize_t length, psize_t *ret_length);

#define LDC_COPY_IN		0
#define LDC_COPY_OUT		1

int64_t hv_ldc_mapin(uint64_t ldc_id, uint64_t cookie, paddr_t *raddr,
	    uint64_t *perms);
int64_t hv_ldc_unmap(paddr_t raddr, uint64_t *perms);

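/*
 * Illustrative sketch, not part of the original header: an LDC endpoint
 * registers its transmit and receive queues and then watches the
 * channel state reported by the get_state calls; "tx_pa", "rx_pa" and
 * "nentries" are placeholders for queue buffers sized by the caller.
 *
 *	uint64_t head, tail, state;
 *
 *	hv_ldc_tx_qconf(ldc_id, tx_pa, nentries);
 *	hv_ldc_rx_qconf(ldc_id, rx_pa, nentries);
 *	if (hv_ldc_rx_get_state(ldc_id, &head, &tail, &state) == H_EOK &&
 *	    state == LDC_CHANNEL_UP)
 *		... peer is up, start handshaking ...
 */
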
/*
 * Static Direct I/O services
 */

int64_t hv_pci_iov_root_configured(uint64_t devhandle);
int64_t	hv_pci_real_config_get(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size,
	    uint64_t *error_flag, uint64_t *data);
int64_t	hv_pci_real_config_put(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size, uint64_t data,
	    uint64_t *error_flag);
int64_t hv_pci_error_send(uint64_t devhandle, uint64_t devino,
	    uint64_t pci_device);

/*
 * Cryptographic services
 */

int64_t	hv_rng_get_diag_control(void);
int64_t	hv_rng_ctl_read(paddr_t raddr, uint64_t *state, uint64_t *delta);
int64_t	hv_rng_ctl_write(paddr_t raddr, uint64_t state, uint64_t timeout,
	    uint64_t *delta);

#define RNG_STATE_UNCONFIGURED	0
#define RNG_STATE_CONFIGURED	1
#define RNG_STATE_HEALTHCHECK	2
#define RNG_STATE_ERROR		3

int64_t	hv_rng_data_read_diag(paddr_t raddr, uint64_t size, uint64_t *delta);
int64_t	hv_rng_data_read(paddr_t raddr, uint64_t *delta);

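/*
 * Illustrative sketch, not part of the original header: hv_rng_data_read()
 * is assumed to deposit random data at the given real address and, when
 * the RNG is not yet ready, to return H_EWOULDBLOCK together with a
 * delta indicating how long to wait before retrying.  "buf_pa" is a
 * hypothetical buffer address.
 *
 *	uint64_t delta;
 *	int64_t err;
 *
 *	err = hv_rng_data_read(buf_pa, &delta);
 *	if (err == H_EWOULDBLOCK)
 *		... retry after delta ...
 *	else if (err != H_EOK)
 *		... give up ...
 */
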
/*
 * Error codes
 */

#define H_EOK		0
#define H_ENOCPU	1
#define H_ENORADDR	2
#define H_ENOINTR	3
#define H_EBADPGSZ	4
#define H_EBADTSB	5
#define H_EINVAL	6
#define H_EBADTRAP	7
#define H_EBADALIGN	8
#define H_EWOULDBLOCK	9
#define H_ENOACCESS	10
#define H_EIO		11
#define H_ECPUERROR	12
#define H_ENOTSUPPORTED	13
#define H_ENOMAP	14
#define H_ETOOMANY	15
#define H_ECHANNEL	16

extern uint64_t sun4v_group_interrupt_major;
extern uint64_t sun4v_group_sdio_major;

int64_t sun4v_intr_devino_to_sysino(uint64_t, uint64_t, uint64_t *);
int64_t sun4v_intr_setcookie(uint64_t, uint64_t, uint64_t);
int64_t sun4v_intr_setenabled(uint64_t, uint64_t, uint64_t);
int64_t	sun4v_intr_setstate(uint64_t, uint64_t, uint64_t);
int64_t	sun4v_intr_settarget(uint64_t, uint64_t, uint64_t);
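
/*
 * Illustrative sketch, not part of the original header: the sun4v_intr_*
 * wrappers above are understood to dispatch to either the sysino-based
 * hv_intr_* calls or the cookie-based hv_vintr_* calls depending on the
 * negotiated interrupt API major version recorded in
 * sun4v_group_interrupt_major, so callers can use the wrappers without
 * checking the version themselves.
 *
 *	sun4v_intr_settarget(devhandle, devino, cpuid);
 *	sun4v_intr_setstate(devhandle, devino, INTR_IDLE);
 *	sun4v_intr_setenabled(devhandle, devino, INTR_ENABLED);
 */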