/******************************************************************************
 * arch-arm.h
 *
 * Guest OS interface to ARM Xen.
 *
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2011 (C) Citrix Systems
 */

#ifndef __XEN_PUBLIC_ARCH_ARM_H__
#define __XEN_PUBLIC_ARCH_ARM_H__

/*
 * `incontents 50 arm_abi Hypercall Calling Convention
 *
 * A hypercall is issued using the ARM HVC instruction.
 *
 * A hypercall can take up to 5 arguments. These are passed in
 * registers, the first argument in x0/r0 (for arm64/arm32 guests
 * respectively irrespective of whether the underlying hypervisor is
 * 32- or 64-bit), the second argument in x1/r1, the third in x2/r2,
 * the fourth in x3/r3 and the fifth in x4/r4.
 *
 * The hypercall number is passed in r12 (arm) or x16 (arm64). In both
 * cases the relevant ARM procedure calling convention specifies this
 * is an inter-procedure-call scratch register (e.g. for use in linker
 * stubs). This use does not conflict with use during a hypercall.
 *
 * The HVC ISS must contain a Xen specific TAG: XEN_HYPERCALL_TAG.
 *
 * The return value is in x0/r0.
 *
 * The hypercall will clobber x16/r12 and the argument registers used
 * by that hypercall (except r0 which is the return value) i.e. in
 * addition to x16/r12 a 2 argument hypercall will clobber x1/r1 and a
 * 4 argument hypercall will clobber x1/r1, x2/r2 and x3/r3.
 *
 * Parameter structs passed to hypercalls are laid out according to
 * the Procedure Call Standard for the ARM Architecture (AAPCS, AKA
 * EABI) and Procedure Call Standard for the ARM 64-bit Architecture
 * (AAPCS64). Where there is a conflict the 64-bit standard should be
 * used regardless of guest type. Structures which are passed as
 * hypercall arguments are always little endian.
 *
 * All memory which is shared with other entities in the system
 * (including the hypervisor and other guests) must reside in memory
 * which is mapped as Normal Inner-cacheable. This applies to:
 *  - hypercall arguments passed via a pointer to guest memory.
 *  - memory shared via the grant table mechanism (including PV I/O
 *    rings etc).
 *  - memory shared with the hypervisor (struct shared_info, struct
 *    vcpu_info, the grant table, etc).
 *
 * Any Inner cache allocation strategy (Write-Back, Write-Through etc)
 * is acceptable. There is no restriction on the Outer-cacheability.
 */

/*
 * `incontents 55 arm_hcall Supported Hypercalls
 *
 * Xen on ARM makes extensive use of hardware facilities and therefore
 * only a subset of the potential hypercalls are required.
 *
 * Since ARM uses second stage paging any machine/physical addresses
 * passed to hypercalls are Guest Physical Addresses (Intermediate
 * Physical Addresses) unless otherwise noted.
 *
 * The following hypercalls (and sub operations) are supported on the
 * ARM platform. Other hypercalls should be considered
 * unavailable/unsupported.
 *
 * HYPERVISOR_memory_op
 *   All generic sub-operations.
 *
 *   In addition the following arch specific sub-ops:
 *    * XENMEM_add_to_physmap
 *    * XENMEM_add_to_physmap_batch
 *
 * HYPERVISOR_domctl
 *   All generic sub-operations, with the exception of:
 *    * XEN_DOMCTL_iomem_permission (not yet implemented)
 *    * XEN_DOMCTL_irq_permission (not yet implemented)
 *
 * HYPERVISOR_sched_op
 *   All generic sub-operations, with the exception of:
 *    * SCHEDOP_block -- prefer wfi hardware instruction
 *
 * HYPERVISOR_console_io
 *   All generic sub-operations
 *
 * HYPERVISOR_xen_version
 *   All generic sub-operations
 *
 * HYPERVISOR_event_channel_op
 *   All generic sub-operations
 *
 * HYPERVISOR_physdev_op
 *   No sub-operations are currently supported
 *
 * HYPERVISOR_sysctl
 *   All generic sub-operations, with the exception of:
 *    * XEN_SYSCTL_page_offline_op
 *    * XEN_SYSCTL_get_pmstat
 *    * XEN_SYSCTL_pm_op
 *
 * HYPERVISOR_hvm_op
 *   Exactly these sub-operations are supported:
 *    * HVMOP_set_param
 *    * HVMOP_get_param
 *
 * HYPERVISOR_grant_table_op
 *   All generic sub-operations
 *
 * HYPERVISOR_vcpu_op
 *   Exactly these sub-operations are supported:
 *    * VCPUOP_register_vcpu_info
 *    * VCPUOP_register_runstate_memory_area
 *
 *
 * Other notes on the ARM ABI:
 *
 * - struct start_info is not exported to ARM guests.
 *
 * - struct shared_info is mapped by ARM guests using the
 *   HYPERVISOR_memory_op sub-op XENMEM_add_to_physmap, passing
 *   XENMAPSPACE_shared_info as space parameter.
 *
 * - All the per-cpu struct vcpu_info are mapped by ARM guests using
 *   the HYPERVISOR_vcpu_op sub-op VCPUOP_register_vcpu_info, including
 *   cpu0 struct vcpu_info.
 *
 * - The grant table is mapped using the HYPERVISOR_memory_op sub-op
 *   XENMEM_add_to_physmap, passing XENMAPSPACE_grant_table as space
 *   parameter. The memory range specified under the Xen compatible
 *   hypervisor node on device tree can be used as target gpfn for the
 *   mapping.
 *
 * - Xenstore is initialized by using the two hvm_params
 *   HVM_PARAM_STORE_PFN and HVM_PARAM_STORE_EVTCHN. They can be read
 *   with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
 *
 * - The paravirtualized console is initialized by using the two
 *   hvm_params HVM_PARAM_CONSOLE_PFN and HVM_PARAM_CONSOLE_EVTCHN.
 *   They can be read with the HYPERVISOR_hvm_op sub-op HVMOP_get_param.
 *
 * - Event channel notifications are delivered using the percpu GIC
 *   interrupt specified under the Xen compatible hypervisor node on
 *   device tree.
 *
 * - The device tree Xen compatible node is fully described under Linux
 *   at Documentation/devicetree/bindings/arm/xen.txt.
 */

/* Xen-specific tag carried in the HVC instruction's ISS field. */
#define XEN_HYPERCALL_TAG   0xEA1

/* A 64-bit value that is also 8-byte aligned on 32-bit builds. */
#define uint64_aligned_t UINT64 __attribute__((aligned(8)))

#ifndef __ASSEMBLY__
/*
 * Declare both flavours of guest handle for `name`:
 *  - __guest_handle_##name:    natural pointer width of the guest.
 *  - __guest_handle_64_##name: always 64 bits wide and 8-byte aligned.
 */
#define ___DEFINE_XEN_GUEST_HANDLE(name, type)                  \
    typedef union { type *p; unsigned long q; }                 \
        __guest_handle_ ## name;                                \
    typedef union { type *p; uint64_aligned_t q; }              \
        __guest_handle_64_ ## name;

/*
 * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
 * in a struct in memory. On ARM it is always 8 bytes in size and 8
 * bytes aligned.
 * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
 * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
 */
#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
    ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
    ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
#define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
#define __XEN_GUEST_HANDLE(name)        __guest_handle_64_ ## name
#define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
/* this is going to be changed on 64 bit */
#define XEN_GUEST_HANDLE_PARAM(name)    __guest_handle_ ## name
/* Zero the whole (possibly wider than pointer) union before storing val. */
#define set_xen_guest_handle_raw(hnd, val)                  \
    do {                                                    \
        typeof(&(hnd)) _sxghr_tmp = &(hnd);                 \
        _sxghr_tmp->q = 0;                                  \
        _sxghr_tmp->p = val;                                \
    } while ( 0 )
#ifdef __XEN_TOOLS__
#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
#endif
#define set_xen_guest_handle(hnd, val)  set_xen_guest_handle_raw(hnd, val)

#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., r0/x0). */
# define __DECL_REG(n64, n32) union {          \
        UINT64 n64;                            \
        UINT32 n32;                            \
    }
#else
/* Non-gcc sources must always use the proper 64-bit name (e.g., x0). */
#define __DECL_REG(n64, n32) UINT64 n64
#endif

/*
 * Guest core register state. Each __DECL_REG entry gives the AArch64
 * name first and the aliased AArch32 (banked) name second.
 */
struct vcpu_guest_core_regs
{
    /*          Aarch64        Aarch32 */
    __DECL_REG(x0,             r0_usr);
    __DECL_REG(x1,             r1_usr);
    __DECL_REG(x2,             r2_usr);
    __DECL_REG(x3,             r3_usr);
    __DECL_REG(x4,             r4_usr);
    __DECL_REG(x5,             r5_usr);
    __DECL_REG(x6,             r6_usr);
    __DECL_REG(x7,             r7_usr);
    __DECL_REG(x8,             r8_usr);
    __DECL_REG(x9,             r9_usr);
    __DECL_REG(x10,            r10_usr);
    __DECL_REG(x11,            r11_usr);
    __DECL_REG(x12,            r12_usr);

    __DECL_REG(x13,            sp_usr);
    __DECL_REG(x14,            lr_usr);

    __DECL_REG(x15,            __unused_sp_hyp);

    __DECL_REG(x16,            lr_irq);
    __DECL_REG(x17,            sp_irq);

    __DECL_REG(x18,            lr_svc);
    __DECL_REG(x19,            sp_svc);

    __DECL_REG(x20,            lr_abt);
    __DECL_REG(x21,            sp_abt);

    __DECL_REG(x22,            lr_und);
    __DECL_REG(x23,            sp_und);

    __DECL_REG(x24,            r8_fiq);
    __DECL_REG(x25,            r9_fiq);
    __DECL_REG(x26,            r10_fiq);
    __DECL_REG(x27,            r11_fiq);
    __DECL_REG(x28,            r12_fiq);

    __DECL_REG(x29,            sp_fiq);
    __DECL_REG(x30,            lr_fiq);

    /* Return address and mode */
    __DECL_REG(pc64,           pc32);           /* ELR_EL2 */
    UINT32 cpsr;                                /* SPSR_EL2 */

    union {
        UINT32 spsr_el1;       /* AArch64 */
        UINT32 spsr_svc;       /* AArch32 */
    };

    /* AArch32 guests only */
    UINT32 spsr_fiq, spsr_irq, spsr_und, spsr_abt;

    /* AArch64 guests only */
    UINT64 sp_el0;
    UINT64 sp_el1, elr_el1;
};
typedef struct vcpu_guest_core_regs vcpu_guest_core_regs_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_core_regs_t);

#undef __DECL_REG

/* Guest physical frame number. Always 64 bits wide on ARM. */
typedef UINT64 xen_pfn_t;
#define PRI_xen_pfn PRIx64

/* Maximum number of virtual CPUs in legacy multi-processor guests. */
/* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
#define XEN_LEGACY_MAX_VCPUS 1

/* Guest "unsigned long". Always 64 bits wide on ARM. */
typedef UINT64 xen_ulong_t;
#define PRI_xen_ulong PRIx64

#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Initial vCPU state used by the hypervisor/tools when building a domain. */
struct vcpu_guest_context {
#define _VGCF_online                   0
#define VGCF_online                    (1<<_VGCF_online)
    UINT32 flags;                            /* VGCF_* */

    struct vcpu_guest_core_regs user_regs;   /* Core CPU registers */

    UINT32 sctlr;
    UINT64 ttbcr, ttbr0, ttbr1;
};
typedef struct vcpu_guest_context vcpu_guest_context_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
#endif

/* ARM has no arch-specific per-vCPU shared state. */
struct arch_vcpu_info {
};
typedef struct arch_vcpu_info arch_vcpu_info_t;

/* ARM has no arch-specific shared_info state. */
struct arch_shared_info {
};
typedef struct arch_shared_info arch_shared_info_t;
typedef UINT64 xen_callback_t;

#endif /* !__ASSEMBLY__ */

#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* PSR bits (CPSR, SPSR) */

#define PSR_THUMB       (1<<5)        /* Thumb Mode enable */
#define PSR_FIQ_MASK    (1<<6)        /* Fast Interrupt mask */
#define PSR_IRQ_MASK    (1<<7)        /* Interrupt mask */
#define PSR_ABT_MASK    (1<<8)        /* Asynchronous Abort mask */
#define PSR_BIG_ENDIAN  (1<<9)        /* arm32: Big Endian Mode */
#define PSR_DBG_MASK    (1<<9)        /* arm64: Debug Exception mask */
#define PSR_IT_MASK     (0x0600fc00)  /* Thumb If-Then Mask */
#define PSR_JAZELLE     (1<<24)       /* Jazelle Mode */

/* 32 bit modes */
#define PSR_MODE_USR 0x10
#define PSR_MODE_FIQ 0x11
#define PSR_MODE_IRQ 0x12
#define PSR_MODE_SVC 0x13
#define PSR_MODE_MON 0x16
#define PSR_MODE_ABT 0x17
#define PSR_MODE_HYP 0x1a
#define PSR_MODE_UND 0x1b
#define PSR_MODE_SYS 0x1f

/* 64 bit modes */
#define PSR_MODE_BIT  0x10 /* Set iff AArch32 */
#define PSR_MODE_EL3h 0x0d
#define PSR_MODE_EL3t 0x0c
#define PSR_MODE_EL2h 0x09
#define PSR_MODE_EL2t 0x08
#define PSR_MODE_EL1h 0x05
#define PSR_MODE_EL1t 0x04
#define PSR_MODE_EL0t 0x00

/*
 * Initial PSR for freshly-created vCPUs: asynchronous aborts, FIQs and
 * IRQs all masked; SVC mode for 32-bit guests, EL1h for 64-bit guests.
 */
#define PSR_GUEST32_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_SVC)
#define PSR_GUEST64_INIT (PSR_ABT_MASK|PSR_FIQ_MASK|PSR_IRQ_MASK|PSR_MODE_EL1h)

/* Initial SCTLR value for new vCPUs (bit meanings per the Arm ARM). */
#define SCTLR_GUEST_INIT    0x00c50078

/*
 * Virtual machine platform (memory layout, interrupts)
 *
 * These are defined for consistency between the tools and the
 * hypervisor. Guests must not rely on these hardcoded values but
 * should instead use the FDT.
 */

/* Physical Address Space */

/*
 * vGIC mappings: Only one set of mapping is used by the guest.
 * Therefore they can overlap.
 */

/* vGIC v2 mappings */
#define GUEST_GICD_BASE   0x03001000ULL
#define GUEST_GICD_SIZE   0x00001000ULL
#define GUEST_GICC_BASE   0x03002000ULL
#define GUEST_GICC_SIZE   0x00000100ULL

/* vGIC v3 mappings */
#define GUEST_GICV3_GICD_BASE      0x03001000ULL
#define GUEST_GICV3_GICD_SIZE      0x00010000ULL

/* One 128KB redistributor frame per vCPU, in a single region. */
#define GUEST_GICV3_RDIST_STRIDE   0x20000ULL
#define GUEST_GICV3_RDIST_REGIONS  1

#define GUEST_GICV3_GICR0_BASE     0x03020000ULL    /* vCPU0 - vCPU7 */
#define GUEST_GICV3_GICR0_SIZE     0x00100000ULL

/*
 * 16MB == 4096 pages reserved for guest to use as a region to map its
 * grant table in.
 */
#define GUEST_GNTTAB_BASE 0x38000000ULL
#define GUEST_GNTTAB_SIZE 0x01000000ULL

/* Reserved region for "magic" pages (e.g. xenstore, console rings). */
#define GUEST_MAGIC_BASE  0x39000000ULL
#define GUEST_MAGIC_SIZE  0x01000000ULL

#define GUEST_RAM_BANKS   2

#define GUEST_RAM0_BASE   0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE   0xc0000000ULL

#define GUEST_RAM1_BASE   0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE   0xfe00000000ULL

#define GUEST_RAM_BASE    GUEST_RAM0_BASE /* Lowest RAM address */
/* Largest amount of actual RAM, not including holes */
#define GUEST_RAM_MAX     (GUEST_RAM0_SIZE + GUEST_RAM1_SIZE)
/* Suitable for e.g. const uint64_t ramfoo[] = GUEST_RAM_BANK_FOOS; */
#define GUEST_RAM_BANK_BASES   { GUEST_RAM0_BASE, GUEST_RAM1_BASE }
#define GUEST_RAM_BANK_SIZES   { GUEST_RAM0_SIZE, GUEST_RAM1_SIZE }

/* Interrupts (per-cpu PPI numbers) */
#define GUEST_TIMER_VIRT_PPI    27
#define GUEST_TIMER_PHYS_S_PPI  29
#define GUEST_TIMER_PHYS_NS_PPI 30
#define GUEST_EVTCHN_PPI        31

/* PSCI functions */
#define PSCI_cpu_suspend 0
#define PSCI_cpu_off     1
#define PSCI_cpu_on      2
#define PSCI_migrate     3

#endif /* __XEN__ || __XEN_TOOLS__ */

#endif /*  __XEN_PUBLIC_ARCH_ARM_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */