#include "sysemu/sysemu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"

/*
 * Build the RB operand for a tlbie instruction from the two halves of a
 * hashed page table entry (v = first doubleword, r = second doubleword)
 * and the entry's index within the HPT.  The RB layout (AVA, L, B fields)
 * follows the Power ISA tlbie encoding.
 */
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    /* Reconstruct the low VA bits from the PTEG index (8 PTEs per group,
     * hence the >> 3) */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        /* Secondary hash uses the complement of the primary hash */
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;            /* B field (segment size) */
    return rb;
}

/*
 * H_ENTER hypercall: insert a new entry into the guest's hashed page
 * table.  args[] holds flags, the PTE (group) index, and the two PTE
 * doublewords (pteh/ptel).  On success the actual slot used is returned
 * in args[0].
 */
static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;       /* default: 4kB page */
    target_ulong raddr;
    target_ulong i;
    hwaddr hpte;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    /* Real address, aligned down to the page size being mapped */
    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < spapr->ram_limit) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    /* Clear bits 0x60 of the first doubleword before storing —
     * presumably reserved/software bits the guest may not set; TODO
     * confirm against the HPTE format definition in mmu-hash64.h */
    pteh &= ~0x60ULL;

    /* Reject indexes beyond the hash table size */
    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }
    if (likely((flags & H_EXACT) == 0)) {
        /* Caller doesn't insist on a specific slot: scan the 8-entry
         * PTE group for a free (invalid) slot */
        pte_index &= ~7ULL;
        hpte = pte_index * HASH_PTE_SIZE_64;
        for (i = 0; ; ++i) {
            if (i == 8) {
                return H_PTEG_FULL;
            }
            if ((ppc_hash64_load_hpte0(env, hpte) & HPTE64_V_VALID) == 0) {
                break;
            }
            hpte += HASH_PTE_SIZE_64;
        }
    } else {
        /* H_EXACT: the requested slot must itself be free */
        i = 0;
        hpte = pte_index * HASH_PTE_SIZE_64;
        if (ppc_hash64_load_hpte0(env, hpte) & HPTE64_V_VALID) {
            return H_PTEG_FULL;
        }
    }
    /* Store the second doubleword first so the entry only becomes valid
     * once both halves are in place */
    ppc_hash64_store_hpte1(env, hpte, ptel);
    /* eieio();  FIXME: need some sort of barrier for smp? */
    ppc_hash64_store_hpte0(env, hpte, pteh | HPTE64_V_HPTE_DIRTY);

    /* Report the slot actually used back to the guest */
    args[0] = pte_index + i;
    return H_SUCCESS;
}

/* Internal result codes shared by h_remove and h_bulk_remove; the
 * numeric values are significant — h_bulk_remove shifts them directly
 * into the per-request response code field (ret << 60). */
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

/*
 * Invalidate one HPT entry at index ptex, subject to the H_AVPN /
 * H_ANDCOND match conditions in flags.  On success the removed entry's
 * two doublewords are returned through *vp / *rp and the corresponding
 * TLB entry is invalidated.
 */
static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    hwaddr hpte;
    target_ulong v, r, rb;

    if ((ptex * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return REMOVE_PARM;
    }

    hpte = ptex * HASH_PTE_SIZE_64;

    v = ppc_hash64_load_hpte0(env, hpte);
    r = ppc_hash64_load_hpte1(env, hpte);

    /* Entry must be valid, and satisfy the optional AVPN match
     * (H_AVPN) or bit-clear condition (H_ANDCOND) */
    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    /* Clear the entry (valid bit goes away, dirty flag kept for HPT
     * migration tracking), then shoot down the stale translation */
    ppc_hash64_store_hpte0(env, hpte, HPTE64_V_HPTE_DIRTY);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}

/*
 * H_REMOVE hypercall: remove a single HPT entry.  On success the old
 * PTE doublewords are returned in args[0]/args[1].
 */
static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

/* Field masks for the per-request doublewords of H_BULK_REMOVE
 * (layout per the PAPR specification of the translation-specifier) */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

/*
 * H_BULK_REMOVE hypercall: process up to 4 remove requests, each
 * described by a (tsh, tsl) pair in args[].  The response code for each
 * request is written back into its tsh word in place.
 */
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            /* End-of-list marker: stop early */
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        /* Convert the request word into a response word in place */
        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        /* ANDCOND and AVPN are mutually exclusive match conditions */
        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        /* The >> 26 shift translates the bulk-remove flag bits into the
         * H_AVPN/H_ANDCOND positions remove_hpte expects — presumably
         * matching the flag encoding in spapr.h; TODO confirm */
        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        /* RemoveResult values map directly onto the response codes */
        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            /* Return the reference/change bits in the RC field */
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        /* REMOVE_NOT_FOUND: response code already recorded above;
         * continue with the next request */

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

/*
 * H_PROTECT hypercall: update the protection-related bits (PP, N, key)
 * of an existing HPT entry.  The entry is temporarily invalidated and
 * the TLB entry shot down before the new second doubleword is stored,
 * so no stale translation survives — the statement order here matters.
 */
static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    hwaddr hpte;
    target_ulong v, r, rb;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    hpte = pte_index * HASH_PTE_SIZE_64;

    v = ppc_hash64_load_hpte0(env, hpte);
    r = ppc_hash64_load_hpte1(env, hpte);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    /* Replace the protection bits with those supplied in flags */
    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    /* Invalidate the entry before rewriting it... */
    ppc_hash64_store_hpte0(env, hpte, (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY);
    ppc_tlb_invalidate_one(env, rb);
    ppc_hash64_store_hpte1(env, hpte, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    /* ...then make it valid again with the original first doubleword */
    ppc_hash64_store_hpte0(env, hpte, v | HPTE64_V_HPTE_DIRTY);
    return H_SUCCESS;
}

/*
 * H_READ hypercall: read back 1 or (with H_READ_4) 4 consecutive HPT
 * entries into args[].  Reads directly from external_htab — NOTE(review):
 * no check that env->external_htab is non-NULL here; presumably callers
 * only reach this with a qemu-managed HPT — confirm.
 */
static target_ulong h_read(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

/* H_SET_DABR hypercall: set the data address breakpoint register.
 * Unimplemented; H_HARDWARE tells the guest the facility is absent. */
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}

/* Flag values selecting the H_REGISTER_VPA sub-operation */
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

/*
 * Register a Virtual Processor Area at guest logical address vpa.
 * Validates alignment, minimum size and page-crossing, then marks the
 * VPA's shared-processor byte.
 */
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    /* VPA must be cache-line aligned */
    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    /* Guest-supplied size lives at offset 0x4 (VPA_SIZE_OFFSET) */
    size = lduw_be_phys(vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    /* Advertise shared-processor mode in the VPA */
    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

/* Deregister the VPA; refused while the SLB shadow or dispatch trace
 * log are still registered (they depend on the VPA). */
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

/* Register the SLB shadow buffer; requires a VPA to be registered
 * first and the buffer not to cross a page boundary. */
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

/* Deregister the SLB shadow buffer; always succeeds */
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

/* Register the Dispatch Trace Log; requires a registered VPA and a
 * minimum size of 48 bytes. */
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

/* Deregister the Dispatch Trace Log; always succeeds */
static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

/*
 * H_REGISTER_VPA hypercall: dispatch to the (de)registration helper
 * selected by flags, on behalf of the target vcpu given by procno.
 * Unknown flag values fall through with the initial H_PARAMETER.
 */
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    CPUState *tcpu;

    tcpu = qemu_get_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = tcpu->env_ptr;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

/*
 * H_CEDE hypercall: the vcpu yields.  External interrupts are enabled
 * (MSR_EE) and, if there is no pending work, the vcpu is halted until
 * the next interrupt.
 */
static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        env->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

/*
 * KVMPPC_H_RTAS hypercall: forward an RTAS call.  args[0] points to the
 * RTAS argument buffer in guest memory: token, nargs, nret, then nargs
 * input words followed by nret output words (each 32-bit).
 */
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CACHE_LOAD: load size bytes (1/2/4/8)
 * from guest physical address addr; result returned in args[0].
 */
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

/*
 * H_LOGICAL_CI_STORE / H_LOGICAL_CACHE_STORE: store size bytes (1/2/4/8)
 * of val to guest physical address addr.
 */
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

/*
 * KVMPPC_H_LOGICAL_MEMOP: copy (op=0) or copy-with-invert (op=1) count
 * elements of 2^esize bytes from src to dst in guest physical memory.
 * Overlapping regions are handled by copying backwards when dst lies
 * inside the source range.
 */
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    /* Addresses must be element-aligned, op must be copy or invert */
    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        /* dst overlaps the tail of src: copy from the end backwards */
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(dst, tmp);
            break;
        case 1:
            stw_phys(dst, tmp);
            break;
        case 2:
            stl_phys(dst, tmp);
            break;
        case 3:
            stq_phys(dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

/*
 * H_SET_MODE hypercall.  Only the interrupt-endianness resource is
 * handled: it flips LPCR_ILE on every vcpu.  Other resources return
 * the initial H_P2 ("bad second parameter").
 */
static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    CPUState *cs;
    target_ulong mflags = args[0];
    target_ulong resource = args[1];
    target_ulong value1 = args[2];
    target_ulong value2 = args[3];
    target_ulong ret = H_P2;

    if (resource == H_SET_MODE_ENDIAN) {
        /* value1/value2 are reserved for this resource and must be 0 */
        if (value1) {
            ret = H_P3;
            goto out;
        }
        if (value2) {
            ret = H_P4;
            goto out;
        }

        switch (mflags) {
        case H_SET_MODE_ENDIAN_BIG:
            CPU_FOREACH(cs) {
                PowerPCCPU *cp = POWERPC_CPU(cs);
                CPUPPCState *env = &cp->env;
                env->spr[SPR_LPCR] &= ~LPCR_ILE;
            }
            ret = H_SUCCESS;
            break;

        case H_SET_MODE_ENDIAN_LITTLE:
            CPU_FOREACH(cs) {
                PowerPCCPU *cp = POWERPC_CPU(cs);
                CPUPPCState *env = &cp->env;
                env->spr[SPR_LPCR] |= LPCR_ILE;
            }
            ret = H_SUCCESS;
            break;

        default:
            ret = H_UNSUPPORTED_FLAG;
        }
    }

out:
    return ret;
}

/* Dispatch tables: PAPR hcalls are indexed by opcode/4 (opcodes are
 * multiples of 4); qemu/KVM-private hcalls use their own range. */
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

/*
 * Register fn as the handler for opcode in the appropriate table.
 * Asserts on malformed opcodes and on double registration.
 */
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

/*
 * Top-level hypercall dispatcher: look up opcode in one of the two
 * tables and invoke the handler.  `spapr` here is the global machine
 * state defined elsewhere in the file/project.  Unknown opcodes return
 * H_FUNCTION.
 */
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}

/* Register every hypercall implemented in this file (run at type
 * registration time via type_init below). */
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    spapr_register_hypercall(H_SET_MODE, h_set_mode);
}

type_init(hypercall_register_types)