/* xref: /qemu/hw/ppc/spapr_hcall.c (revision 33848cee) */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr_ovec.h"

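/* Helper for modifying an SPR on another vCPU: run_on_cpu() executes
 * do_spr_sync() on the target vCPU's own thread, where it is safe to
 * synchronize register state from KVM and then update the SPR under a
 * mask.  run_on_cpu() blocks until the callback has run, so passing a
 * stack-allocated SPRSyncState is safe.  For example,
 * set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE) sets the ILE bit and
 * set_spr(cs, SPR_LPCR, 0, LPCR_ILE) clears it, as done in h_set_mode()
 * below.
 */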
struct SPRSyncState {
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct SPRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}

static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
}

static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}

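/* pte_index selects a single HPTE.  Dividing the 8-entry-aligned index by
 * HPTES_PER_GROUP yields the PTEG number, which must fit within
 * env->htab_mask (number of PTEGs minus one).  For example, with the
 * usual 128-byte PTEGs a 16 MiB HTAB holds 2^17 groups, so htab_mask is
 * 0x1ffff and any pte_index up to 0x1ffff * 8 + 7 is accepted.
 */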
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}

static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    MemoryHotplugState *hpms = &spapr->hotplug_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= hpms->base)
        && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
        return true;
    }

    return false;
}

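/* H_ENTER: insert an HPTE into the hash page table.  args[0] holds the
 * flags (H_EXACT selects one specific slot rather than any free slot in
 * the PTEG), args[1] the PTE index, and args[2]/args[3] the new HPTE
 * doublewords 0 and 1 (pteh/ptel).  On success the index of the slot
 * actually used is returned in args[0].
 */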
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

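/* Common backend for H_REMOVE and H_BULK_REMOVE.  With H_AVPN the entry
 * is only removed if its AVPN matches @avpn; with H_ANDCOND it is only
 * removed if (v & avpn) is zero.  On success the old HPTE doublewords
 * are returned through @vp/@rp and the stale TLB entry is flushed.
 */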
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH        4

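/* H_BULK_REMOVE: process up to four (tsh, tsl) request pairs from the
 * argument buffer.  The high doubleword (tsh) packs a 2-bit type, a
 * 2-bit response code, the AVPN/ANDCOND flags and the PTE index; tsl
 * carries the AVPN value.  For instance, tsh = 0x4200000000001230ULL is
 * a REQUEST to remove the HPTE at index 0x1230 using AVPN matching
 * against tsl.  The shift by 26 below moves the H_BULK_REMOVE_AVPN and
 * H_BULK_REMOVE_ANDCOND bits down onto the H_AVPN/H_ANDCOND positions
 * expected by remove_hpte().
 */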
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

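/* H_PROTECT: change the protection bits of a valid HPTE.  The flags in
 * args[0] carry the new PP0/PP/N and key bits.  The entry is briefly
 * marked invalid and the TLB flushed before the updated doubleword 1 is
 * written back, so no translation can observe a half-updated HPTE.
 */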
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

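/* H_READ: return the raw doublewords of an HPTE in args[0]/args[1].
 * With H_READ_4 the index is rounded down to a multiple of four and
 * four consecutive entries (eight doublewords) are returned.
 */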
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

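/* H_SET_DABR / H_SET_XDABR: program the Data Address Breakpoint
 * register(s).  Bit 0x4 of the DABR value is the Breakpoint Translation
 * bit; when the CPU lacks a DABRX register to qualify matches, only
 * values with that bit set are accepted and H_RESERVED_DABR is returned
 * otherwise.
 */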
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

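/* H_PAGE_INIT: initialize a page of guest memory.  H_ZERO_PAGE clears
 * the destination page, H_COPY_PAGE copies one page from src to dst,
 * and H_ICACHE_SYNCHRONIZE/H_ICACHE_INVALIDATE maintain instruction
 * cache coherency afterwards (dcbst/icbi ranges under KVM, a TB flush
 * under TCG).  Both addresses must be page-aligned guest RAM.
 */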
static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);          /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

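/* Layout assumed here for the Virtual Processor Area (per LoPAPR): a
 * big-endian halfword at offset 0x4 gives the VPA's size, and the byte
 * at offset 0x9 holds the dispatch flags, where setting 0x2 advertises
 * shared-processor mode to the guest.  A VPA may not cross a 4 KiB page
 * boundary.
 */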
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */
    size = lduw_be_phys(cs->as, vpa + VPA_SIZE_OFFSET);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

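/* H_REGISTER_VPA: args[0] selects the operation (register or deregister
 * the VPA, SLB shadow buffer or dispatch trace log), args[1] is the
 * target vCPU's device-tree id and args[2] the buffer's guest real
 * address.  The VPA must be registered before the SLB shadow or DTL,
 * and cannot be deregistered while either of those is still registered.
 */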
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

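/* H_CEDE: the vCPU cedes its cycles back to the hypervisor.  External
 * interrupts are enabled (MSR_EE) and, if the vCPU has no pending work,
 * it is halted until the next interrupt wakes it.
 */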
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

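/* KVMPPC_H_RTAS: forward an RTAS call made through the hypervisor.
 * args[0] is the guest real address of the RTAS parameter block:
 *
 *   word 0: token  (which RTAS service)
 *   word 1: nargs  (number of input words)
 *   word 2: nret   (number of return words)
 *   then nargs input words followed by nret return slots,
 *
 * hence the input buffer at rtas_r3 + 12 and the return buffer at
 * rtas_r3 + 12 + 4 * nargs below.
 */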
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

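/* KVMPPC_H_LOGICAL_MEMOP: bulk copy (op 0) or inverted copy (op 1) of
 * count elements of 1 << esize bytes.  When the destination overlaps
 * the source from above, the copy runs backwards (step negated) so each
 * source element is read before it is overwritten, as memmove() would.
 */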
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

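/* H_SET_MODE, endianness resource: mflags selects the interrupt byte
 * order.  The LPCR_ILE bit is updated on every vCPU so exceptions are
 * taken in the chosen endianness, and spapr_pci_switch_vga() flips the
 * VGA framebuffer to match.
 */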
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

typedef struct {
    uint32_t cpu_version;
    Error *err;
} SetCompatState;

static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SetCompatState *s = arg.host_ptr;

    cpu_synchronize_state(cs);
    ppc_set_compat(cpu, s->cpu_version, &s->err);
}

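/* Map the logical ("architected") PVR values onto comparable compat
 * levels: ISA 2.05 -> 2050, 2.06 -> 2060, 2.06+ -> 2061, 2.07 -> 2070.
 * A real (non-logical) PVR maps to 0.  The encoding is only used for
 * ordering, e.g. 2.06+ (2061) ranks above plain 2.06 (2060).
 */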
#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

static void cas_handle_compat_cpu(PowerPCCPUClass *pcc, uint32_t pvr,
                                  unsigned max_lvl, unsigned *compat_lvl,
                                  unsigned *cpu_version)
{
    unsigned lvl = get_compat_level(pvr);
    bool is205, is206, is207;

    if (!lvl) {
        return;
    }

    /* If it is a logical PVR, try to determine the highest level */
    is205 = (pcc->pcr_supported & PCR_COMPAT_2_05) &&
            (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
    is206 = (pcc->pcr_supported & PCR_COMPAT_2_06) &&
            ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));
    is207 = (pcc->pcr_supported & PCR_COMPAT_2_07) &&
            (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_07));

    if (is205 || is206 || is207) {
        if (!max_lvl) {
            /* User did not set the level, choose the highest */
            if (*compat_lvl <= lvl) {
                *compat_lvl = lvl;
                *cpu_version = pvr;
            }
        } else if (max_lvl >= lvl) {
            /* User chose the level, don't set higher than this */
            *compat_lvl = lvl;
            *cpu_version = pvr;
        }
    }
}

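/* KVMPPC_H_CAS (ibm,client-architecture-support): the guest passes a
 * list of (PVR mask, PVR value) pairs it can run on, terminated by an
 * entry whose value has bits set outside its mask.  QEMU picks a
 * compatibility mode from the list, applies it to every vCPU, then
 * renegotiates option vector 5, rebooting the guest if previously
 * negotiated capabilities were lost or the updated device tree could
 * not be patched in place.
 */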
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    sPAPROptionVector *ov5_guest, *ov5_cas_old, *ov5_updates;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            cas_handle_compat_cpu(pcc, pvr, max_lvl, &compat_lvl, &cpu_version);
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);

    /* NOTE: there are actually a number of ov5 bits where input from the
     * guest is always zero, and the platform/QEMU enables them independently
     * of guest input. To model these properly we'd want some sort of mask,
     * but since they only currently apply to memory migration as defined
     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
     * to worry about this for now.
     */
    ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas);
    /* full range of negotiated ov5 capabilities */
    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
    spapr_ovec_cleanup(ov5_guest);
    /* capabilities that have been added since CAS-generated guest reset.
     * if capabilities have since been removed, generate another reset
     */
    ov5_updates = spapr_ovec_new();
    spapr->cas_reboot = spapr_ovec_diff(ov5_updates,
                                        ov5_cas_old, spapr->ov5_cas);

    if (!spapr->cas_reboot) {
        spapr->cas_reboot =
            (spapr_h_cas_compose_response(spapr, args[1], args[2], cpu_update,
                                          ov5_updates) != 0);
    }
    spapr_ovec_cleanup(ov5_updates);

    if (spapr->cas_reboot) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}

static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

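/* Central hypercall dispatch.  PAPR opcodes are multiples of 4 up to
 * MAX_HCALL_OPCODE and index papr_hypercall_table directly; the private
 * KVMPPC_HCALL_BASE..KVMPPC_HCALL_MAX range lands in the second table.
 * Anything unregistered fails with H_FUNCTION.
 */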
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants; they will use whatever
     * mapping attributes QEMU is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)