/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch KVM
 *
 * Copyright (c) 2023 Loongson Technology Corporation Limited
 */
7
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "sysemu/runstate.h"
#include "cpu-csr.h"
#include "kvm_loongarch.h"
#include "trace.h"
29
30 static bool cap_has_mp_state;
31 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
32 KVM_CAP_LAST_INFO
33 };
34
/*
 * Copy the general-purpose registers and PC from KVM into QEMU's view.
 * Returns 0 on success, a negative error code on ioctl failure.
 */
static int kvm_loongarch_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Get the current register set as KVM seems it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_get_regs_core(strerror(errno));
        return ret;
    }
    /* gpr[0] ($zero) is hard-wired to 0 on LoongArch */
    env->gpr[0] = 0;
    for (i = 1; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    env->pc = regs.pc;
    return ret;
}
57
/*
 * Copy the general-purpose registers and PC from QEMU's view into KVM.
 * Returns 0 on success, a negative error code on ioctl failure.
 */
static int kvm_loongarch_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.pc = env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_put_regs_core(strerror(errno));
    }

    return ret;
}
78
/*
 * Fetch the guest CSR state from KVM into env.
 *
 * The return values of the individual KVM_GET_ONE_REG calls are OR-ed
 * together: a non-zero result means at least one CSR read failed, but
 * the remaining CSRs are still attempted.
 */
static int kvm_loongarch_get_csr(CPUState *cs)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
                           &env->CSR_CRMD);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
                           &env->CSR_PRMD);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
                           &env->CSR_EUEN);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
                           &env->CSR_MISC);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
                           &env->CSR_ECFG);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
                           &env->CSR_ESTAT);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
                           &env->CSR_ERA);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
                           &env->CSR_BADV);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
                           &env->CSR_BADI);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
                           &env->CSR_EENTRY);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
                           &env->CSR_TLBIDX);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
                           &env->CSR_TLBEHI);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
                           &env->CSR_TLBELO0);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
                           &env->CSR_TLBELO1);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
                           &env->CSR_ASID);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
                           &env->CSR_PGDL);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
                           &env->CSR_PGDH);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
                           &env->CSR_PGD);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
                           &env->CSR_PWCL);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
                           &env->CSR_PWCH);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
                           &env->CSR_STLBPS);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
                           &env->CSR_RVACFG);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
                           &env->CSR_CPUID);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
                           &env->CSR_PRCFG1);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
                           &env->CSR_PRCFG2);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
                           &env->CSR_PRCFG3);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
                           &env->CSR_SAVE[0]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
                           &env->CSR_SAVE[1]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
                           &env->CSR_SAVE[2]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
                           &env->CSR_SAVE[3]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
                           &env->CSR_SAVE[4]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
                           &env->CSR_SAVE[5]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
                           &env->CSR_SAVE[6]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
                           &env->CSR_SAVE[7]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
                           &env->CSR_TID);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
                           &env->CSR_CNTC);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
                           &env->CSR_TICLR);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
                           &env->CSR_LLBCTL);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
                           &env->CSR_IMPCTL1);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
                           &env->CSR_IMPCTL2);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
                           &env->CSR_TLBRENTRY);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
                           &env->CSR_TLBRBADV);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
                           &env->CSR_TLBRERA);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
                           &env->CSR_TLBRSAVE);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
                           &env->CSR_TLBRELO0);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
                           &env->CSR_TLBRELO1);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
                           &env->CSR_TLBREHI);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
                           &env->CSR_TLBRPRMD);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
                           &env->CSR_DMW[0]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
                           &env->CSR_DMW[1]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
                           &env->CSR_DMW[2]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
                           &env->CSR_DMW[3]);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
                           &env->CSR_TVAL);
    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
                           &env->CSR_TCFG);

    return ret;
}
248
/*
 * Push QEMU's view of the guest CSR state into KVM.
 *
 * @level: one of the KVM_PUT_*_STATE levels; CSR_CPUID is constant after
 * power-on and is written only for a full-state sync.
 *
 * Individual KVM_SET_ONE_REG results are OR-ed together, so a non-zero
 * return means at least one CSR write failed.
 */
static int kvm_loongarch_put_csr(CPUState *cs, int level)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
                           &env->CSR_CRMD);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
                           &env->CSR_PRMD);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
                           &env->CSR_EUEN);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
                           &env->CSR_MISC);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
                           &env->CSR_ECFG);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
                           &env->CSR_ESTAT);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
                           &env->CSR_ERA);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
                           &env->CSR_BADV);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
                           &env->CSR_BADI);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
                           &env->CSR_EENTRY);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
                           &env->CSR_TLBIDX);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
                           &env->CSR_TLBEHI);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
                           &env->CSR_TLBELO0);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
                           &env->CSR_TLBELO1);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
                           &env->CSR_ASID);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
                           &env->CSR_PGDL);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
                           &env->CSR_PGDH);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
                           &env->CSR_PGD);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
                           &env->CSR_PWCL);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
                           &env->CSR_PWCH);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
                           &env->CSR_STLBPS);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
                           &env->CSR_RVACFG);

    /* CPUID is constant after poweron, it should be set only once */
    if (level >= KVM_PUT_FULL_STATE) {
        ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
                               &env->CSR_CPUID);
    }

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
                           &env->CSR_PRCFG1);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
                           &env->CSR_PRCFG2);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
                           &env->CSR_PRCFG3);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
                           &env->CSR_SAVE[0]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
                           &env->CSR_SAVE[1]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
                           &env->CSR_SAVE[2]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
                           &env->CSR_SAVE[3]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
                           &env->CSR_SAVE[4]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
                           &env->CSR_SAVE[5]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
                           &env->CSR_SAVE[6]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
                           &env->CSR_SAVE[7]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
                           &env->CSR_TID);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
                           &env->CSR_CNTC);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
                           &env->CSR_TICLR);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
                           &env->CSR_LLBCTL);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
                           &env->CSR_IMPCTL1);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
                           &env->CSR_IMPCTL2);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
                           &env->CSR_TLBRENTRY);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
                           &env->CSR_TLBRBADV);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
                           &env->CSR_TLBRERA);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
                           &env->CSR_TLBRSAVE);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
                           &env->CSR_TLBRELO0);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
                           &env->CSR_TLBRELO1);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
                           &env->CSR_TLBREHI);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
                           &env->CSR_TLBRPRMD);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
                           &env->CSR_DMW[0]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
                           &env->CSR_DMW[1]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
                           &env->CSR_DMW[2]);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
                           &env->CSR_DMW[3]);
    /*
     * timer cfg must be put at last since it is used to enable
     * guest timer
     */
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
                           &env->CSR_TVAL);
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
                           &env->CSR_TCFG);
    return ret;
}
423
/*
 * Fetch the FPU state (fcsr, 32 FP registers, 8 condition flags) from KVM.
 * The kernel packs the eight 1-bit condition flags into one 64-bit word,
 * one byte per flag; unpack them into env->cf[].
 */
static int kvm_loongarch_get_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_get_fpu(strerror(errno));
        return ret;
    }

    env->fcsr0 = fpu.fcsr;
    for (i = 0; i < 32; i++) {
        env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0];
    }
    for (i = 0; i < 8; i++) {
        env->cf[i] = fpu.fcc & 0xFF;
        fpu.fcc = fpu.fcc >> 8;
    }

    return ret;
}
447
/*
 * Push the FPU state (fcsr, 32 FP registers, 8 condition flags) to KVM.
 * The eight condition flags are packed one byte each into the 64-bit
 * fcc word, mirroring kvm_loongarch_get_regs_fp().
 */
static int kvm_loongarch_put_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    fpu.fcsr = env->fcsr0;
    fpu.fcc = 0;
    for (i = 0; i < 32; i++) {
        fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0];
    }

    for (i = 0; i < 8; i++) {
        fpu.fcc |= env->cf[i] << (8 * i);
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_put_fpu(strerror(errno));
    }

    return ret;
}
471
/* Reset hook: a freshly reset vCPU starts out runnable. */
void kvm_arch_reset_vcpu(CPULoongArchState *env)
{
    env->mp_state = KVM_MP_STATE_RUNNABLE;
}
476
/*
 * Read the vCPU's multiprocessing state (runnable/halted) from KVM,
 * if the kernel advertises KVM_CAP_MP_STATE; otherwise do nothing.
 */
static int kvm_loongarch_get_mpstate(CPUState *cs)
{
    int ret = 0;
    struct kvm_mp_state mp_state;
    CPULoongArchState *env = cpu_env(cs);

    if (cap_has_mp_state) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            trace_kvm_failed_get_mpstate(strerror(errno));
            return ret;
        }
        env->mp_state = mp_state.mp_state;
    }

    return ret;
}
494
/*
 * Write the vCPU's multiprocessing state to KVM, if the kernel
 * advertises KVM_CAP_MP_STATE; otherwise do nothing.
 */
static int kvm_loongarch_put_mpstate(CPUState *cs)
{
    int ret = 0;
    struct kvm_mp_state mp_state = {
        .mp_state = cpu_env(cs)->mp_state
    };

    if (cap_has_mp_state) {
        ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            trace_kvm_failed_put_mpstate(strerror(errno));
        }
    }

    return ret;
}
511
/*
 * Read CPUCFG words 0..20 from KVM into env->cpucfg[].  Failures on
 * individual words are traced but do not stop the loop; the returned
 * value is the result of the final read.
 */
static int kvm_loongarch_get_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    uint64_t val;
    CPULoongArchState *env = cpu_env(cs);

    /* 21 CPUCFG words are currently defined for LoongArch */
    for (i = 0; i < 21; i++) {
        ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_get_cpucfg(strerror(errno));
        }
        env->cpucfg[i] = (uint32_t)val;
    }
    return ret;
}
527
/*
 * Intersect QEMU's CPUCFG2 feature word with what the host KVM actually
 * supports (queried via the VCPU_CPUCFG device attribute), then restore
 * the minimum version fields for FP and LLFTP if those features remain
 * enabled.  Returns the KVM_HAS_DEVICE_ATTR probe result.
 */
static int kvm_check_cpucfg2(CPUState *cs)
{
    int ret;
    uint64_t val;
    struct kvm_device_attr attr = {
        .group = KVM_LOONGARCH_VCPU_CPUCFG,
        .attr = 2,
        .addr = (uint64_t)&val,
    };
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);

    if (!ret) {
        kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
        env->cpucfg[2] &= val;

        if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) {
            /* The FP minimal version is 1. */
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1);
        }

        if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) {
            /* The LLFTP minimal version is 1. */
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LLFTP_VER, 1);
        }
    }

    return ret;
}
558
/*
 * Write CPUCFG words 0..20 from env->cpucfg[] into KVM.  Word 2 is
 * first reconciled against host capabilities via kvm_check_cpucfg2();
 * a failed probe aborts the whole sync.
 */
static int kvm_loongarch_put_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    CPULoongArchState *env = cpu_env(cs);
    uint64_t val;

    for (i = 0; i < 21; i++) {
        if (i == 2) {
            ret = kvm_check_cpucfg2(cs);
            if (ret) {
                return ret;
            }
        }
        val = env->cpucfg[i];
        ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_put_cpucfg(strerror(errno));
        }
    }
    return ret;
}
580
/*
 * Sync all vCPU state from KVM into QEMU: core regs, CSRs, FPU,
 * MP state and CPUCFG.  Stops at the first failing group.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    int ret;

    ret = kvm_loongarch_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_mpstate(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_cpucfg(cs);
    return ret;
}
608
/*
 * Sync all vCPU state from QEMU into KVM: core regs, CSRs (honouring
 * @level), FPU, MP state and CPUCFG.  Stops at the first failing group.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret;

    ret = kvm_loongarch_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_csr(cs, level);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_mpstate(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_cpucfg(cs);
    return ret;
}
636
/*
 * VM run-state change hook: save the guest's stable counter when the VM
 * stops and restore it when the VM resumes, so guest time does not jump
 * across stop/cont (or migration).
 */
static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
                                          RunState state)
{
    int ret;
    CPUState *cs = opaque;
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    if (running) {
        ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (ret < 0) {
            trace_kvm_failed_put_counter(strerror(errno));
        }
    } else {
        ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (ret < 0) {
            trace_kvm_failed_get_counter(strerror(errno));
        }
    }
}
658
/* Per-vCPU init: register the stable-counter save/restore hook. */
int kvm_arch_init_vcpu(CPUState *cs)
{
    qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);
    return 0;
}
664
/* Per-vCPU teardown: nothing architecture-specific to release. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
669
/* The KVM vCPU id is simply QEMU's cpu_index. */
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}
674
/* No per-arch action needed when a routed virq is released. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
679
/* MSI data to GSI translation is not used on LoongArch. */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
684
/* No per-arch fixup of MSI routes is required. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}
690
/* No per-arch action after an MSI route is added. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
696
/* No per-arch IRQ routing setup is required. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
700
/* LoongArch has a single VM type; the default is always 0. */
int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}
705
/* Arch-level KVM init: probe once for KVM_CAP_MP_STATE support. */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}
711
/* No in-kernel irqchip is created here. */
int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}
716
/* Nothing to do before entering the guest. */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
720
kvm_arch_post_run(CPUState * cs,struct kvm_run * run)721 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
722 {
723 return MEMTXATTRS_UNSPECIFIED;
724 }
725
/* Report whether the vCPU is halted so the main loop can skip it. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
730
kvm_arch_stop_on_emulation_error(CPUState * cs)731 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
732 {
733 return true;
734 }
735
/*
 * Handle KVM exits that must be completed in userspace.  Only IOCSR
 * accesses are expected on LoongArch; they are forwarded to the vCPU's
 * IOCSR address space with the requester id set to the cpu index.
 * Returns 0 on success, -1 for an unknown exit reason.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);
    MemTxAttrs attrs = {};

    attrs.requester_id = env_cpu(env)->cpu_index;

    trace_kvm_arch_handle_exit(run->exit_reason);
    switch (run->exit_reason) {
    case KVM_EXIT_LOONGARCH_IOCSR:
        address_space_rw(env->address_space_iocsr,
                         run->iocsr_io.phys_addr,
                         attrs,
                         run->iocsr_io.data,
                         run->iocsr_io.len,
                         run->iocsr_io.is_write);
        break;
    default:
        ret = -1;
        warn_report("KVM: unknown exit reason %d", run->exit_reason);
        break;
    }
    return ret;
}
761
/*
 * Assert (@level != 0) or deassert (@level == 0) interrupt line @irq on
 * the vCPU via KVM_INTERRUPT.  A negated irq number tells the kernel to
 * clear the line.  Returns the ioctl result.
 */
int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level)
{
    struct kvm_interrupt intr;
    CPUState *cs = CPU(cpu);

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    trace_kvm_set_intr(irq, level);
    return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
}
776
/* No extra properties on the KVM accelerator class for LoongArch. */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
780