/*
 * QEMU TCG Single Threaded vCPUs implementation using instruction counting
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-icount.h"
#include "tcg-accel-ops-rr.h"

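/*
 * Return the number of instructions the vCPU may execute before the next
 * timer deadline.  Outside of replay, the soonest QEMU_CLOCK_VIRTUAL or
 * QEMU_CLOCK_REALTIME deadline is converted into an instruction count;
 * when replaying, the count comes from the replay log instead.
 */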
static int64_t icount_get_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        /*
         * Include all the timers, because they may need attention.
         * Overly long CPU execution may cause unnecessary delay in the UI.
         */
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                              QEMU_TIMER_ATTR_ALL);
        /* Check realtime timers, because they help with input processing */
        deadline = qemu_soonest_timeout(deadline,
                qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
                                           QEMU_TIMER_ATTR_ALL));

        /*
         * Maintain the prior (possibly buggy) behaviour: if no deadline is
         * set (because there is no QEMU_CLOCK_VIRTUAL timer) or it is more
         * than INT32_MAX nanoseconds ahead, still use INT32_MAX nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

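/*
 * Wake up any AioContexts waiting on QEMU_CLOCK_VIRTUAL and run the
 * virtual-clock timers that are already due.
 */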
static void icount_notify_aio_contexts(void)
{
    /* Wake up other AioContexts.  */
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
}

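/*
 * Called from the vCPU thread: if the QEMU_CLOCK_VIRTUAL deadline has
 * already been reached, wake the AioContexts and run the virtual-clock
 * timers now.
 */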
void icount_handle_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                  QEMU_TIMER_ATTR_ALL);

    /*
     * Instructions, interrupts, and exceptions are processed in cpu-exec.
     * Don't interrupt the CPU thread when these events are waiting
     * (i.e., there is no checkpoint).
     */
    if (deadline == 0) {
        icount_notify_aio_contexts();
    }
}

/* Distribute the budget evenly across all CPUs */
int64_t icount_percpu_budget(int cpu_count)
{
    int64_t limit = icount_get_limit();
    int64_t timeslice = limit / cpu_count;

    if (timeslice == 0) {
        timeslice = limit;
    }

    return timeslice;
}

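/*
 * Set up the instruction counters before entering the guest: clamp the
 * per-vCPU budget to the global limit, put the low 16 bits into the
 * fast-path decrementer (icount_decr.u16.low) and keep the remainder in
 * icount_extra.  A zero budget means a timer deadline has already been
 * reached, so run the timers instead.
 */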
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
{
    int insns_left;

    /*
     * These should always be cleared by icount_process_data after
     * each vCPU execution.  However, u16.high can be raised
     * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt.
     */
    g_assert(cpu->neg.icount_decr.u16.low == 0);
    g_assert(cpu->icount_extra == 0);

    replay_mutex_lock();

    cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    if (cpu->icount_budget == 0) {
        /*
         * We're called without the iothread lock, so we must take it
         * while calling the timer handlers.
         */
        qemu_mutex_lock_iothread();
        icount_notify_aio_contexts();
        qemu_mutex_unlock_iothread();
    }
}

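/*
 * Fold the instructions executed during the last slice back into the
 * virtual clock, reset the per-run counters and release the replay lock
 * taken in icount_prepare_for_run().
 */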
void icount_process_data(CPUState *cpu)
{
    /* Account for executed instructions */
    icount_update(cpu);

    /* Reset the counters */
    cpu->neg.icount_decr.u16.low = 0;
    cpu->icount_extra = 0;
    cpu->icount_budget = 0;

    replay_account_executed_instructions();

    replay_mutex_unlock();
}

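/*
 * Record an interrupt request as in plain TCG, but additionally check
 * that the vCPU did not raise a new interrupt on itself while it cannot
 * perform I/O (i.e. in the middle of a translation block), since that
 * would break deterministic execution.
 */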
void icount_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask = cpu->interrupt_request;

    tcg_handle_interrupt(cpu, mask);
    if (qemu_cpu_is_self(cpu) &&
        !cpu->neg.can_do_io &&
        (mask & ~old_mask) != 0) {
        cpu_abort(cpu, "Raised interrupt while not in I/O function");
    }
}