xref: /qemu/migration/dirtyrate.c (revision 78f314cf)
/*
 * Dirty rate measurement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include <zlib.h>
#include "hw/core/cpu.h"
#include "qapi/error.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"
#include "qemu/xxhash.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to count dirty pages between two calls to
 * memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

/*
 * Wait until at least @msec milliseconds have elapsed since
 * @initial_time, then return the time actually elapsed, in ms.
 */
static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}
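
/*
 * For example: called with msec = 1000 when 1200 ms have already
 * elapsed since @initial_time, the function does not sleep and
 * returns 1200; with only 400 ms elapsed, it sleeps roughly 600 ms
 * and returns 1000.
 */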

static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;
    uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);

    return memory_size_MiB * 1000 / calc_time_ms;
}
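
/*
 * Worked example (assuming a 4 KiB target page size): 25600 pages
 * dirtied over a 1000 ms window is 100 MiB of dirtied memory, so the
 * function returns 100 * 1000 / 1000 = 100 MiB/s.
 */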

void global_dirty_log_change(unsigned int flag, bool start)
{
    qemu_mutex_lock_iothread();
    if (start) {
        memory_global_dirty_log_start(flag);
    } else {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

/*
 * global_dirty_log_sync
 * 1. sync the dirty log from KVM
 * 2. stop dirty tracking if requested
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_sync(false);
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    qemu_mutex_unlock_iothread();
}

static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}

static void vcpu_dirty_stat_collect(VcpuStat *stat,
                                    DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}

int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        gen_id = cpu_list_generation_id_get();
        records = vcpu_dirty_stat_alloc(stat);
        vcpu_dirty_stat_collect(stat, records, true);
    }

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        if (gen_id != cpu_list_generation_id_get()) {
            g_free(records);
            g_free(stat->rates);
            /*
             * The lock guard releases qemu_cpu_list_lock when the
             * goto leaves the guarded scope; an explicit unlock here
             * would unlock the mutex twice.
             */
            goto retry;
        }
        vcpu_dirty_stat_collect(stat, records, false);
    }

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}
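
/*
 * Typical use, mirroring the dirty-ring measurement path below:
 * enable dirty logging, measure for calc_time_ms, and let the helper
 * stop logging via @one_shot.  A minimal sketch:
 *
 *     VcpuStat stat;
 *     int64_t duration;
 *
 *     global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);
 *     duration = vcpu_calculate_dirtyrate(1000, &stat,
 *                                         GLOBAL_DIRTY_DIRTY_RATE,
 *                                         true);
 *     // stat.rates[i].dirty_rate now holds vCPU i's rate in MiB/s
 */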

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * isn't enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}
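
/*
 * An illustrative query-dirty-rate reply after a dirty-ring
 * measurement might look like this (field values are hypothetical):
 *
 *     {"return": {"status": "measured", "dirty-rate": 108,
 *                 "start-time": 1679027742, "calc-time": 1,
 *                 "sample-pages": 0, "mode": "dirty-ring",
 *                 "vcpu-dirty-rate": [{"id": 0, "dirty-rate": 108}]}}
 */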

static void init_dirtyrate_stat(int64_t start_time,
                                struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = config.sample_period_seconds;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* the last calc-dirty-rate QMP command used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        g_free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB +=
        qemu_target_pages_to_MiB(info->ramblock_pages);
}

static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}
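
/*
 * Worked example: with 50 of 400 sampled pages found dirty across
 * 4096 MB of sampled ramblocks over a 1000 ms window, the estimate
 * is 50 * 4096 * 1000 / (400 * 1000) = 512 MB/s.
 */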

/*
 * Compute hash of a single page of size TARGET_PAGE_SIZE.
 */
static uint32_t compute_page_hash(void *ptr)
{
    size_t page_size = qemu_target_page_size();
    uint32_t i;
    uint64_t v1, v2, v3, v4;
    uint64_t res;
    const uint64_t *p = ptr;

    v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    v3 = QEMU_XXHASH_SEED + 0;
    v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
    for (i = 0; i < page_size / 8; i += 4) {
        v1 = XXH64_round(v1, p[i + 0]);
        v2 = XXH64_round(v2, p[i + 1]);
        v3 = XXH64_round(v3, p[i + 2]);
        v4 = XXH64_round(v4, p[i + 3]);
    }
    res = XXH64_mergerounds(v1, v2, v3, v4);
    res += page_size;
    res = XXH64_avalanche(res);
    return (uint32_t)(res & UINT32_MAX);
}
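
/*
 * This is the XXH64 accumulator loop applied to a whole page: the
 * four lanes consume 32 bytes per iteration, so for page sizes that
 * are a multiple of 32 there is no tail to process.  Assuming that
 * holds, the result should match upstream xxHash's
 * XXH64(ptr, page_size, QEMU_XXHASH_SEED) truncated to 32 bits.
 */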

/*
 * Get the hash of one sampled page (TARGET_PAGE_SIZE bytes) in the
 * ramblock, where @vfn is the page's frame number relative to the
 * ramblock's base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t hash;

    hash = compute_page_hash(info->ramblock_addr +
                             vfn * qemu_target_page_size());

    trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
    return hash;
}

static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page, or no pages were
     * selected for sampling, return success to skip this ramblock.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GiB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           qemu_target_page_bits();
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}
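
/*
 * Worked example: for a 4 GiB ramblock sampled at 512 pages per GiB
 * (the default), sample_pages_count = (2^32 * 512) >> 30 = 2048
 * sampled pages; with 4 KiB target pages, ramblock_pages = 2^20.
 */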

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE, which is
     * given in KiB (hence the left shift by 10 below).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}
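
/*
 * For instance, if MIN_RAMBLOCK_SIZE is 128 (KiB), any ramblock
 * whose used length is below 128 KiB -- small ROM or device blocks,
 * say -- is excluded from sampling.
 */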

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t hash;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (hash != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t msec = 0;
    int64_t start_time;
    DirtyPageRecord dirty_pages;

    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /*
     * The first round of log sync may return all 1 bits when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled; skip it
     * unconditionally and start dirty tracking from the second
     * round of log sync.
     */
    memory_global_dirty_log_sync(false);

    /*
     * Reset page protection manually and unconditionally.
     * This makes sure the KVM dirty log is cleared when the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE capability is enabled.
     */
    dirtyrate_manual_reset_protect();
    qemu_mutex_unlock_iothread();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    /*
     * Do two things:
     * 1. fetch the dirty bitmap from KVM
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec);
}

static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    int64_t duration;
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    /* start log sync */
    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;

    /* calculate vcpu dirty rates */
    duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000,
                                        &DirtyStat.dirty_ring,
                                        GLOBAL_DIRTY_DIRTY_RATE,
                                        true);

    DirtyStat.calc_time = duration / 1000;

    /* the VM dirty rate is the sum of the per-vcpu dirty rates */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}
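
/*
 * For example, a 2-vCPU guest whose vCPUs dirtied memory at 300 and
 * 200 MiB/s over the measurement window would report per-vcpu rates
 * of 300 and 200 and a VM dirty rate of 500 MiB/s.
 */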

static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = dirty_stat_wait(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}

void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;
    int64_t start_time;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range [%d, %d].",
                         MIN_FETCH_DIRTYRATE_TIME_SEC,
                         MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range [%d, %d].",
                            MIN_SAMPLE_PAGE_COUNT,
                            MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Dirty ring mode only works when the KVM dirty ring is enabled;
     * conversely, dirty bitmap mode only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can figure out what mode
     * was used in the last calculation.
     */
    dirtyrate_mode = mode;

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    init_dirtyrate_stat(start_time, config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}
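
/*
 * Example QMP usage (a sketch; argument values are illustrative):
 *
 *     -> {"execute": "calc-dirty-rate",
 *         "arguments": {"calc-time": 1, "mode": "page-sampling",
 *                       "sample-pages": 512}}
 *     <- {"return": {}}
 *     -> {"execute": "query-dirty-rate"}
 *     <- {"return": {"status": "measured", ...}}
 */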

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info();

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Only one of dirty ring and dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true,
                        mode, &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}
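
/*
 * Example HMP usage (a sketch; output lines follow the
 * monitor_printf() calls above):
 *
 *     (qemu) calc_dirty_rate 1
 *     Starting dirty rate measurement with period 1 seconds
 *     [Please use 'info dirty_rate' to check results]
 *     (qemu) info dirty_rate
 *     Status: measured
 *     ...
 */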
881