xref: /qemu/migration/dirtyrate.c (revision 2abf0da2)
1 /*
2  * Dirtyrate implement code
3  *
4  * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
5  *
6  * Authors:
7  *  Chuan Zheng <zhengchuan@huawei.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qemu/error-report.h"
15 #include <zlib.h>
16 #include "hw/core/cpu.h"
17 #include "qapi/error.h"
18 #include "exec/ramblock.h"
19 #include "exec/target_page.h"
20 #include "qemu/rcu_queue.h"
21 #include "qemu/main-loop.h"
22 #include "qapi/qapi-commands-migration.h"
23 #include "ram.h"
24 #include "trace.h"
25 #include "dirtyrate.h"
26 #include "monitor/hmp.h"
27 #include "monitor/monitor.h"
28 #include "qapi/qmp/qdict.h"
29 #include "sysemu/kvm.h"
30 #include "sysemu/runstate.h"
31 #include "exec/memory.h"
32 #include "qemu/xxhash.h"
33 
34 /*
35  * total_dirty_pages is protected by BQL and is used
36  * to count dirty pages during the period between two
37  * memory_global_dirty_log_sync calls
38  */
39 uint64_t total_dirty_pages;
40 
41 typedef struct DirtyPageRecord {
42     uint64_t start_pages;
43     uint64_t end_pages;
44 } DirtyPageRecord;
45 
46 static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
47 static struct DirtyRateStat DirtyStat;
48 static DirtyRateMeasureMode dirtyrate_mode =
49                 DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
50 
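/*
 * Sleep until at least @msec milliseconds have passed since @initial_time
 * and return the time that actually elapsed, which may be longer than
 * requested if g_usleep() overshoots or the caller started late.
 */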
51 static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
52 {
53     int64_t current_time;
54 
55     current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56     if ((current_time - initial_time) >= msec) {
57         msec = current_time - initial_time;
58     } else {
59         g_usleep((msec + initial_time - current_time) * 1000);
60         /* g_usleep may overshoot */
61         msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
62     }
63 
64     return msec;
65 }
66 
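/*
 * Snapshot the vCPU's dirty_pages counter as either the start or the end
 * sample of a measurement window.
 */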
67 static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
68                                      CPUState *cpu, bool start)
69 {
70     if (start) {
71         dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
72     } else {
73         dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
74     }
75 }
76 
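/*
 * Turn a start/end pair of dirty page counts into a dirty rate in MiB/s
 * over a window of @calc_time_ms milliseconds.
 */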
77 static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
78                                       int64_t calc_time_ms)
79 {
80     uint64_t increased_dirty_pages =
81         dirty_pages.end_pages - dirty_pages.start_pages;
82 
83     /*
84      * multiply by 1000ms/s _before_ converting down to megabytes
85      * to avoid losing precision
86      */
87     return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
88         calc_time_ms;
89 }
90 
91 void global_dirty_log_change(unsigned int flag, bool start)
92 {
93     bql_lock();
94     if (start) {
95         memory_global_dirty_log_start(flag);
96     } else {
97         memory_global_dirty_log_stop(flag);
98     }
99     bql_unlock();
100 }
101 
102 /*
103  * global_dirty_log_sync
104  * 1. sync dirty log from kvm
105  * 2. stop dirty tracking if needed.
106  */
107 static void global_dirty_log_sync(unsigned int flag, bool one_shot)
108 {
109     bql_lock();
110     memory_global_dirty_log_sync(false);
111     if (one_shot) {
112         memory_global_dirty_log_stop(flag);
113     }
114     bql_unlock();
115 }
116 
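/*
 * Allocate per-vCPU rate slots in @stat and a matching DirtyPageRecord
 * array sized to the current number of vCPUs; called under
 * qemu_cpu_list_lock so the vCPU count cannot change underneath us.
 */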
117 static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
118 {
119     CPUState *cpu;
120     int nvcpu = 0;
121 
122     CPU_FOREACH(cpu) {
123         nvcpu++;
124     }
125 
126     stat->nvcpu = nvcpu;
127     stat->rates = g_new0(DirtyRateVcpu, nvcpu);
128 
129     return g_new0(DirtyPageRecord, nvcpu);
130 }
131 
132 static void vcpu_dirty_stat_collect(DirtyPageRecord *records,
133                                     bool start)
134 {
135     CPUState *cpu;
136 
137     CPU_FOREACH(cpu) {
138         record_dirtypages(records, cpu, start);
139     }
140 }
141 
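/*
 * Measure the per-vCPU dirty rate from the dirty page counters over a
 * window of roughly @calc_time_ms milliseconds.  If the vCPU list changes
 * during the window (detected via the cpu list generation id), the whole
 * measurement is retried.  Returns the actual measurement duration in ms.
 */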
142 int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
143                                  VcpuStat *stat,
144                                  unsigned int flag,
145                                  bool one_shot)
146 {
147     DirtyPageRecord *records;
148     int64_t init_time_ms;
149     int64_t duration;
150     int64_t dirtyrate;
151     int i = 0;
152     unsigned int gen_id;
153 
154 retry:
155     init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
156 
157     WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
158         gen_id = cpu_list_generation_id_get();
159         records = vcpu_dirty_stat_alloc(stat);
160         vcpu_dirty_stat_collect(records, true);
161     }
162 
163     duration = dirty_stat_wait(calc_time_ms, init_time_ms);
164 
165     global_dirty_log_sync(flag, one_shot);
166 
167     WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
168         if (gen_id != cpu_list_generation_id_get()) {
169             g_free(records);
170             g_free(stat->rates);
171             cpu_list_unlock();
172             goto retry;
173         }
174         vcpu_dirty_stat_collect(records, false);
175     }
176 
177     for (i = 0; i < stat->nvcpu; i++) {
178         dirtyrate = do_calculate_dirtyrate(records[i], duration);
179 
180         stat->rates[i].id = i;
181         stat->rates[i].dirty_rate = dirtyrate;
182 
183         trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
184     }
185 
186     g_free(records);
187 
188     return duration;
189 }
190 
191 static bool is_calc_time_valid(int64_t msec)
192 {
193     if ((msec < MIN_CALC_TIME_MS) || (msec > MAX_CALC_TIME_MS)) {
194         return false;
195     }
196 
197     return true;
198 }
199 
200 static bool is_sample_pages_valid(int64_t pages)
201 {
202     return pages >= MIN_SAMPLE_PAGE_COUNT &&
203            pages <= MAX_SAMPLE_PAGE_COUNT;
204 }
205 
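/*
 * Atomically move the calculation state machine from @old_state to
 * @new_state; returns 0 on success and -1 if the state had already changed.
 */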
206 static int dirtyrate_set_state(int *state, int old_state, int new_state)
207 {
208     assert(new_state < DIRTY_RATE_STATUS__MAX);
209     trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
210     if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
211         return 0;
212     } else {
213         return -1;
214     }
215 }
216 
217 /* Decimal power of given time unit relative to one second */
218 static int time_unit_to_power(TimeUnit time_unit)
219 {
220     switch (time_unit) {
221     case TIME_UNIT_SECOND:
222         return 0;
223     case TIME_UNIT_MILLISECOND:
224         return -3;
225     default:
226         assert(false); /* unreachable */
227         return 0;
228     }
229 }
230 
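/*
 * Scale @value between time units using integer arithmetic, e.g.
 * convert_time_unit(5, TIME_UNIT_SECOND, TIME_UNIT_MILLISECOND) == 5000,
 * while converting 1500 ms to seconds truncates to 1.
 */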
231 static int64_t convert_time_unit(int64_t value, TimeUnit unit_from,
232                                  TimeUnit unit_to)
233 {
234     int power = time_unit_to_power(unit_from) -
235                 time_unit_to_power(unit_to);
236     while (power < 0) {
237         value /= 10;
238         power += 1;
239     }
240     while (power > 0) {
241         value *= 10;
242         power -= 1;
243     }
244     return value;
245 }
246 
247 
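/*
 * Build the DirtyRateInfo reply returned by query-dirty-rate (and by HMP
 * "info dirty_rate"), converting the stored calculation time into the
 * requested unit and attaching per-vCPU rates when dirty ring mode was used.
 */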
248 static struct DirtyRateInfo *
249 query_dirty_rate_info(TimeUnit calc_time_unit)
250 {
251     int i;
252     int64_t dirty_rate = DirtyStat.dirty_rate;
253     struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
254     DirtyRateVcpuList *head = NULL, **tail = &head;
255 
256     info->status = CalculatingState;
257     info->start_time = DirtyStat.start_time;
258     info->calc_time = convert_time_unit(DirtyStat.calc_time_ms,
259                                         TIME_UNIT_MILLISECOND,
260                                         calc_time_unit);
261     info->calc_time_unit = calc_time_unit;
262     info->sample_pages = DirtyStat.sample_pages;
263     info->mode = dirtyrate_mode;
264 
265     if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
266         info->has_dirty_rate = true;
267         info->dirty_rate = dirty_rate;
268 
269         if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
270             /*
271              * Set sample_pages to 0 to indicate that page
272              * sampling is not enabled.
273              */
274             info->sample_pages = 0;
275             info->has_vcpu_dirty_rate = true;
276             for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
277                 DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
278                 rate->id = DirtyStat.dirty_ring.rates[i].id;
279                 rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
280                 QAPI_LIST_APPEND(tail, rate);
281             }
282             info->vcpu_dirty_rate = head;
283         }
284 
285         if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
286             info->sample_pages = 0;
287         }
288     }
289 
290     trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));
291 
292     return info;
293 }
294 
295 static void init_dirtyrate_stat(struct DirtyRateConfig config)
296 {
297     DirtyStat.dirty_rate = -1;
298     DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
299     DirtyStat.calc_time_ms = config.calc_time_ms;
300     DirtyStat.sample_pages = config.sample_pages_per_gigabytes;
301 
302     switch (config.mode) {
303     case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
304         DirtyStat.page_sampling.total_dirty_samples = 0;
305         DirtyStat.page_sampling.total_sample_count = 0;
306         DirtyStat.page_sampling.total_block_mem_MB = 0;
307         break;
308     case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
309         DirtyStat.dirty_ring.nvcpu = -1;
310         DirtyStat.dirty_ring.rates = NULL;
311         break;
312     default:
313         break;
314     }
315 }
316 
317 static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
318 {
319     /* the last calc-dirty-rate QMP command used dirty ring mode */
320     if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
321         g_free(DirtyStat.dirty_ring.rates);
322         DirtyStat.dirty_ring.rates = NULL;
323     }
324 }
325 
326 static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
327 {
328     DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
329     DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
330     /* size of total pages in MB */
331     DirtyStat.page_sampling.total_block_mem_MB +=
332         qemu_target_pages_to_MiB(info->ramblock_pages);
333 }
334 
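/*
 * Estimate the VM dirty rate in MB/s from the page-sampling statistics:
 * scale the dirty fraction of the sampled pages up to the total sampled
 * memory and normalize the measurement window to one second, i.e.
 * rate = (dirty_samples / sample_count) * total_block_mem_MB * (1000 / msec).
 */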
335 static void update_dirtyrate(uint64_t msec)
336 {
337     uint64_t dirtyrate;
338     uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
339     uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
340     uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;
341 
342     dirtyrate = total_dirty_samples * total_block_mem_MB *
343                 1000 / (total_sample_count * msec);
344 
345     DirtyStat.dirty_rate = dirtyrate;
346 }
347 
348 /*
349  * Compute hash of a single page of size TARGET_PAGE_SIZE.
350  */
351 static uint32_t compute_page_hash(void *ptr)
352 {
353     size_t page_size = qemu_target_page_size();
354     uint32_t i;
355     uint64_t v1, v2, v3, v4;
356     uint64_t res;
357     const uint64_t *p = ptr;
358 
359     v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
360     v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
361     v3 = QEMU_XXHASH_SEED + 0;
362     v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
363     for (i = 0; i < page_size / 8; i += 4) {
364         v1 = XXH64_round(v1, p[i + 0]);
365         v2 = XXH64_round(v2, p[i + 1]);
366         v3 = XXH64_round(v3, p[i + 2]);
367         v4 = XXH64_round(v4, p[i + 3]);
368     }
369     res = XXH64_mergerounds(v1, v2, v3, v4);
370     res += page_size;
371     res = XXH64_avalanche(res);
372     return (uint32_t)(res & UINT32_MAX);
373 }
374 
375 
376 /*
377  * Get the hash of the sampled page of length TARGET_PAGE_SIZE in the
378  * ramblock, where @vfn is the page index from the ramblock base address.
379  */
380 static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
381                                       uint64_t vfn)
382 {
383     uint32_t hash;
384 
385     hash = compute_page_hash(info->ramblock_addr +
386                              vfn * qemu_target_page_size());
387 
388     trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
389     return hash;
390 }
391 
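/*
 * Pick sample_pages_count random page positions in the ramblock and record
 * their hashes as the baseline that calc_page_dirty_rate() later compares
 * against.
 */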
392 static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
393 {
394     unsigned int sample_pages_count;
395     int i;
396     GRand *rand;
397 
398     sample_pages_count = info->sample_pages_count;
399 
400     /* the ramblock is smaller than one page or has nothing to sample: report success to skip it */
401     if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
402         return true;
403     }
404 
405     info->hash_result = g_try_malloc0_n(sample_pages_count,
406                                         sizeof(uint32_t));
407     if (!info->hash_result) {
408         return false;
409     }
410 
411     info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
412                                             sizeof(uint64_t));
413     if (!info->sample_page_vfn) {
414         g_free(info->hash_result);
415         return false;
416     }
417 
418     rand = g_rand_new();
419     for (i = 0; i < sample_pages_count; i++) {
420         info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
421                                                     info->ramblock_pages - 1);
422         info->hash_result[i] = get_ramblock_vfn_hash(info,
423                                                      info->sample_page_vfn[i]);
424     }
425     g_rand_free(rand);
426 
427     return true;
428 }
429 
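/*
 * Fill @info with the sampling parameters for @block: how many pages to
 * sample (scaled by the block size in GiB), the total page count, the host
 * address and the idstr used to match the block again later.
 */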
430 static void get_ramblock_dirty_info(RAMBlock *block,
431                                     struct RamblockDirtyInfo *info,
432                                     struct DirtyRateConfig *config)
433 {
434     uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
435 
436     /* Right shift 30 bits to calc ramblock size in GB */
437     info->sample_pages_count = (qemu_ram_get_used_length(block) *
438                                 sample_pages_per_gigabytes) >> 30;
439     /* Right shift TARGET_PAGE_BITS to calc page count */
440     info->ramblock_pages = qemu_ram_get_used_length(block) >>
441                            qemu_target_page_bits();
442     info->ramblock_addr = qemu_ram_get_host_addr(block);
443     strcpy(info->idstr, qemu_ram_get_idstr(block));
444 }
445 
446 static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
447 {
448     int i;
449 
450     if (!infos) {
451         return;
452     }
453 
454     for (i = 0; i < count; i++) {
455         g_free(infos[i].sample_page_vfn);
456         g_free(infos[i].hash_result);
457     }
458     g_free(infos);
459 }
460 
461 static bool skip_sample_ramblock(RAMBlock *block)
462 {
463     /*
464      * Sample only blocks larger than MIN_RAMBLOCK_SIZE.
465      */
466     if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
467         trace_skip_sample_ramblock(block->idstr,
468                                    qemu_ram_get_used_length(block));
469         return true;
470     }
471 
472     return false;
473 }
474 
475 static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
476                                       struct DirtyRateConfig config,
477                                       int *block_count)
478 {
479     struct RamblockDirtyInfo *info = NULL;
480     struct RamblockDirtyInfo *dinfo = NULL;
481     RAMBlock *block = NULL;
482     int total_count = 0;
483     int index = 0;
484     bool ret = false;
485 
486     RAMBLOCK_FOREACH_MIGRATABLE(block) {
487         if (skip_sample_ramblock(block)) {
488             continue;
489         }
490         total_count++;
491     }
492 
493     dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
494     if (dinfo == NULL) {
495         goto out;
496     }
497 
498     RAMBLOCK_FOREACH_MIGRATABLE(block) {
499         if (skip_sample_ramblock(block)) {
500             continue;
501         }
502         if (index >= total_count) {
503             break;
504         }
505         info = &dinfo[index];
506         get_ramblock_dirty_info(block, info, &config);
507         if (!save_ramblock_hash(info)) {
508             goto out;
509         }
510         index++;
511     }
512     ret = true;
513 
514 out:
515     *block_count = index;
516     *block_dinfo = dinfo;
517     return ret;
518 }
519 
520 static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
521 {
522     uint32_t hash;
523     int i;
524 
525     for (i = 0; i < info->sample_pages_count; i++) {
526         hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
527         if (hash != info->hash_result[i]) {
528             trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
529             info->sample_dirty_count++;
530         }
531     }
532 }
533 
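/*
 * Find the RamblockDirtyInfo recorded for @block at the start of the
 * measurement; returns NULL if the block is new or its address/size has
 * changed in the meantime.
 */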
534 static struct RamblockDirtyInfo *
535 find_block_matched(RAMBlock *block, int count,
536                   struct RamblockDirtyInfo *infos)
537 {
538     int i;
539 
540     for (i = 0; i < count; i++) {
541         if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
542             break;
543         }
544     }
545 
546     if (i == count) {
547         return NULL;
548     }
549 
550     if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
551         infos[i].ramblock_pages !=
552             (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
553         trace_find_page_matched(block->idstr);
554         return NULL;
555     }
556 
557     return &infos[i];
558 }
559 
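/*
 * Re-hash the sampled pages of every migratable ramblock and accumulate the
 * dirty statistics; returns false if no page ended up being sampled.
 */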
560 static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
561                                   int block_count)
562 {
563     struct RamblockDirtyInfo *block_dinfo = NULL;
564     RAMBlock *block = NULL;
565 
566     RAMBLOCK_FOREACH_MIGRATABLE(block) {
567         if (skip_sample_ramblock(block)) {
568             continue;
569         }
570         block_dinfo = find_block_matched(block, block_count, info);
571         if (block_dinfo == NULL) {
572             continue;
573         }
574         calc_page_dirty_rate(block_dinfo);
575         update_dirtyrate_stat(block_dinfo);
576     }
577 
578     if (DirtyStat.page_sampling.total_sample_count == 0) {
579         return false;
580     }
581 
582     return true;
583 }
584 
585 static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
586                                             bool start)
587 {
588     if (start) {
589         dirty_pages->start_pages = total_dirty_pages;
590     } else {
591         dirty_pages->end_pages = total_dirty_pages;
592     }
593 }
594 
595 static inline void dirtyrate_manual_reset_protect(void)
596 {
597     RAMBlock *block = NULL;
598 
599     WITH_RCU_READ_LOCK_GUARD() {
600         RAMBLOCK_FOREACH_MIGRATABLE(block) {
601             memory_region_clear_dirty_bitmap(block->mr, 0,
602                                              block->used_length);
603         }
604     }
605 }
606 
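/*
 * Dirty bitmap mode: count the VM-wide number of dirtied pages
 * (total_dirty_pages) accumulated during the calculation window.
 */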
607 static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
608 {
609     int64_t start_time;
610     DirtyPageRecord dirty_pages;
611 
612     bql_lock();
613     memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
614 
615     /*
616      * The first round of log sync may return all-1 bits when
617      * KVM_DIRTY_LOG_INITIALLY_SET is enabled, so skip it
618      * unconditionally and start dirty tracking from the
619      * second round of log sync.
620      */
621     memory_global_dirty_log_sync(false);
622 
623     /*
624      * Reset page protection manually and unconditionally.
625      * This makes sure the KVM dirty log is cleared if the
626      * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled.
627      */
628     dirtyrate_manual_reset_protect();
629     bql_unlock();
630 
631     record_dirtypages_bitmap(&dirty_pages, true);
632 
633     start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
634     DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
635 
636     DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms, start_time);
637 
638     /*
639      * Do two things:
640      * 1. fetch the dirty bitmap from KVM
641      * 2. stop dirty tracking
642      */
643     global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);
644 
645     record_dirtypages_bitmap(&dirty_pages, false);
646 
647     DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages,
648                                                   DirtyStat.calc_time_ms);
649 }
650 
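/*
 * Dirty ring mode: measure a per-vCPU dirty rate and report their sum as
 * the VM dirty rate.
 */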
651 static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
652 {
653     uint64_t dirtyrate = 0;
654     uint64_t dirtyrate_sum = 0;
655     int i = 0;
656 
657     /* start log sync */
658     global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);
659 
660     DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
661 
662     /* calculate vcpu dirtyrate */
663     DirtyStat.calc_time_ms = vcpu_calculate_dirtyrate(config.calc_time_ms,
664                                                       &DirtyStat.dirty_ring,
665                                                       GLOBAL_DIRTY_DIRTY_RATE,
666                                                       true);
667 
668     /* calculate vm dirtyrate */
669     for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
670         dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
671         DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
672         dirtyrate_sum += dirtyrate;
673     }
674 
675     DirtyStat.dirty_rate = dirtyrate_sum;
676 }
677 
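/*
 * Page sampling mode: hash randomly chosen pages, wait for the calculation
 * window, re-hash them and scale the observed dirty fraction up to the
 * whole guest memory.
 */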
678 static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
679 {
680     struct RamblockDirtyInfo *block_dinfo = NULL;
681     int block_count = 0;
682     int64_t initial_time;
683 
684     rcu_read_lock();
685     initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
686     DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
687     if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
688         goto out;
689     }
690     rcu_read_unlock();
691 
692     DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms,
693                                              initial_time);
694 
695     rcu_read_lock();
696     if (!compare_page_hash_info(block_dinfo, block_count)) {
697         goto out;
698     }
699 
700     update_dirtyrate(DirtyStat.calc_time_ms);
701 
702 out:
703     rcu_read_unlock();
704     free_ramblock_dirty_info(block_dinfo, block_count);
705 }
706 
707 static void calculate_dirtyrate(struct DirtyRateConfig config)
708 {
709     if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
710         calculate_dirtyrate_dirty_bitmap(config);
711     } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
712         calculate_dirtyrate_dirty_ring(config);
713     } else {
714         calculate_dirtyrate_sample_vm(config);
715     }
716 
717     trace_dirtyrate_calculate(DirtyStat.dirty_rate);
718 }
719 
720 void *get_dirtyrate_thread(void *arg)
721 {
722     struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
723     int ret;
724     rcu_register_thread();
725 
726     ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
727                               DIRTY_RATE_STATUS_MEASURING);
728     if (ret == -1) {
729         error_report("change dirtyrate state failed.");
730         return NULL;
731     }
732 
733     calculate_dirtyrate(config);
734 
735     ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
736                               DIRTY_RATE_STATUS_MEASURED);
737     if (ret == -1) {
738         error_report("change dirtyrate state failed.");
739     }
740 
741     rcu_unregister_thread();
742     return NULL;
743 }
744 
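/*
 * Minimal QMP usage sketch (argument spellings assumed to match the QAPI
 * schema in qapi/migration.json):
 *   { "execute": "calc-dirty-rate",
 *     "arguments": { "calc-time": 1, "mode": "page-sampling" } }
 *   ... wait for the measurement window to elapse ...
 *   { "execute": "query-dirty-rate" }
 */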
745 void qmp_calc_dirty_rate(int64_t calc_time,
746                          bool has_calc_time_unit,
747                          TimeUnit calc_time_unit,
748                          bool has_sample_pages,
749                          int64_t sample_pages,
750                          bool has_mode,
751                          DirtyRateMeasureMode mode,
752                          Error **errp)
753 {
754     static struct DirtyRateConfig config;
755     QemuThread thread;
756     int ret;
757 
758     /*
759      * If the dirty rate is already being measured, don't attempt to start.
760      */
761     if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
762         error_setg(errp, "the dirty rate is already being measured.");
763         return;
764     }
765 
766     int64_t calc_time_ms = convert_time_unit(
767         calc_time,
768         has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND,
769         TIME_UNIT_MILLISECOND
770     );
771 
772     if (!is_calc_time_valid(calc_time_ms)) {
773         error_setg(errp, "Calculation time is out of range [%dms, %dms].",
774                          MIN_CALC_TIME_MS, MAX_CALC_TIME_MS);
775         return;
776     }
777 
778     if (!has_mode) {
779         mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
780     }
781 
782     if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
783         error_setg(errp, "sample-pages is used only in page-sampling mode");
784         return;
785     }
786 
787     if (has_sample_pages) {
788         if (!is_sample_pages_valid(sample_pages)) {
789             error_setg(errp, "sample-pages is out of range [%d, %d].",
790                             MIN_SAMPLE_PAGE_COUNT,
791                             MAX_SAMPLE_PAGE_COUNT);
792             return;
793         }
794     } else {
795         sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
796     }
797 
798     /*
799      * Dirty ring mode only works when the KVM dirty ring is enabled;
800      * conversely, dirty bitmap mode only works when it is not.
801      */
802     if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
803         !kvm_dirty_ring_enabled()) ||
804         ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
805          kvm_dirty_ring_enabled())) {
806         error_setg(errp, "mode %s is not enabled, use another method instead.",
807                          DirtyRateMeasureMode_str(mode));
808         return;
809     }
810 
811     /*
812      * Init calculation state as unstarted.
813      */
814     ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
815                               DIRTY_RATE_STATUS_UNSTARTED);
816     if (ret == -1) {
817         error_setg(errp, "init dirty rate calculation state failed.");
818         return;
819     }
820 
821     config.calc_time_ms = calc_time_ms;
822     config.sample_pages_per_gigabytes = sample_pages;
823     config.mode = mode;
824 
825     cleanup_dirtyrate_stat(config);
826 
827     /*
828      * Update the dirty rate mode so that we can figure out what mode was
829      * used in the last calculation.
830      */
831     dirtyrate_mode = mode;
832 
833     init_dirtyrate_stat(config);
834 
835     qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
836                        (void *)&config, QEMU_THREAD_DETACHED);
837 }
838 
839 
840 struct DirtyRateInfo *qmp_query_dirty_rate(bool has_calc_time_unit,
841                                            TimeUnit calc_time_unit,
842                                            Error **errp)
843 {
844     return query_dirty_rate_info(
845         has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND);
846 }
847 
848 void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
849 {
850     DirtyRateInfo *info = query_dirty_rate_info(TIME_UNIT_SECOND);
851 
852     monitor_printf(mon, "Status: %s\n",
853                    DirtyRateStatus_str(info->status));
854     monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
855                    info->start_time);
856     if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
857         monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
858                        info->sample_pages);
859     }
860     monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
861                    info->calc_time);
862     monitor_printf(mon, "Mode: %s\n",
863                    DirtyRateMeasureMode_str(info->mode));
864     monitor_printf(mon, "Dirty rate: ");
865     if (info->has_dirty_rate) {
866         monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
867         if (info->has_vcpu_dirty_rate) {
868             DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
869             for (rate = head; rate != NULL; rate = rate->next) {
870                 monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
871                                " (MB/s)\n", rate->value->id,
872                                rate->value->dirty_rate);
873             }
874         }
875     } else {
876         monitor_printf(mon, "(not ready)\n");
877     }
878 
879     qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
880     g_free(info);
881 }
882 
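/*
 * HMP front end: takes the period in seconds plus optional sample-pages and
 * mode selection flags from the monitor command line and forwards them to
 * qmp_calc_dirty_rate().
 */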
883 void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
884 {
885     int64_t sec = qdict_get_try_int(qdict, "second", 0);
886     int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
887     bool has_sample_pages = (sample_pages != -1);
888     bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
889     bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
890     DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
891     Error *err = NULL;
892 
893     if (!sec) {
894         monitor_printf(mon, "Incorrect period length specified!\n");
895         return;
896     }
897 
898     if (dirty_ring && dirty_bitmap) {
899         monitor_printf(mon, "Either dirty ring or dirty bitmap "
900                        "can be specified!\n");
901         return;
902     }
903 
904     if (dirty_bitmap) {
905         mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
906     } else if (dirty_ring) {
907         mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
908     }
909 
910     qmp_calc_dirty_rate(sec, /* calc-time */
911                         false, TIME_UNIT_SECOND, /* calc-time-unit */
912                         has_sample_pages, sample_pages,
913                         true, mode,
914                         &err);
915     if (err) {
916         hmp_handle_error(mon, err);
917         return;
918     }
919 
920     monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
921                    " seconds\n", sec);
922     monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
923 }
924