/*
 * Dirtyrate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/core/cpu.h"
#include "qapi/error.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"
#include "qemu/xxhash.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to count dirty pages during the period between two
 * calls of memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
        /* g_usleep may overshoot */
        msec = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - initial_time;
    }

    return msec;
}

static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                      int64_t calc_time_ms)
{
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    /*
     * multiply by 1000ms/s _before_ converting down to megabytes
     * to avoid losing precision
     */
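    /*
     * Illustrative numbers (assuming a 4 KiB target page size): 2560 pages
     * dirtied over a 1000 ms window give
     * qemu_target_pages_to_MiB(2560 * 1000) = 10000 MiB, / 1000 ms = 10 MiB/s.
     */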
    return qemu_target_pages_to_MiB(increased_dirty_pages * 1000) /
        calc_time_ms;
}

void global_dirty_log_change(unsigned int flag, bool start)
{
    Error *local_err = NULL;
    bool ret;

    bql_lock();
    if (start) {
        ret = memory_global_dirty_log_start(flag, &local_err);
        if (!ret) {
            error_report_err(local_err);
        }
    } else {
        memory_global_dirty_log_stop(flag);
    }
    bql_unlock();
}

/*
 * global_dirty_log_sync
 * 1. sync dirty log from kvm
 * 2. stop dirty tracking if needed.
 */
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
    bql_lock();
    memory_global_dirty_log_sync(false);
    if (one_shot) {
        memory_global_dirty_log_stop(flag);
    }
    bql_unlock();
}

static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat)
{
    CPUState *cpu;
    int nvcpu = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    stat->nvcpu = nvcpu;
    stat->rates = g_new0(DirtyRateVcpu, nvcpu);

    return g_new0(DirtyPageRecord, nvcpu);
}

static void vcpu_dirty_stat_collect(DirtyPageRecord *records,
                                    bool start)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        record_dirtypages(records, cpu, start);
    }
}

int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                 VcpuStat *stat,
                                 unsigned int flag,
                                 bool one_shot)
{
    DirtyPageRecord *records;
    int64_t init_time_ms;
    int64_t duration;
    int64_t dirtyrate;
    int i = 0;
    unsigned int gen_id;

retry:
    init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        gen_id = cpu_list_generation_id_get();
        records = vcpu_dirty_stat_alloc(stat);
        vcpu_dirty_stat_collect(records, true);
    }

    duration = dirty_stat_wait(calc_time_ms, init_time_ms);

    global_dirty_log_sync(flag, one_shot);

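    /*
     * Note: if the vCPU list changed (e.g. CPU hotplug) while we were
     * waiting, the per-vCPU records collected above are stale, so they
     * are dropped and the whole measurement is retried.
     */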
    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        if (gen_id != cpu_list_generation_id_get()) {
            g_free(records);
            g_free(stat->rates);
            cpu_list_unlock();
            goto retry;
        }
        vcpu_dirty_stat_collect(records, false);
    }

    for (i = 0; i < stat->nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate(records[i], duration);

        stat->rates[i].id = i;
        stat->rates[i].dirty_rate = dirtyrate;

        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);
    }

    g_free(records);

    return duration;
}

static bool is_calc_time_valid(int64_t msec)
{
    if ((msec < MIN_CALC_TIME_MS) || (msec > MAX_CALC_TIME_MS)) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

/* Decimal power of given time unit relative to one second */
static int time_unit_to_power(TimeUnit time_unit)
{
    switch (time_unit) {
    case TIME_UNIT_SECOND:
        return 0;
    case TIME_UNIT_MILLISECOND:
        return -3;
    default:
        assert(false); /* unreachable */
        return 0;
    }
}

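/*
 * Scale a value between time units by the difference of their decimal
 * powers; for example, convert_time_unit(5, TIME_UNIT_SECOND,
 * TIME_UNIT_MILLISECOND) == 5000, while converting 1500 ms back to
 * seconds truncates to 1.
 */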
static int64_t convert_time_unit(int64_t value, TimeUnit unit_from,
                                 TimeUnit unit_to)
{
    int power = time_unit_to_power(unit_from) -
                time_unit_to_power(unit_to);
    while (power < 0) {
        value /= 10;
        power += 1;
    }
    while (power > 0) {
        value *= 10;
        power -= 1;
    }
    return value;
}


static struct DirtyRateInfo *
query_dirty_rate_info(TimeUnit calc_time_unit)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_new0(DirtyRateInfo, 1);
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = convert_time_unit(DirtyStat.calc_time_ms,
                                        TIME_UNIT_MILLISECOND,
                                        calc_time_unit);
    info->calc_time_unit = calc_time_unit;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * is not enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_new0(DirtyRateVcpu, 1);
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
    DirtyStat.calc_time_ms = config.calc_time_ms;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* The last calc-dirty-rate QMP command used dirty ring mode */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB +=
        qemu_target_pages_to_MiB(info->ramblock_pages);
}

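/*
 * Extrapolate the VM dirty rate from the sampled pages. As an illustrative
 * example: if 64 of 512 sampled pages changed across 1024 MB of tracked RAM
 * during a 1000 ms window, the estimate below is
 * 64 * 1024 * 1000 / (512 * 1000) = 128 MB/s.
 */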
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Compute hash of a single page of size TARGET_PAGE_SIZE.
 */
static uint32_t compute_page_hash(void *ptr)
{
    size_t page_size = qemu_target_page_size();
    uint32_t i;
    uint64_t v1, v2, v3, v4;
    uint64_t res;
    const uint64_t *p = ptr;

    v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
    v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
    v3 = QEMU_XXHASH_SEED + 0;
    v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
    for (i = 0; i < page_size / 8; i += 4) {
        v1 = XXH64_round(v1, p[i + 0]);
        v2 = XXH64_round(v2, p[i + 1]);
        v3 = XXH64_round(v3, p[i + 2]);
        v4 = XXH64_round(v4, p[i + 3]);
    }
    res = XXH64_mergerounds(v1, v2, v3, v4);
    res += page_size;
    res = XXH64_avalanche(res);
    return (uint32_t)(res & UINT32_MAX);
}


/*
 * Get the hash of the TARGET_PAGE_SIZE-sized chunk located at page frame
 * number vfn within the ramblock, counted from the ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t hash;

    hash = compute_page_hash(info->ramblock_addr +
                             vfn * qemu_target_page_size());

    trace_get_ramblock_vfn_hash(info->idstr, vfn, hash);
    return hash;
}

static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /* ramblock smaller than one page, or nothing to sample: skip it */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

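    /*
     * For illustration: a 4 GiB RAMBlock sampled at 512 pages per GiB yields
     * 2048 sampled pages, regardless of the target page size.
     */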
    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           qemu_target_page_bits();
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE (expressed in KiB,
     * hence the << 10).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t hash;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        hash = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (hash != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, hash, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                  struct RamblockDirtyInfo *infos)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> qemu_target_page_bits())) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    return &infos[i];
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                  int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t start_time;
    DirtyPageRecord dirty_pages;
    Error *local_err = NULL;

    bql_lock();
    if (!memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE, &local_err)) {
        error_report_err(local_err);
    }

    /*
     * The first round of log sync may return an all-ones bitmap when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled, so skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync(false);

    /*
     * Reset page protection manually and unconditionally.
     * This makes sure the KVM dirty log is cleared when the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE capability is enabled.
     */
    dirtyrate_manual_reset_protect();
    bql_unlock();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;

    DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms, start_time);

    /*
     * Do two things:
     * 1. fetch the dirty bitmap from KVM
     * 2. stop dirty tracking
     */
    global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true);

    record_dirtypages_bitmap(&dirty_pages, false);

    DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages,
                                                  DirtyStat.calc_time_ms);
}

static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    int i = 0;

    /* start log sync */
    global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true);

    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;

    /* calculate vcpu dirtyrate */
    DirtyStat.calc_time_ms = vcpu_calculate_dirtyrate(config.calc_time_ms,
                                                      &DirtyStat.dirty_ring,
                                                      GLOBAL_DIRTY_DIRTY_RATE,
                                                      true);

    /* calculate vm dirtyrate */
    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
        DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
}

static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000;
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    DirtyStat.calc_time_ms = dirty_stat_wait(config.calc_time_ms,
                                             initial_time);

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(DirtyStat.calc_time_ms);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }

    rcu_unregister_thread();
    return NULL;
}

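/*
 * QMP entry point. For illustration, a typical exchange looks like:
 *   { "execute": "calc-dirty-rate", "arguments": { "calc-time": 1 } }
 *   ... wait for the measurement to finish ...
 *   { "execute": "query-dirty-rate" }
 */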
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_calc_time_unit,
                         TimeUnit calc_time_unit,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    int64_t calc_time_ms = convert_time_unit(
        calc_time,
        has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND,
        TIME_UNIT_MILLISECOND
    );

    if (!is_calc_time_valid(calc_time_ms)) {
        error_setg(errp, "Calculation time is out of range [%dms, %dms].",
                         MIN_CALC_TIME_MS, MAX_CALC_TIME_MS);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode != DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        error_setg(errp, "sample-pages is used only in page-sampling mode");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                            MIN_SAMPLE_PAGE_COUNT,
                            MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Dirty ring mode only works when the KVM dirty ring is enabled;
     * conversely, dirty bitmap mode only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use other method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.calc_time_ms = calc_time_ms;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can tell which mode was
     * used in the last calculation.
     */
    dirtyrate_mode = mode;

    init_dirtyrate_stat(config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}


struct DirtyRateInfo *qmp_query_dirty_rate(bool has_calc_time_unit,
                                           TimeUnit calc_time_unit,
                                           Error **errp)
{
    return query_dirty_rate_info(
        has_calc_time_unit ? calc_time_unit : TIME_UNIT_SECOND);
}

void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info(TIME_UNIT_SECOND);

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    if (info->mode == DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING) {
        monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                       info->sample_pages);
    }
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

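/*
 * HMP entry point; for example (mode flags omitted):
 *   (qemu) calc_dirty_rate 5
 *   (qemu) info dirty_rate
 */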
void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Either dirty ring or dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, /* calc-time */
                        false, TIME_UNIT_SECOND, /* calc-time-unit */
                        has_sample_pages, sample_pages,
                        true, mode,
                        &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}