/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

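/*
 * For reference, the usual lifecycle of a PostcopyDiscardState, as driven by
 * the bitmap-walking code on the source side, looks roughly like the sketch
 * below.  This is illustrative only; the real caller lives in the RAM
 * migration code, and block_bitmap_offset, run_start and run_length are
 * hypothetical names standing in for values taken from the dirty bitmap
 * ("pc.ram" is just an example RAMBlock name):
 *
 *     PostcopyDiscardState *pds;
 *
 *     pds = postcopy_discard_send_init(ms, block_bitmap_offset, "pc.ram");
 *     // for each run of pages the destination must discard:
 *     postcopy_discard_send_range(ms, pds, run_start, run_length);
 *     ...
 *     postcopy_discard_send_finish(ms, pds);   // flushes any queued entries
 */
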
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to map new pages in efficiently; the techniques for doing this
 * are target-OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = api_struct.features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/*
 * Note: this has the side effect of munlock'ing all of RAM; that's
 * normally fine, since if the postcopy succeeds mlock is turned back on
 * at the end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if ((1ul << qemu_target_page_bits()) > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__,  strerror(errno));
        goto out;
    }

    /*
     *  We need to check that the ops we need are supported on anon memory
     *  To do that we need to register a chunk and see the flags that
     *  are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize-1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

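/*
 * Illustrative only: a caller enabling the postcopy capability would be
 * expected to gate it on this probe, roughly as sketched below.  The real
 * check lives in the capability-setting code in migration.c and may differ;
 * the helper name used here is hypothetical.
 *
 *     static bool check_postcopy_capability(void)
 *     {
 *         if (!postcopy_ram_supported_by_host()) {
 *             error_report("Postcopy is not supported on this host");
 *             return false;
 *         }
 *         return true;
 *     }
 */
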
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;

    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(mis, block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                        ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.   It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, mis)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit; userfault_quit_fd is an eventfd
         * that should currently be at 0, and we increment it to 1.
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                    ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification on accesses to
 * pages that haven't yet been written.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however, we can also be told to quit via userfault_quit_fd, which
         * is an eventfd.
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zu",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                     rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                     rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying,
     * which would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

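/*
 * For orientation: the incoming side is expected to call into this file
 * roughly in the order sketched below.  This is a simplified outline only;
 * the real sequencing is driven by the incoming migration / RAM loading
 * code elsewhere and contains more steps than shown here.
 *
 *     postcopy_ram_incoming_init(mis, ram_pages);  // discard all RAM contents
 *     postcopy_ram_prepare_discard(mis);           // NOHUGEPAGE before discards
 *     postcopy_ram_enable_notify(mis);             // register userfault and
 *                                                  // start the fault thread
 *     // ...postcopy_place_page()/postcopy_place_page_zero() as pages arrive...
 *     postcopy_ram_incoming_cleanup(mis);          // unregister, stop thread
 */
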
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = pagesize;
    copy_struct.mode = 0;

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zu)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    trace_postcopy_place_page_zero(host);

    if (pagesize == getpagesize()) {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host;
        zero_struct.range.len = getpagesize();
        zero_struct.mode = 0;

        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   pagesize);
    }

    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page.
 * The same address is used repeatedly; postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                             PROT_READ | PROT_WRITE, MAP_PRIVATE |
                             MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

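/*
 * Typical use of the temporary page, as a rough sketch of what the incoming
 * RAM loading code is expected to do (illustrative only; the real caller is
 * in the RAM migration code and handles partial host pages, errors, etc.):
 *
 *     void *tmp = postcopy_get_tmp_page(mis);
 *
 *     // assemble one whole host page worth of data into tmp, then:
 *     if (postcopy_place_page(mis, host_page_addr, tmp, page_size)) {
 *         // handle the error; the fault stays outstanding
 *     }
 *
 * host_page_addr and page_size are placeholders for values the caller would
 * derive from the RAMBlock being loaded.
 */
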
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                        size_t pagesize)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
        res->offset = offset;
    }

    return res;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                unsigned long start, unsigned long length)
{
    size_t tp_bits = qemu_target_page_bits();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
    pds->length_list[pds->cur_entry] = length << tp_bits;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

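/*
 * As an example of how the ranges are expressed, a caller that found a run
 * of pages [first, first + npages) to discard in the migration bitmap for
 * this block would simply do (sketch only; first and npages are hypothetical
 * names, and the real bitmap walk lives in the RAM migration code):
 *
 *     postcopy_discard_send_range(ms, pds, first, npages);
 *
 * i.e. start/length are in target pages relative to the whole migration
 * bitmap; the conversion to byte offsets within the RAMBlock happens above.
 */
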
/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 * bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
774