1# Copyright (C) 2006, 2013, 2014 Red Hat, Inc.
2# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
3#
4# This work is licensed under the GNU GPLv2 or later.
5# See the COPYING file in the top-level directory.
6
7import os
8import time
9import threading
10
11import libvirt
12
13from virtinst import DeviceConsole
14from virtinst import DeviceController
15from virtinst import DeviceDisk
16from virtinst import DomainSnapshot
17from virtinst import Guest
18from virtinst import log
19
20from .libvirtobject import vmmLibvirtObject
21from ..baseclass import vmmGObject
22from ..lib.libvirtenummap import LibvirtEnumMap
23from ..lib import testmock
24
25
26class _SENTINEL(object):
27    pass
28
29
30def start_job_progress_thread(vm, meter, progtext):
31    current_thread = threading.currentThread()
32
33    def jobinfo_cb():
34        while True:
35            time.sleep(.5)
36
37            if not current_thread.is_alive():
38                return
39
40            try:
41                jobinfo = vm.job_info()
42                data_total      = float(jobinfo[3])
43                # data_processed  = float(jobinfo[4])
44                data_remaining  = float(jobinfo[5])
45
46                # data_total is 0 if the job hasn't started yet
47                if not data_total:
48                    continue  # pragma: no cover
49
50                if not meter.started:
51                    meter.start(size=data_total,
52                                text=progtext)
53
54                progress = data_total - data_remaining
55                meter.update(progress)
56            except Exception:  # pragma: no cover
57                log.exception("Error calling jobinfo")
58                return
59
60    if vm.supports_domain_job_info():
61        t = threading.Thread(target=jobinfo_cb,
62                             name="job progress reporting",
63                             args=())
64        t.daemon = True
65        t.start()
66
67
68class _IPFetcher:
69    """
70    Helper class to contain all IP fetching and processing logic
71    """
72    def __init__(self):
73        self._cache = None
74
75    def refresh(self, vm, iface):
76        self._cache = {"qemuga": {}, "arp": {}}
77
78        if iface.type == "network":
79            net = vm.conn.get_net_by_name(iface.source)
80            if net:
81                net.get_dhcp_leases(refresh=True)
82
83        if not vm.is_active():
84            return
85
86        if vm.agent_ready():
87            self._cache["qemuga"] = vm.get_interface_addresses(
88                iface,
89                libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT)
90
91        arp_flag = getattr(libvirt,
92            "VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_ARP", 3)
93        self._cache["arp"] = vm.get_interface_addresses(iface, arp_flag)
94
95    def get(self, vm, iface):
96        if self._cache is None:
97            self.refresh(vm, iface)
98
99        qemuga = self._cache["qemuga"]
100        arp = self._cache["arp"]
101        leases = []
102        if iface.type == "network":
103            net = vm.conn.get_net_by_name(iface.source)
104            if net:
105                leases = net.get_dhcp_leases()
106
107        def extract_dom(addrs):
108            ipv4 = None
109            ipv6 = None
110            if addrs["hwaddr"] == iface.macaddr:
111                for addr in (addrs["addrs"] or []):
112                    if addr["type"] == 0:
113                        ipv4 = addr["addr"]
114                    elif (addr["type"] == 1 and
115                          not str(addr["addr"]).startswith("fe80")):
116                        ipv6 = addr["addr"] + "/" + str(addr["prefix"])
117            return ipv4, ipv6
118
119        def extract_lease(lease):
120            ipv4 = None
121            ipv6 = None
122            mac = lease["mac"]
123            if vm.conn.is_test():
124                # Hack it to match our interface for UI testing
125                mac = iface.macaddr
126            if mac == iface.macaddr:
127                if lease["type"] == 0:
128                    ipv4 = lease["ipaddr"]
129                elif lease["type"] == 1:
130                    ipv6 = lease["ipaddr"]
131            return ipv4, ipv6
132
133
134        for datalist in [list(qemuga.values()), leases, list(arp.values())]:
135            ipv4 = None
136            ipv6 = None
137            for data in datalist:
138                if "expirytime" in data:
139                    tmpipv4, tmpipv6 = extract_lease(data)
140                else:
141                    tmpipv4, tmpipv6 = extract_dom(data)
142                ipv4 = tmpipv4 or ipv4
143                ipv6 = tmpipv6 or ipv6
144            if ipv4 or ipv6:
145                return ipv4, ipv6
146        return None, None
147
148
149class vmmInspectionApplication(object):
150    def __init__(self):
151        self.name = None
152        self.display_name = None
153        self.epoch = None
154        self.version = None
155        self.release = None
156        self.summary = None
157        self.description = None
158
159
160class vmmInspectionData(object):
161    def __init__(self):
162        self.os_type = None
163        self.distro = None
164        self.major_version = None
165        self.minor_version = None
166        self.hostname = None
167        self.product_name = None
168        self.product_variant = None
169        self.icon = None
170        self.applications = None
171        self.errorstr = None
172        self.package_format = None
173
174
175class vmmDomainSnapshot(vmmLibvirtObject):
176    """
177    Class wrapping a virDomainSnapshot object
178    """
179    def __init__(self, conn, backend):
180        vmmLibvirtObject.__init__(self, conn, backend, backend.getName(),
181                                  DomainSnapshot)
182
183
184    ##########################
185    # Required class methods #
186    ##########################
187
188    def _conn_tick_poll_param(self):
189        return None  # pragma: no cover
190    def class_name(self):
191        return "snapshot"  # pragma: no cover
192
193    def _XMLDesc(self, flags):
194        return self._backend.getXMLDesc(flags=flags)
195    def _get_backend_status(self):
196        return self._STATUS_ACTIVE
197
198
199    ###########
200    # Actions #
201    ###########
202
203    def delete(self, force=True):
204        ignore = force
205        self._backend.delete()
206
207    def _state_str_to_int(self):
208        state = self.get_xmlobj().state
209        statemap = {
210            "nostate": libvirt.VIR_DOMAIN_NOSTATE,
211            "running": libvirt.VIR_DOMAIN_RUNNING,
212            "blocked": libvirt.VIR_DOMAIN_BLOCKED,
213            "paused": libvirt.VIR_DOMAIN_PAUSED,
214            "shutdown": libvirt.VIR_DOMAIN_SHUTDOWN,
215            "shutoff": libvirt.VIR_DOMAIN_SHUTOFF,
216            "crashed": libvirt.VIR_DOMAIN_CRASHED,
217            "pmsuspended": getattr(libvirt, "VIR_DOMAIN_PMSUSPENDED", 7)
218        }
219        return statemap.get(state, libvirt.VIR_DOMAIN_SHUTOFF)
220
221    def run_status(self):
222        status = self._state_str_to_int()
223        return LibvirtEnumMap.pretty_run_status(status, False)
224    def run_status_icon_name(self):
225        status = self._state_str_to_int()
226        if status not in LibvirtEnumMap.VM_STATUS_ICONS:  # pragma: no cover
227            log.debug("Unknown status %d, using NOSTATE", status)
228            status = libvirt.VIR_DOMAIN_NOSTATE
229        return LibvirtEnumMap.VM_STATUS_ICONS[status]
230    def is_running(self):
231        """
232        Captured state is a running domain.
233        """
234        return self._state_str_to_int() in [libvirt.VIR_DOMAIN_RUNNING]
235    def has_run_state(self):
236        """
237        Captured state contains run state in addition to disk state.
238        """
239        return self._state_str_to_int() in [libvirt.VIR_DOMAIN_RUNNING,
240                                            libvirt.VIR_DOMAIN_PAUSED]
241
242    def is_current(self):
243        return self._backend.isCurrent()
244    def is_external(self):
245        if self.get_xmlobj().memory_type == "external":
246            return True
247        for disk in self.get_xmlobj().disks:
248            if disk.snapshot == "external":
249                return True
250        return False
251
252
253class _vmmDomainSetTimeThread(vmmGObject):
254    """
255    A separate thread handling time setting operations as not to block the main
256    UI.
257    """
258    def __init__(self, domain):
259        vmmGObject.__init__(self)
260        self._domain = domain
261        self._do_cancel = threading.Event()
262        self._do_cancel.clear()
263        self._thread = None
264        self._maxwait = 30
265        self._sleep = 0.5
266
267    def start(self):
268        """
269        Start time setting thread if setting time is supported by the
270        connection. Stop the old thread first. May block until the old thread
271        terminates.
272        """
273        self.stop()
274
275        # Only run the API for qemu and test drivers, they are the only ones
276        # that support it. This will save spamming logs with error output.
277        if (not self._domain.conn.is_qemu() and
278            not self._domain.conn.is_test()):
279            return  # pragma: no cover
280
281        # For qemu, only run the API if the VM has the qemu guest agent in
282        # the XML.
283        if self._domain.conn.is_qemu() and not self._domain.has_agent():
284            return
285
286        log.debug("Starting time setting thread")
287        self._thread = threading.Thread(name='settime thread',
288                                        target=self._do_loop)
289        self._thread.start()
290
291    def stop(self):
292        """
293        Signal running thread to terminate and wait for it to do so.
294        """
295        if not self._thread:
296            return
297
298        log.debug("Stopping time setting thread")
299        self._do_cancel.set()
300        # thread may be in a loop waiting for an agent to come online or just
301        # waiting for a set time operation to finish
302        self._thread.join()
303        self._thread = None
304        self._do_cancel.clear()
305
306    def _wait_for_agent(self):
307        # Setting time of a qemu domain can only work if an agent is
308        # defined and online. We only get here if one is defined. So wait
309        # for it to come online now.
310        waited = 0
311        while waited < self._maxwait and not self._domain.agent_ready():
312            if waited == 0:
313                log.debug("Waiting for qemu guest agent to come online...")
314
315            # sleep some time and potentially abort
316            if self._do_cancel.wait(self._sleep):
317                return
318
319            waited += self._sleep
320
321        if not self._domain.agent_ready():  # pragma: no cover
322            log.debug("Giving up on qemu guest agent for time sync")
323            return
324
325    def _do_loop(self):
326        """
327        Run the domain's set time operation. Potentially wait for a guest agent
328        to come online beforehand.
329        """
330        if self._domain.conn.is_qemu():
331            self._wait_for_agent()
332        self._domain.set_time()
333
334    def _cleanup(self):
335        self.stop()
336
337
338class vmmDomain(vmmLibvirtObject):
339    """
340    Class wrapping virDomain libvirt objects. Is also extended to be
341    backed by a virtinst.Guest object for new VM 'customize before install'
342    """
343    __gsignals__ = {
344        "resources-sampled": (vmmLibvirtObject.RUN_FIRST, None, []),
345        "inspection-changed": (vmmLibvirtObject.RUN_FIRST, None, []),
346    }
347
348    def __init__(self, conn, backend, key):
349        vmmLibvirtObject.__init__(self, conn, backend, key, Guest)
350
351        self.cloning = False
352
353        self._install_abort = False
354        self._id = None
355        self._uuid = None
356        self._has_managed_save = None
357        self._snapshot_list = None
358        self._autostart = None
359        self._domain_caps = None
360        self._status_reason = None
361        self._ipfetcher = _IPFetcher()
362
363        self.managedsave_supported = False
364        self._domain_state_supported = False
365
366        self.inspection = vmmInspectionData()
367        self._set_time_thread = _vmmDomainSetTimeThread(self)
368
369    def _cleanup(self):
370        for snap in self._snapshot_list or []:
371            snap.cleanup()
372        self._snapshot_list = None
373        self._set_time_thread.cleanup()
374        self._set_time_thread = None
375        vmmLibvirtObject._cleanup(self)
376
377    def _init_libvirt_state(self):
378        self.managedsave_supported = self.conn.support.domain_managed_save(self._backend)
379        self._domain_state_supported = self.conn.support.domain_state(self._backend)
380
381        # Determine available XML flags (older libvirt versions will error
382        # out if passed SECURE_XML, INACTIVE_XML, etc)
383        (self._inactive_xml_flags,
384         self._active_xml_flags) = self.conn.get_dom_flags(self._backend)
385
386        # Prime caches
387        info = self._backend.info()
388        self._refresh_status(newstatus=info[0])
389        self.has_managed_save()
390        self.snapshots_supported()
391
392        if (self.get_name() == "Domain-0" and
393            self.get_uuid() == "00000000-0000-0000-0000-000000000000"):
394            # We don't want virt-manager to track Domain-0 since it
395            # doesn't work with our UI. Raising an error will ensures it
396            # is denylisted.
397            raise RuntimeError(  # pragma: no cover
398                    "Can't track Domain-0 as a vmmDomain")
399
400
401    ###########################
402    # Misc API getter methods #
403    ###########################
404
405    def reports_stats(self):
406        return True
407    def _using_events(self):
408        return self.conn.using_domain_events
409
410    def get_id(self):
411        if self._id is None:
412            self._id = self._backend.ID()
413        return self._id
414
415    def status(self):
416        return self._normalize_status(self._get_status())
417
418    def status_reason(self):
419        if self._status_reason is None:
420            self._status_reason = 1
421            if self._domain_state_supported:
422                self._status_reason = self._backend.state()[1]
423        return self._status_reason
424
425    # If manual shutdown or destroy specified, make sure we don't continue
426    # install process
427    def get_install_abort(self):
428        return bool(self._install_abort)
429
430    def has_spicevmc_type_redirdev(self):
431        devs = self.xmlobj.devices.redirdev
432        for dev in devs:
433            if dev.type == "spicevmc":
434                return True
435        return False
436
437    def has_nvram(self):
438        return bool(self.get_xmlobj().os.firmware == 'efi' or
439                    (self.get_xmlobj().os.loader_ro is True and
440                     self.get_xmlobj().os.loader_type == "pflash" and
441                     self.get_xmlobj().os.nvram))
442
443    def is_persistent(self):
444        return bool(self._backend.isPersistent())
445
446    ##################
447    # Support checks #
448    ##################
449
450    def supports_domain_job_info(self):
451        if self.conn.is_test():
452            # jobinfo isn't actually supported but this tests more code
453            return True
454        return self.conn.support.domain_job_info(self._backend)
455
456    def snapshots_supported(self):
457        if not self.conn.support.domain_list_snapshots(self._backend):
458            return _("Libvirt connection does not support snapshots.")
459
460        if self.list_snapshots():
461            return
462
463        # Check if our disks are all qcow2
464        seen_qcow2 = False
465        for disk in self.get_disk_devices_norefresh():
466            if disk.read_only:
467                continue
468            if disk.is_empty():
469                continue
470            if disk.driver_type == "qcow2":
471                seen_qcow2 = True
472                continue
473            return _("Snapshots are only supported if all writeable disks "
474                     "images allocated to the guest are qcow2 format.")
475        if not seen_qcow2:
476            return _("Snapshots require at least one writeable qcow2 disk "
477                     "image allocated to the guest.")
478
479    def get_domain_capabilities(self):
480        if not self._domain_caps:
481            self._domain_caps = self.get_xmlobj().lookup_domcaps()
482        return self._domain_caps
483
484
485    #############################
486    # Internal XML handling API #
487    #############################
488
489    def _invalidate_xml(self):
490        vmmLibvirtObject._invalidate_xml(self)
491        self._id = None
492        self._status_reason = None
493        self._has_managed_save = None
494
495    def _lookup_device_to_define(self, xmlobj, origdev, for_hotplug):
496        if for_hotplug:
497            return origdev
498
499        dev = xmlobj.find_device(origdev)
500        if dev:
501            return dev
502
503        # If we are removing multiple dev from an active VM, a double
504        # attempt may result in a lookup failure. If device is present
505        # in the active XML, assume all is good.
506        if self.get_xmlobj().find_device(origdev):  # pragma: no cover
507            log.debug("Device in active config but not inactive config.")
508            return
509
510        raise RuntimeError(  # pragma: no cover
511                _("Could not find specified device in the "
512                  "inactive VM configuration: %s") % repr(origdev))
513
514    def _process_device_define(self, editdev, xmlobj, do_hotplug):
515        if do_hotplug:
516            self.hotplug(device=editdev)
517        else:
518            self._redefine_xmlobj(xmlobj)
519
520    def _copy_nvram_file(self, new_name):
521        """
522        We need to do this copy magic because there is no Libvirt storage API
523        to rename storage volume.
524        """
525        from virtinst import Cloner
526        old_nvram = DeviceDisk(self.conn.get_backend())
527        old_nvram.set_source_path(self.get_xmlobj().os.nvram)
528
529        nvram_dir = os.path.dirname(old_nvram.get_source_path())
530        new_nvram_path = os.path.join(nvram_dir,
531                "%s_VARS.fd" % os.path.basename(new_name))
532
533        new_nvram = Cloner.build_clone_disk(
534                old_nvram, new_nvram_path, True, False)
535
536        new_nvram.build_storage(None)
537        return new_nvram, old_nvram
538
539
540    ##############################
541    # Persistent XML change APIs #
542    ##############################
543
544    def rename_domain(self, new_name):
545        Guest.validate_name(self.conn.get_backend(), str(new_name))
546
547        new_nvram = None
548        old_nvram = None
549        if self.has_nvram():
550            new_nvram, old_nvram = self._copy_nvram_file(new_name)
551
552        try:
553            self.define_name(new_name)
554        except Exception as error:
555            if new_nvram:
556                try:
557                    new_nvram.get_vol_object().delete(0)
558                except Exception as warn:  # pragma: no cover
559                    log.debug("rename failed and new nvram was not "
560                                  "removed: '%s'", warn)
561            raise error
562
563        if not new_nvram:
564            return
565
566        try:
567            old_nvram.get_vol_object().delete(0)
568        except Exception as warn:  # pragma: no cover
569            log.debug("old nvram file was not removed: '%s'", warn)
570
571        self.define_overview(nvram=new_nvram.get_source_path())
572
573    # Device Add/Remove
574    def add_device(self, devobj):
575        """
576        Redefine guest with appended device XML 'devxml'
577        """
578        xmlobj = self._make_xmlobj_to_define()
579        xmlobj.add_device(devobj)
580        self._redefine_xmlobj(xmlobj)
581
582    def remove_device(self, devobj):
583        """
584        Remove passed device from the inactive guest XML
585        """
586        # If serial and duplicate console are both present, they both need
587        # to be removed at the same time
588        con = None
589        if self.serial_is_console_dup(devobj):
590            con = self.xmlobj.devices.console[0]
591
592        xmlobj = self._make_xmlobj_to_define()
593        editdev = self._lookup_device_to_define(xmlobj, devobj, False)
594        if not editdev:
595            return  # pragma: no cover
596
597        if con:
598            rmcon = xmlobj.find_device(con)
599            if rmcon:
600                xmlobj.remove_device(rmcon)
601        xmlobj.remove_device(editdev)
602
603        self._redefine_xmlobj(xmlobj)
604
605    def replace_device_xml(self, devobj, newxml):
606        """
607        When device XML is editing from the XML editor window.
608        """
609        do_hotplug = False
610        devclass = devobj.__class__
611        newdev = devclass(devobj.conn, parsexml=newxml)
612
613        xmlobj = self._make_xmlobj_to_define()
614        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
615        if not editdev:
616            return  # pragma: no cover
617
618        xmlobj.devices.replace_child(editdev, newdev)
619        self._redefine_xmlobj(xmlobj)
620        return editdev, newdev
621
622
623    ##########################
624    # non-device XML editing #
625    ##########################
626
627    def define_cpu(self, vcpus=_SENTINEL,
628            model=_SENTINEL, secure=_SENTINEL, sockets=_SENTINEL,
629            cores=_SENTINEL, threads=_SENTINEL, clear_topology=_SENTINEL):
630        guest = self._make_xmlobj_to_define()
631
632        if vcpus != _SENTINEL:
633            guest.vcpus = int(vcpus)
634            guest.vcpu_current = int(vcpus)
635
636        if clear_topology is True:
637            guest.cpu.topology.clear()
638        elif sockets != _SENTINEL:
639            guest.cpu.topology.sockets = sockets
640            guest.cpu.topology.cores = cores
641            guest.cpu.topology.threads = threads
642
643        if secure != _SENTINEL or model != _SENTINEL:
644            guest.cpu.secure = secure
645            if model in guest.cpu.SPECIAL_MODES:
646                guest.cpu.set_special_mode(guest, model)
647            else:
648                guest.cpu.set_model(guest, model)
649        self._redefine_xmlobj(guest)
650
651    def define_memory(self, memory=_SENTINEL, maxmem=_SENTINEL):
652        guest = self._make_xmlobj_to_define()
653
654        if memory != _SENTINEL:
655            guest.currentMemory = int(memory)
656        if maxmem != _SENTINEL:
657            guest.memory = int(maxmem)
658        self._redefine_xmlobj(guest)
659
660    def define_overview(self, machine=_SENTINEL, description=_SENTINEL,
661            title=_SENTINEL, loader=_SENTINEL,
662            nvram=_SENTINEL):
663        guest = self._make_xmlobj_to_define()
664        if machine != _SENTINEL:
665            guest.os.machine = machine
666            self._domain_caps = None
667        if description != _SENTINEL:
668            guest.description = description or None
669        if title != _SENTINEL:
670            guest.title = title or None
671
672        if loader != _SENTINEL:
673            if loader is None:
674                # Implies seabios, aka the default, so clear everything
675                guest.os.loader = None
676                guest.os.loader_ro = None
677                guest.os.loader_type = None
678                guest.os.nvram = None
679                guest.os.nvram_template = None
680            else:
681                # Implies UEFI
682                guest.set_uefi_path(loader)
683                guest.disable_hyperv_for_uefi()
684
685        if nvram != _SENTINEL:
686            guest.os.nvram = nvram
687
688        self._redefine_xmlobj(guest)
689
690    def define_os(self, os_name=_SENTINEL):
691        guest = self._make_xmlobj_to_define()
692
693        if os_name != _SENTINEL:
694            guest.set_os_name(os_name)
695
696        self._redefine_xmlobj(guest)
697
698    def define_boot(self, boot_order=_SENTINEL, boot_menu=_SENTINEL,
699                    kernel=_SENTINEL, initrd=_SENTINEL, dtb=_SENTINEL,
700                    kernel_args=_SENTINEL, init=_SENTINEL, initargs=_SENTINEL):
701
702        guest = self._make_xmlobj_to_define()
703        if boot_order != _SENTINEL:
704            legacy = not self.can_use_device_boot_order()
705            guest.set_boot_order(boot_order, legacy=legacy)
706
707        if boot_menu != _SENTINEL:
708            guest.os.enable_bootmenu = bool(boot_menu)
709        if init != _SENTINEL:
710            guest.os.init = init
711            guest.os.set_initargs_string(initargs)
712
713        if kernel != _SENTINEL:
714            guest.os.kernel = kernel or None
715        if initrd != _SENTINEL:
716            guest.os.initrd = initrd or None
717        if dtb != _SENTINEL:
718            guest.os.dtb = dtb or None
719        if kernel_args != _SENTINEL:
720            guest.os.kernel_args = kernel_args or None
721
722        self._redefine_xmlobj(guest)
723
724
725    ######################
726    # Device XML editing #
727    ######################
728
729    def define_disk(self, devobj, do_hotplug,
730            path=_SENTINEL, readonly=_SENTINEL,
731            shareable=_SENTINEL, removable=_SENTINEL, cache=_SENTINEL,
732            discard=_SENTINEL, detect_zeroes=_SENTINEL, bus=_SENTINEL,
733            serial=_SENTINEL):
734        xmlobj = self._make_xmlobj_to_define()
735        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
736        if not editdev:
737            return  # pragma: no cover
738
739        if path != _SENTINEL:
740            editdev.set_source_path(path)
741            if not do_hotplug:
742                editdev.sync_path_props()
743
744        if readonly != _SENTINEL:
745            editdev.read_only = readonly
746        if shareable != _SENTINEL:
747            editdev.shareable = shareable
748        if removable != _SENTINEL:
749            editdev.removable = removable
750
751        if serial != _SENTINEL:
752            editdev.serial = serial or None
753        if cache != _SENTINEL:
754            editdev.driver_cache = cache or None
755        if discard != _SENTINEL:
756            editdev.driver_discard = discard or None
757        if detect_zeroes != _SENTINEL:
758            editdev.driver_detect_zeroes = detect_zeroes or None
759
760        if bus != _SENTINEL:
761            editdev.change_bus(self.xmlobj, bus)
762
763        self._process_device_define(editdev, xmlobj, do_hotplug)
764
765    def define_network(self, devobj, do_hotplug,
766            ntype=_SENTINEL, source=_SENTINEL,
767            mode=_SENTINEL, model=_SENTINEL,
768            macaddr=_SENTINEL, linkstate=_SENTINEL):
769        xmlobj = self._make_xmlobj_to_define()
770        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
771        if not editdev:
772            return  # pragma: no cover
773
774        if ntype != _SENTINEL:
775            editdev.source = None
776
777            editdev.type = ntype
778            editdev.source = source
779            editdev.source_mode = mode or None
780
781        if model != _SENTINEL:
782            if editdev.model != model:
783                editdev.address.clear()
784            editdev.model = model
785
786        if macaddr != _SENTINEL:
787            editdev.macaddr = macaddr
788
789        if linkstate != _SENTINEL:
790            editdev.link_state = "up" if linkstate else "down"
791
792        self._process_device_define(editdev, xmlobj, do_hotplug)
793
794    def define_graphics(self, devobj, do_hotplug,
795            listen=_SENTINEL, port=_SENTINEL,
796            passwd=_SENTINEL, gtype=_SENTINEL,
797            gl=_SENTINEL, rendernode=_SENTINEL):
798        xmlobj = self._make_xmlobj_to_define()
799        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
800        if not editdev:
801            return  # pragma: no cover
802
803        if listen != _SENTINEL:
804            editdev.listen = listen
805        if port != _SENTINEL:
806            editdev.port = port
807        if passwd != _SENTINEL:
808            editdev.passwd = passwd
809        if gtype != _SENTINEL:
810            editdev.type = gtype
811        if gl != _SENTINEL:
812            editdev.gl = gl
813        if rendernode != _SENTINEL:
814            editdev.rendernode = rendernode
815
816        self._process_device_define(editdev, xmlobj, do_hotplug)
817
818    def define_sound(self, devobj, do_hotplug, model=_SENTINEL):
819        xmlobj = self._make_xmlobj_to_define()
820        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
821        if not editdev:
822            return  # pragma: no cover
823
824        if model != _SENTINEL:
825            if editdev.model != model:
826                editdev.address.clear()
827            editdev.model = model
828
829        self._process_device_define(editdev, xmlobj, do_hotplug)
830
831    def define_video(self, devobj, do_hotplug, model=_SENTINEL, accel3d=_SENTINEL):
832        xmlobj = self._make_xmlobj_to_define()
833        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
834        if not editdev:
835            return  # pragma: no cover
836
837        if model != _SENTINEL and model != editdev.model:
838            editdev.model = model
839            editdev.address.clear()
840
841            # Clear out heads/ram values so they reset to default. If
842            # we ever allow editing these values in the UI we should
843            # drop this
844            editdev.vram = None
845            editdev.heads = None
846            editdev.ram = None
847            editdev.vgamem = None
848            editdev.accel3d = None
849
850        if accel3d != _SENTINEL:
851            editdev.accel3d = accel3d
852
853        self._process_device_define(editdev, xmlobj, do_hotplug)
854
855    def define_watchdog(self, devobj, do_hotplug,
856            model=_SENTINEL, action=_SENTINEL):
857        xmlobj = self._make_xmlobj_to_define()
858        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
859        if not editdev:
860            return  # pragma: no cover
861
862        if model != _SENTINEL:
863            if editdev.model != model:
864                editdev.address.clear()
865            editdev.model = model
866
867        if action != _SENTINEL:
868            editdev.action = action
869
870        self._process_device_define(editdev, xmlobj, do_hotplug)
871
872    def define_smartcard(self, devobj, do_hotplug, model=_SENTINEL):
873        xmlobj = self._make_xmlobj_to_define()
874        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
875        if not editdev:
876            return  # pragma: no cover
877
878        if model != _SENTINEL:
879            editdev.mode = model
880            editdev.type = None
881            editdev.type = editdev.default_type()
882
883        self._process_device_define(editdev, xmlobj, do_hotplug)
884
885    def define_controller(self, devobj, do_hotplug, model=_SENTINEL):
886        xmlobj = self._make_xmlobj_to_define()
887        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
888        if not editdev:
889            return  # pragma: no cover
890
891        def _change_model():
892            if editdev.type == "usb":
893                ctrls = xmlobj.devices.controller
894                ctrls = [x for x in ctrls if (x.type ==
895                         DeviceController.TYPE_USB)]
896                for dev in ctrls:
897                    xmlobj.remove_device(dev)
898
899                if model == "ich9-ehci1":
900                    for dev in DeviceController.get_usb2_controllers(
901                            xmlobj.conn):
902                        xmlobj.add_device(dev)
903                elif model == "usb3":
904                    dev = DeviceController.get_usb3_controller(
905                        xmlobj.conn, xmlobj)
906                    xmlobj.add_device(dev)
907                else:
908                    dev = DeviceController(xmlobj.conn)
909                    dev.type = "usb"
910                    dev.model = model
911                    xmlobj.add_device(dev)
912
913            else:
914                editdev.model = model
915                editdev.address.clear()
916                self.hotplug(device=editdev)
917
918        if model != _SENTINEL:
919            _change_model()
920
921        self._process_device_define(editdev, xmlobj, do_hotplug)
922
923    def define_filesystem(self, devobj, do_hotplug, newdev=_SENTINEL):
924        xmlobj = self._make_xmlobj_to_define()
925        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
926        if not editdev:
927            return  # pragma: no cover
928
929        if newdev != _SENTINEL:
930            # pylint: disable=maybe-no-member
931            editdev.type = newdev.type
932            editdev.accessmode = newdev.accessmode
933            editdev.driver_type = newdev.driver_type
934            editdev.driver_format = newdev.driver_format
935            editdev.readonly = newdev.readonly
936            editdev.source_units = newdev.source_units
937            editdev.source = newdev.source
938            editdev.target = newdev.target
939
940        self._process_device_define(editdev, xmlobj, do_hotplug)
941
942
943    def define_hostdev(self, devobj, do_hotplug, rom_bar=_SENTINEL):
944        xmlobj = self._make_xmlobj_to_define()
945        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
946        if not editdev:
947            return  # pragma: no cover
948
949        if rom_bar != _SENTINEL:
950            editdev.rom_bar = rom_bar
951
952        self._process_device_define(editdev, xmlobj, do_hotplug)
953
954    def define_tpm(self, devobj, do_hotplug, model=_SENTINEL):
955        xmlobj = self._make_xmlobj_to_define()
956        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
957        if not editdev:
958            return  # pragma: no cover
959
960        if model != _SENTINEL:
961            editdev.model = model
962
963        self._process_device_define(editdev, xmlobj, do_hotplug)
964
965    def define_vsock(self, devobj, do_hotplug,
966            auto_cid=_SENTINEL, cid=_SENTINEL):
967        xmlobj = self._make_xmlobj_to_define()
968        editdev = self._lookup_device_to_define(xmlobj, devobj, do_hotplug)
969        if not editdev:
970            return  # pragma: no cover
971
972        if auto_cid != _SENTINEL:
973            editdev.auto_cid = auto_cid
974        if cid != _SENTINEL:
975            editdev.cid = cid
976
977        self._process_device_define(editdev, xmlobj, do_hotplug)
978
979
980    ####################
981    # Hotplug routines #
982    ####################
983
984    def attach_device(self, devobj):
985        """
986        Hotplug device to running guest
987        """
988        if not self.is_active():
989            return
990
991        devxml = devobj.get_xml()
992        log.debug("attach_device with xml=\n%s", devxml)
993        self._backend.attachDevice(devxml)
994
995    def detach_device(self, devobj):
996        """
997        Hotunplug device from running guest
998        """
999        if not self.is_active():
1000            return
1001
1002        devxml = devobj.get_xml()
1003        log.debug("detach_device with xml=\n%s", devxml)
1004        self._backend.detachDevice(devxml)
1005
1006    def _update_device(self, devobj, flags=None):
1007        if flags is None:
1008            flags = getattr(libvirt, "VIR_DOMAIN_DEVICE_MODIFY_LIVE", 1)
1009
1010        xml = devobj.get_xml()
1011        log.debug("update_device with xml=\n%s", xml)
1012        self._backend.updateDeviceFlags(xml, flags)
1013
1014    def hotplug(self, memory=_SENTINEL, maxmem=_SENTINEL,
1015            description=_SENTINEL, title=_SENTINEL, device=_SENTINEL):
1016        if not self.is_active():
1017            return
1018
1019        def _hotplug_memory(val):
1020            if val != self.xmlobj.currentMemory:
1021                self._backend.setMemory(val)
1022        def _hotplug_maxmem(val):
1023            if val != self.xmlobj.memory:
1024                self._backend.setMaxMemory(val)
1025
1026        def _hotplug_metadata(val, mtype):
1027            flags = (libvirt.VIR_DOMAIN_AFFECT_LIVE |
1028                     libvirt.VIR_DOMAIN_AFFECT_CONFIG)
1029            self._backend.setMetadata(mtype, val, None, None, flags)
1030
1031        if memory != _SENTINEL:
1032            log.debug("Hotplugging curmem=%s maxmem=%s for VM '%s'",
1033                         memory, maxmem, self.get_name())
1034
1035            actual_cur = self.xmlobj.currentMemory
1036            if maxmem < actual_cur:
1037                # Set current first to avoid error
1038                _hotplug_memory(memory)
1039                _hotplug_maxmem(maxmem)
1040            else:
1041                _hotplug_maxmem(maxmem)
1042                _hotplug_memory(memory)
1043
1044        if description != _SENTINEL:
1045            _hotplug_metadata(description,
1046                libvirt.VIR_DOMAIN_METADATA_DESCRIPTION)
1047        if title != _SENTINEL:
1048            _hotplug_metadata(title, libvirt.VIR_DOMAIN_METADATA_TITLE)
1049
1050        if device != _SENTINEL:
1051            self._update_device(device)
1052
1053
1054    ########################
1055    # Libvirt API wrappers #
1056    ########################
1057
1058    def _conn_tick_poll_param(self):
1059        return "pollvm"
1060    def class_name(self):
1061        return "domain"
1062
1063    def _define(self, xml):
1064        self.conn.define_domain(xml)
1065    def _XMLDesc(self, flags):
1066        return self._backend.XMLDesc(flags)
1067    def _get_backend_status(self):
1068        return self._backend.info()[0]
1069
1070    def get_autostart(self):
1071        if self._autostart is None:
1072            self._autostart = self._backend.autostart()
1073        return self._autostart
1074    def set_autostart(self, val):
1075        self._backend.setAutostart(val)
1076
1077        # Recache value
1078        self._autostart = None
1079        self.get_autostart()
1080
1081    def job_info(self):
1082        if self.conn.is_test():
1083            return testmock.fake_job_info()
1084        # It's tough to hit this via uitests because it depends
1085        # on the job lasting more than a second
1086        return self._backend.jobInfo()  # pragma: no cover
1087    def abort_job(self):
1088        self._backend.abortJob()
1089
1090    def open_console(self, devname, stream, flags=0):
1091        return self._backend.openConsole(devname, stream, flags)
1092
1093    def open_graphics_fd(self):
1094        flags = 0
1095        return self._backend.openGraphicsFD(0, flags)
1096
1097    def list_snapshots(self):
1098        if self._snapshot_list is None:
1099            newlist = []
1100            for rawsnap in self._backend.listAllSnapshots():
1101                obj = vmmDomainSnapshot(self.conn, rawsnap)
1102                obj.init_libvirt_state()
1103                newlist.append(obj)
1104            self._snapshot_list = newlist
1105        return self._snapshot_list[:]
1106
1107    @vmmLibvirtObject.lifecycle_action
1108    def revert_to_snapshot(self, snap):
1109        # no use trying to set the guest time if is going to be switched off
1110        # after reverting to the snapshot
1111        will_be_running = snap.is_running()
1112        self._backend.revertToSnapshot(snap.get_backend())
1113        # looking at the domain state after revert will always come back as
1114        # paused, so look at the snapshot state instead
1115        if will_be_running:
1116            self._async_set_time()
1117
1118    def create_snapshot(self, xml, redefine=False):
1119        flags = 0
1120        if redefine:
1121            flags = (flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE)
1122        else:
1123            log.debug("Creating snapshot flags=%s xml=\n%s", flags, xml)
1124        self._backend.snapshotCreateXML(xml, flags)
1125
1126    def _get_agent(self):
1127        """
1128        Return agent channel object if it is defined.
1129        """
1130        for dev in self.xmlobj.devices.channel:
1131            if (dev.type == "unix" and
1132                dev.target_name == dev.CHANNEL_NAME_QEMUGA):
1133                return dev
1134        return None
1135
1136    def has_agent(self):
1137        """
1138        Return True if domain has a guest agent defined.
1139        """
1140        return self._get_agent() is not None
1141
1142    def agent_ready(self):
1143        """
1144        Return connected state of an agent.
1145        """
1146        dev = self._get_agent()
1147        if not dev:
1148            return False
1149
1150        target_state = dev.target_state
1151        if self.conn.is_test():
1152            # test driver doesn't report 'connected' state so hack it here
1153            target_state = "connected"
1154        return target_state == "connected"
1155
1156    def refresh_snapshots(self):
1157        self._snapshot_list = None
1158
1159    def get_interface_addresses(self, iface, source):
1160        ret = {}
1161        log.debug("Calling interfaceAddresses source=%s", source)
1162        try:
1163            ret = self._backend.interfaceAddresses(source)
1164        except Exception as e:
1165            log.debug("interfaceAddresses failed: %s", str(e))
1166        if self.conn.is_test():
1167            ret = testmock.fake_interface_addresses(iface, source)
1168        return ret
1169
1170    def get_ips(self, iface):
1171        return self._ipfetcher.get(self, iface)
1172
1173    def refresh_ips(self, iface):
1174        return self._ipfetcher.refresh(self, iface)
1175
1176    def set_time(self):
1177        """
1178        Try to set VM time to the current value. This is typically useful when
1179        clock wasn't running on the VM for some time (e.g. during suspension or
1180        migration), especially if the time delay exceeds NTP tolerance.
1181        It is not guaranteed that the time is actually set (it depends on guest
1182        environment, especially QEMU agent presence) or that the set time is
1183        very precise (NTP in the guest should take care of it if needed).
1184
1185        Heavily based on
1186        https://github.com/openstack/nova/commit/414df1e56ea9df700756a1732125e06c5d97d792.
1187        """
1188        t = time.time()
1189        seconds = int(t)
1190        nseconds = int((t - seconds) * 10 ** 9)
1191        try:
1192            self._backend.setTime(time={"seconds": seconds,
1193                                        "nseconds": nseconds})
1194            log.debug("Successfully set guest time")
1195        except Exception as e:  # pragma: no cover
1196            log.debug("Failed to set time: %s", e)
1197
1198    def _async_set_time(self):
1199        """
1200        Asynchronously try to set guest time and maybe wait for a guest agent
1201        to come online using a separate thread.
1202        """
1203        self._set_time_thread.start()
1204
1205    def _cancel_set_time(self):
1206        """
1207        Cancel a running guest time setting operation
1208        """
1209        self._set_time_thread.stop()
1210
1211
1212    ########################
1213    # XML Parsing routines #
1214    ########################
1215
1216    def is_container(self):
1217        return self.get_xmlobj().os.is_container()
1218    def is_xenpv(self):
1219        return self.get_xmlobj().os.is_xenpv()
1220    def is_hvm(self):
1221        return self.get_xmlobj().os.is_hvm()
1222
1223    def get_uuid(self):
1224        if self._uuid is None:
1225            self._uuid = self._backend.UUIDString()
1226        return self._uuid
1227    def get_abi_type(self):
1228        return self.get_xmlobj().os.os_type
1229    def get_hv_type(self):
1230        return self.get_xmlobj().type
1231    def get_pretty_hv_type(self):
1232        return self.conn.pretty_hv(self.get_abi_type(), self.get_hv_type())
1233    def get_arch(self):
1234        return self.get_xmlobj().os.arch
1235    def get_init(self):
1236        import pipes
1237        init = self.get_xmlobj().os.init
1238        initargs = " ".join(
1239            [pipes.quote(i.val) for i in self.get_xmlobj().os.initargs])
1240        return init, initargs
1241
1242    def get_emulator(self):
1243        return self.get_xmlobj().emulator
1244    def get_machtype(self):
1245        return self.get_xmlobj().os.machine
1246
1247    def get_name_or_title(self):
1248        title = self.get_title()
1249        if title:
1250            return title
1251        return self.get_name()
1252
1253    def get_title(self):
1254        return self.get_xmlobj().title
1255    def get_description(self):
1256        return self.get_xmlobj().description
1257
1258    def get_boot_order(self):
1259        legacy = not self.can_use_device_boot_order()
1260        return self.xmlobj.get_boot_order(legacy=legacy)
1261
1262    def get_boot_menu(self):
1263        guest = self.get_xmlobj()
1264        return bool(guest.os.enable_bootmenu)
1265    def get_boot_kernel_info(self):
1266        guest = self.get_xmlobj()
1267        return (guest.os.kernel, guest.os.initrd,
1268                guest.os.dtb, guest.os.kernel_args)
1269
1270    def get_interface_devices_norefresh(self):
1271        xmlobj = self.get_xmlobj(refresh_if_nec=False)
1272        return xmlobj.devices.interface
1273    def get_disk_devices_norefresh(self):
1274        xmlobj = self.get_xmlobj(refresh_if_nec=False)
1275        return xmlobj.devices.disk
1276
1277    def serial_is_console_dup(self, serial):
1278        return DeviceConsole.get_console_duplicate(self.xmlobj, serial)
1279
1280    def can_use_device_boot_order(self):
1281        # Return 'True' if guest can use new style boot device ordering
1282        return self.conn.support.conn_device_boot_order()
1283
1284    def get_bootable_devices(self):
1285        # redirdev can also be marked bootable, but it should be rarely
1286        # used and clutters the UI
1287        return self.xmlobj.get_bootable_devices(exclude_redirdev=True)
1288
1289
1290    ############################
1291    # Domain lifecycle methods #
1292    ############################
1293
1294    # All these methods are usually run asynchronously from threads, so
1295    # let's be extra careful and have anything which might touch UI
1296    # or GObject.props invoked in an idle callback
1297
1298    @vmmLibvirtObject.lifecycle_action
1299    def shutdown(self):
1300        self._cancel_set_time()
1301        self._install_abort = True
1302        self._backend.shutdown()
1303
1304    @vmmLibvirtObject.lifecycle_action
1305    def reboot(self):
1306        self._cancel_set_time()
1307        self._install_abort = True
1308        self._backend.reboot(0)
1309
1310    @vmmLibvirtObject.lifecycle_action
1311    def destroy(self):
1312        self._cancel_set_time()
1313        self._install_abort = True
1314        self._backend.destroy()
1315
1316    @vmmLibvirtObject.lifecycle_action
1317    def reset(self):
1318        self._cancel_set_time()
1319        self._install_abort = True
1320        self._backend.reset(0)
1321
1322    @vmmLibvirtObject.lifecycle_action
1323    def startup(self):
1324        has_managed = self.has_managed_save()
1325        if (self.config.CLITestOptions.test_vm_run_fail or
1326           (has_managed and self.config.CLITestOptions.test_managed_save)):
1327            raise RuntimeError("fake error for managed save")
1328
1329        self._backend.create()
1330        if has_managed:
1331            self._async_set_time()
1332
1333    @vmmLibvirtObject.lifecycle_action
1334    def suspend(self):
1335        self._cancel_set_time()
1336        self._backend.suspend()
1337
1338    @vmmLibvirtObject.lifecycle_action
1339    def delete(self, force=True):
1340        """
1341        @force: True if we are deleting domain, False if we are renaming domain
1342
1343        If the domain is renamed we need to keep the nvram file.
1344        """
1345        flags = 0
1346        if force:
1347            flags |= getattr(libvirt,
1348                             "VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA", 0)
1349            flags |= getattr(libvirt, "VIR_DOMAIN_UNDEFINE_MANAGED_SAVE", 0)
1350            if self.has_nvram():
1351                flags |= getattr(libvirt, "VIR_DOMAIN_UNDEFINE_NVRAM", 0)
1352        else:
1353            if self.has_nvram():
1354                flags |= getattr(libvirt, "VIR_DOMAIN_UNDEFINE_KEEP_NVRAM", 0)
1355        try:
1356            self._backend.undefineFlags(flags)
1357        except libvirt.libvirtError:
1358            log.exception("libvirt undefineFlags failed, "
1359                              "falling back to old style")
1360            self._backend.undefine()
1361
1362    @vmmLibvirtObject.lifecycle_action
1363    def resume(self):
1364        self._backend.resume()
1365        self._async_set_time()
1366
1367    @vmmLibvirtObject.lifecycle_action
1368    def save(self, meter=None):
1369        self._cancel_set_time()
1370        self._install_abort = True
1371
1372        if meter:
1373            start_job_progress_thread(self, meter, _("Saving domain to disk"))
1374
1375        if self.config.CLITestOptions.test_managed_save:
1376            time.sleep(1.2)
1377        self._backend.managedSave(0)
1378
1379    def has_managed_save(self):
1380        if not self.managedsave_supported:
1381            return False  # pragma: no cover
1382
1383        if self._has_managed_save is None:
1384            try:
1385                self._has_managed_save = self._backend.hasManagedSaveImage(0)
1386            except Exception as e:    # pragma: no cover
1387                if self.conn.support.is_libvirt_error_no_domain(e):
1388                    return False
1389                raise
1390
1391        return self._has_managed_save
1392
1393    def remove_saved_image(self):
1394        if not self.has_managed_save():
1395            return   # pragma: no cover
1396        self._backend.managedSaveRemove(0)
1397        self._has_managed_save = None
1398
1399
1400    def migrate(self, destconn, dest_uri=None,
1401            tunnel=False, unsafe=False, temporary=False, xml=None, meter=None):
1402        self._cancel_set_time()
1403        self._install_abort = True
1404
1405        flags = 0
1406        flags |= libvirt.VIR_MIGRATE_LIVE
1407
1408        if not temporary:
1409            flags |= libvirt.VIR_MIGRATE_PERSIST_DEST
1410            flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
1411
1412        if tunnel:
1413            flags |= libvirt.VIR_MIGRATE_PEER2PEER
1414            flags |= libvirt.VIR_MIGRATE_TUNNELLED
1415
1416        if unsafe:
1417            flags |= libvirt.VIR_MIGRATE_UNSAFE
1418
1419        libvirt_destconn = destconn.get_backend().get_conn_for_api_arg()
1420        log.debug("Migrating: conn=%s flags=%s uri=%s tunnel=%s "
1421            "unsafe=%s temporary=%s",
1422            destconn, flags, dest_uri, tunnel, unsafe, temporary)
1423
1424        if meter:
1425            start_job_progress_thread(self, meter, _("Migrating domain"))
1426
1427        params = {}
1428        if dest_uri and not tunnel:
1429            params[libvirt.VIR_MIGRATE_PARAM_URI] = dest_uri
1430        if xml:
1431            params[libvirt.VIR_MIGRATE_PARAM_DEST_XML] = xml
1432
1433        if self.conn.is_test() and "TESTSUITE-FAKE" in (dest_uri or ""):
1434            # If using the test driver and a special URI, fake successful
1435            # migration so we can test more of the migration wizard
1436            time.sleep(1.2)
1437            if not xml:
1438                xml = self.get_xml_to_define()
1439            destconn.define_domain(xml).create()
1440            self.delete()
1441        elif tunnel:
1442            self._backend.migrateToURI3(dest_uri, params, flags)
1443        else:
1444            self._backend.migrate3(libvirt_destconn, params, flags)
1445
1446        # Don't schedule any conn update, migrate dialog handles it for us
1447
1448
1449    ###################
1450    # Stats accessors #
1451    ###################
1452
1453    def _get_stats(self):
1454        return self.conn.statsmanager.get_vm_statslist(self)
1455    def stats_memory(self):
1456        return self._get_stats().get_record("curmem")
1457    def cpu_time(self):
1458        return self._get_stats().get_record("cpuTime")
1459    def host_cpu_time_percentage(self):
1460        return self._get_stats().get_record("cpuHostPercent")
1461    def guest_cpu_time_percentage(self):
1462        return self._get_stats().get_record("cpuGuestPercent")
1463    def network_rx_rate(self):
1464        return self._get_stats().get_record("netRxRate")
1465    def network_tx_rate(self):
1466        return self._get_stats().get_record("netTxRate")
1467    def disk_read_rate(self):
1468        return self._get_stats().get_record("diskRdRate")
1469    def disk_write_rate(self):
1470        return self._get_stats().get_record("diskWrRate")
1471
1472    def network_traffic_rate(self):
1473        return self.network_tx_rate() + self.network_rx_rate()
1474    def network_traffic_max_rate(self):
1475        stats = self._get_stats()
1476        return max(stats.netRxMaxRate, stats.netTxMaxRate, 10.0)
1477    def disk_io_rate(self):
1478        return self.disk_read_rate() + self.disk_write_rate()
1479    def disk_io_max_rate(self):
1480        stats = self._get_stats()
1481        return max(stats.diskRdMaxRate, stats.diskWrMaxRate, 10.0)
1482
1483    def host_cpu_time_vector(self, limit=None):
1484        return self._get_stats().get_vector("cpuHostPercent", limit)
1485    def guest_cpu_time_vector(self, limit=None):
1486        return self._get_stats().get_vector("cpuGuestPercent", limit)
1487    def stats_memory_vector(self, limit=None):
1488        return self._get_stats().get_vector("currMemPercent", limit)
1489    def network_traffic_vectors(self, limit=None, ceil=None):
1490        if ceil is None:
1491            ceil = self.network_traffic_max_rate()
1492        return self._get_stats().get_in_out_vector(
1493                "netRxRate", "netTxRate", limit, ceil)
1494    def disk_io_vectors(self, limit=None, ceil=None):
1495        if ceil is None:
1496            ceil = self.disk_io_max_rate()
1497        return self._get_stats().get_in_out_vector(
1498                "diskRdRate", "diskWrRate", limit, ceil)
1499
1500
1501    ###################
1502    # Status helpers ##
1503    ###################
1504
1505    def _normalize_status(self, status):
1506        if status == libvirt.VIR_DOMAIN_NOSTATE:
1507            return libvirt.VIR_DOMAIN_RUNNING  # pragma: no cover
1508        elif status == libvirt.VIR_DOMAIN_BLOCKED:
1509            return libvirt.VIR_DOMAIN_RUNNING  # pragma: no cover
1510        return status
1511
1512    def is_active(self):
1513        return not self.is_shutoff()
1514    def is_shutoff(self):
1515        return self.status() == libvirt.VIR_DOMAIN_SHUTOFF
1516    def is_crashed(self):
1517        return self.status() == libvirt.VIR_DOMAIN_CRASHED
1518    def is_stoppable(self):
1519        return self.status() in [libvirt.VIR_DOMAIN_RUNNING,
1520                                 libvirt.VIR_DOMAIN_PAUSED,
1521                                 libvirt.VIR_DOMAIN_CRASHED,
1522                                 libvirt.VIR_DOMAIN_PMSUSPENDED]
1523    def is_destroyable(self):
1524        return (self.is_stoppable() or
1525                self.status() in [libvirt.VIR_DOMAIN_CRASHED])
1526    def is_runable(self):
1527        return self.is_shutoff()
1528    def is_pauseable(self):
1529        return self.status() in [libvirt.VIR_DOMAIN_RUNNING]
1530    def is_unpauseable(self):
1531        return self.status() in [libvirt.VIR_DOMAIN_PAUSED]
1532    def is_paused(self):
1533        return self.status() in [libvirt.VIR_DOMAIN_PAUSED]
1534    def is_cloneable(self):
1535        return self.status() in [libvirt.VIR_DOMAIN_SHUTOFF]
1536
1537    def run_status(self):
1538        return LibvirtEnumMap.pretty_run_status(
1539                self.status(), self.has_managed_save())
1540
1541    def run_status_reason(self):
1542        return LibvirtEnumMap.pretty_status_reason(
1543                self.status(), self.status_reason())
1544
1545    def run_status_icon_name(self):
1546        status = self.status()
1547        if status not in LibvirtEnumMap.VM_STATUS_ICONS:  # pragma: no cover
1548            log.debug("Unknown status %s, using NOSTATE", status)
1549            status = libvirt.VIR_DOMAIN_NOSTATE
1550        return LibvirtEnumMap.VM_STATUS_ICONS[status]
1551
1552    def set_inspection_data(self, data):
1553        self.inspection = data
1554        self.idle_emit("inspection-changed")
1555
1556
1557    ##################
1558    # config helpers #
1559    ##################
1560
1561    def on_console_scaling_changed(self, *args, **kwargs):
1562        return self.config.listen_pervm(self.get_uuid(), "/scaling",
1563                                        *args, **kwargs)
1564    def set_console_scaling(self, value):
1565        self.config.set_pervm(self.get_uuid(), "/scaling", value)
1566    def get_console_scaling(self):
1567        ret = self.config.get_pervm(self.get_uuid(), "/scaling")
1568        if ret == -1:
1569            return self.config.get_console_scaling()
1570        return ret
1571
1572    def on_console_resizeguest_changed(self, *args, **kwargs):
1573        return self.config.listen_pervm(self.get_uuid(), "/resize-guest",
1574                                        *args, **kwargs)
1575    def set_console_resizeguest(self, value):
1576        self.config.set_pervm(self.get_uuid(), "/resize-guest", value)
1577    def get_console_resizeguest(self):
1578        ret = self.config.get_pervm(self.get_uuid(), "/resize-guest")
1579        if ret == -1:
1580            return self.config.get_console_resizeguest()
1581        return ret
1582
1583    def on_console_autoconnect_changed(self, *args, **kwargs):
1584        return self.config.listen_pervm(self.get_uuid(), "/resize-guest",
1585                                        *args, **kwargs)
1586    def set_console_autoconnect(self, value):
1587        self.config.set_pervm(self.get_uuid(), "/autoconnect", value)
1588    def get_console_autoconnect(self):
1589        ret = self.config.get_pervm(self.get_uuid(), "/autoconnect")
1590        if ret == -1:
1591            return self.config.get_console_autoconnect()
1592        return ret
1593
1594    def set_details_window_size(self, w, h):
1595        self.config.set_pervm(self.get_uuid(), "/vm-window-size", (w, h))
1596    def get_details_window_size(self):
1597        ret = self.config.get_pervm(self.get_uuid(), "/vm-window-size")
1598        return ret
1599
1600    def get_console_password(self):
1601        return self.config.get_pervm(self.get_uuid(), "/console-password")
1602    def set_console_password(self, username, keyid):
1603        return self.config.set_pervm(self.get_uuid(), "/console-password",
1604                                     (username, keyid))
1605    def del_console_password(self):
1606        return self.config.set_pervm(self.get_uuid(), "/console-password",
1607                                     ("", -1))
1608
1609    def get_cache_dir(self):
1610        ret = os.path.join(self.conn.get_cache_dir(), self.get_uuid())
1611        os.makedirs(ret, 0o755, exist_ok=True)
1612        return ret
1613
1614
1615    ###################
1616    # Polling helpers #
1617    ###################
1618
1619    def tick(self, stats_update=True):
1620        if (not self._using_events() and
1621            not stats_update):
1622            return
1623
1624        dosignal = False
1625        if not self._using_events():
1626            # For domains it's pretty important that we are always using
1627            # the latest XML, but other objects probably don't want to do
1628            # this since it could be a performance hit.
1629            self._invalidate_xml()
1630            info = self._backend.info()
1631            dosignal = self._refresh_status(newstatus=info[0], cansignal=False)
1632
1633        if stats_update:
1634            self.conn.statsmanager.refresh_vm_stats(self)
1635        if dosignal:
1636            self.idle_emit("state-changed")
1637        if stats_update:
1638            self.idle_emit("resources-sampled")
1639
1640
1641########################
1642# Libvirt domain class #
1643########################
1644
1645class vmmDomainVirtinst(vmmDomain):
1646    """
1647    Domain object backed by a virtinst Guest object.
1648
1649    Used for launching a details window for customizing a VM before install.
1650    """
1651    def __init__(self, conn, backend, key, installer):
1652        vmmDomain.__init__(self, conn, backend, key)
1653        self._orig_xml = None
1654        self._orig_backend = self._backend
1655        self._installer = installer
1656
1657        self._refresh_status()
1658        log.debug("%s initialized with XML=\n%s", self, self._XMLDesc(0))
1659
1660    def get_name(self):
1661        return self._backend.name
1662    def get_uuid(self):
1663        return self._backend.uuid
1664    def get_id(self):
1665        return -1  # pragma: no cover
1666    def has_managed_save(self):
1667        return False
1668
1669    def snapshots_supported(self):
1670        return False
1671
1672    def get_autostart(self):
1673        return self._installer.autostart
1674    def set_autostart(self, val):
1675        self._installer.autostart = bool(val)
1676        self.emit("state-changed")
1677
1678    def _using_events(self):
1679        return False
1680    def _get_backend_status(self):
1681        return libvirt.VIR_DOMAIN_SHUTOFF
1682
1683    def _cleanup(self):
1684        self._orig_backend = None
1685        self._installer = None
1686        super()._cleanup()
1687
1688
1689    ################
1690    # XML handling #
1691    ################
1692
1693    def _sync_disk_storage_params(self, origdisk, newdisk):
1694        """
1695        When raw disk XML is edited from the customize wizard, the
1696        original DeviceDisk is completely blown away, but that will
1697        lose the storage creation info. This syncs that info across
1698        to the new DeviceDisk
1699        """
1700        if origdisk.get_source_path() != newdisk.get_source_path():
1701            return
1702
1703        if origdisk.get_vol_object():
1704            log.debug(
1705                    "Syncing vol_object=%s from origdisk=%s to newdisk=%s",
1706                    origdisk.get_vol_object(), origdisk, newdisk)
1707            newdisk.set_vol_object(origdisk.get_vol_object(),
1708                                   origdisk.get_parent_pool())
1709        elif origdisk.get_vol_install():
1710            log.debug(
1711                    "Syncing vol_install=%s from origdisk=%s to newdisk=%s",
1712                    origdisk.get_vol_install(), origdisk, newdisk)
1713            newdisk.set_vol_install(origdisk.get_vol_install())
1714
1715    def _replace_domain_xml(self, newxml):
1716        """
1717        Blow away the Guest instance we are tracking internally with
1718        a new one from the xmleditor UI, and sync over all disk storage
1719        info afterwards
1720        """
1721        newbackend = Guest(self._backend.conn, parsexml=newxml)
1722
1723        for origdisk in self._backend.devices.disk:
1724            for newdisk in newbackend.devices.disk:
1725                if origdisk.compare_device(newdisk, newdisk.get_xml_idx()):
1726                    self._sync_disk_storage_params(origdisk, newdisk)
1727                    break
1728
1729        self._backend = newbackend
1730
1731    def replace_device_xml(self, devobj, newxml):
1732        """
1733        Overwrite vmmDomain's implementation, since we need to wire in
1734        syncing disk details.
1735        """
1736        if self._backend == self._orig_backend:
1737            # If the backend hasn't been replace yet, do it, so we don't
1738            # have a mix of is_build Guest with XML parsed objects which
1739            # might contain dragons
1740            self._replace_domain_xml(self._backend.get_xml())
1741        editdev, newdev = vmmDomain.replace_device_xml(self, devobj, newxml)
1742        if editdev.DEVICE_TYPE == "disk":
1743            self._sync_disk_storage_params(editdev, newdev)
1744
1745    def define_xml(self, xml):
1746        origxml = self._backend.get_xml()
1747        self._replace_domain_xml(xml)
1748        self._redefine_xml_internal(origxml, xml)
1749
1750    def define_name(self, newname):
1751        # We need to overwrite this, since the implementation for libvirt
1752        # needs to do some crazy stuff.
1753        xmlobj = self._make_xmlobj_to_define()
1754        xmlobj.name = str(newname)
1755        self._redefine_xmlobj(xmlobj)
1756
1757    def _XMLDesc(self, flags):
1758        ignore = flags
1759        return self._backend.get_xml()
1760
1761    def _define(self, xml):
1762        ignore = xml
1763        self.emit("state-changed")
1764
1765    def _invalidate_xml(self):
1766        vmmDomain._invalidate_xml(self)
1767        self._orig_xml = None
1768
1769    def _make_xmlobj_to_define(self):
1770        if not self._orig_xml:
1771            self._orig_xml = self._backend.get_xml()
1772        return self._backend
1773
1774    def _redefine_xmlobj(self, xmlobj):
1775        self._redefine_xml_internal(self._orig_xml or "", xmlobj.get_xml())
1776
1777    def rename_domain(self, new_name):
1778        Guest.validate_name(self._backend.conn, str(new_name))
1779        self.define_name(new_name)
1780