1 /*
2  * qemu_blockjob.c: helper functions for QEMU block jobs
3  *
4  * Copyright (C) 2006-2015 Red Hat, Inc.
5  * Copyright (C) 2006 Daniel P. Berrange
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library.  If not, see
19  * <http://www.gnu.org/licenses/>.
20  */
21 
22 #include <config.h>
23 
24 #include "internal.h"
25 
26 #include "qemu_blockjob.h"
27 #include "qemu_block.h"
28 #include "qemu_domain.h"
29 #include "qemu_alias.h"
30 #include "qemu_backup.h"
31 
32 #include "conf/domain_conf.h"
33 #include "conf/domain_event.h"
34 
35 #include "storage_source_conf.h"
36 #include "virlog.h"
37 #include "virthread.h"
38 #include "virtime.h"
39 #include "locking/domain_lock.h"
40 #include "viralloc.h"
41 #include "virstring.h"
42 #include "qemu_security.h"
43 
44 #define VIR_FROM_THIS VIR_FROM_QEMU
45 
46 VIR_LOG_INIT("qemu.qemu_blockjob");
47 
48 /* Note that qemuBlockjobState and qemuBlockjobType values are formatted into
49  * the status XML */
50 VIR_ENUM_IMPL(qemuBlockjobState,
51               QEMU_BLOCKJOB_STATE_LAST,
52               "completed",
53               "failed",
54               "cancelled",
55               "ready",
56               "new",
57               "running",
58               "concluded",
59               "aborting",
60               "pivoting");
61 
62 VIR_ENUM_IMPL(qemuBlockjob,
63               QEMU_BLOCKJOB_TYPE_LAST,
64               "",
65               "pull",
66               "copy",
67               "commit",
68               "active-commit",
69               "backup",
70               "",
71               "create",
72               "broken");
73 
74 static virClass *qemuBlockJobDataClass;
75 
76 
77 static void
78 qemuBlockJobDataDisposeJobdata(qemuBlockJobData *job)
79 {
80     if (job->type == QEMU_BLOCKJOB_TYPE_CREATE)
81         virObjectUnref(job->data.create.src);
82 
83     if (job->type == QEMU_BLOCKJOB_TYPE_BACKUP) {
84         virObjectUnref(job->data.backup.store);
85         g_free(job->data.backup.bitmap);
86     }
87 }
88 
89 
90 static void
91 qemuBlockJobDataDispose(void *obj)
92 {
93     qemuBlockJobData *job = obj;
94 
95     virObjectUnref(job->chain);
96     virObjectUnref(job->mirrorChain);
97 
98     qemuBlockJobDataDisposeJobdata(job);
99 
100     g_free(job->name);
101     g_free(job->errmsg);
102 }
103 
104 
105 static int
106 qemuBlockJobDataOnceInit(void)
107 {
108     if (!VIR_CLASS_NEW(qemuBlockJobData, virClassForObject()))
109         return -1;
110 
111     return 0;
112 }
113 
114 
115 VIR_ONCE_GLOBAL_INIT(qemuBlockJobData);
116 
117 qemuBlockJobData *
118 qemuBlockJobDataNew(qemuBlockJobType type,
119                     const char *name)
120 {
121     g_autoptr(qemuBlockJobData) job = NULL;
122 
123     if (qemuBlockJobDataInitialize() < 0)
124         return NULL;
125 
126     if (!(job = virObjectNew(qemuBlockJobDataClass)))
127         return NULL;
128 
129     job->name = g_strdup(name);
130 
131     job->state = QEMU_BLOCKJOB_STATE_NEW;
132     job->newstate = -1;
133     job->type = type;
134 
135     return g_steal_pointer(&job);
136 }
137 
138 
139 /**
140  * qemuBlockJobMarkBroken:
141  * @job: job to mark as broken
142  *
143  * When we are unable to parse the block job data from the XML we need to mark
144  * the job as broken and then attempt to abort it. This function marks the job
145  * as broken.
146  */
147 static void
148 qemuBlockJobMarkBroken(qemuBlockJobData *job)
149 {
150     qemuBlockJobDataDisposeJobdata(job);
151     job->brokentype = job->type;
152     job->type = QEMU_BLOCKJOB_TYPE_BROKEN;
153 }
154 
155 
156 /**
157  * qemuBlockJobRegister:
158  * @job: job to register
159  * @vm: domain to register @job with
160  * @disk: disk to register @job with
161  * @savestatus: save the status XML after registering
162  *
163  * This function registers @job with @disk and @vm and records it into the
164  * status XML (if @savestatus is true).
165  *
166  * Note that if @job also references a separate chain, e.g. for disk mirroring,
167  * then job->mirrorChain needs to be set manually.
168  */
169 int
170 qemuBlockJobRegister(qemuBlockJobData *job,
171                      virDomainObj *vm,
172                      virDomainDiskDef *disk,
173                      bool savestatus)
174 {
175     qemuDomainObjPrivate *priv = vm->privateData;
176 
177     if (disk && QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob) {
178         virReportError(VIR_ERR_INTERNAL_ERROR,
179                        _("disk '%s' has a blockjob assigned"), disk->dst);
180         return -1;
181     }
182 
183     if (virHashAddEntry(priv->blockjobs, job->name, virObjectRef(job)) < 0) {
184         virObjectUnref(job);
185         return -1;
186     }
187 
188     if (disk) {
189         job->disk = disk;
190         job->chain = virObjectRef(disk->src);
191         QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob = virObjectRef(job);
192     }
193 
194     if (savestatus)
195         qemuDomainSaveStatus(vm);
196 
197     return 0;
198 }
199 
200 
201 static void
202 qemuBlockJobUnregister(qemuBlockJobData *job,
203                        virDomainObj *vm)
204 {
205     qemuDomainObjPrivate *priv = vm->privateData;
206     qemuDomainDiskPrivate *diskPriv;
207 
208     if (job->disk) {
209         diskPriv = QEMU_DOMAIN_DISK_PRIVATE(job->disk);
210 
211         if (job == diskPriv->blockjob) {
212             virObjectUnref(diskPriv->blockjob);
213             diskPriv->blockjob = NULL;
214         }
215 
216         job->disk = NULL;
217     }
218 
219     /* this may remove the last reference of 'job' */
220     virHashRemoveEntry(priv->blockjobs, job->name);
221 
222     qemuDomainSaveStatus(vm);
223 }
224 
225 
226 /**
227  * qemuBlockJobDiskNew:
228  * @disk: disk definition
229  *
230  * Start/associate a new blockjob with @disk.
231  *
232  * Returns the new job object on success and NULL on failure.
233  */
234 qemuBlockJobData *
235 qemuBlockJobDiskNew(virDomainObj *vm,
236                     virDomainDiskDef *disk,
237                     qemuBlockJobType type,
238                     const char *jobname)
239 {
240     g_autoptr(qemuBlockJobData) job = NULL;
241 
242     if (!(job = qemuBlockJobDataNew(type, jobname)))
243         return NULL;
244 
245     if (qemuBlockJobRegister(job, vm, disk, true) < 0)
246         return NULL;
247 
248     return g_steal_pointer(&job);
249 }
250 
251 
252 qemuBlockJobData *
253 qemuBlockJobDiskNewPull(virDomainObj *vm,
254                         virDomainDiskDef *disk,
255                         virStorageSource *base,
256                         unsigned int jobflags)
257 {
258     qemuDomainObjPrivate *priv = vm->privateData;
259     g_autoptr(qemuBlockJobData) job = NULL;
260     g_autofree char *jobname = NULL;
261 
262     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
263         jobname = g_strdup_printf("pull-%s-%s", disk->dst, disk->src->nodeformat);
264     } else {
265         if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
266             return NULL;
267     }
268 
269     if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_PULL, jobname)))
270         return NULL;
271 
272     job->data.pull.base = base;
273     job->jobflags = jobflags;
274 
275     if (qemuBlockJobRegister(job, vm, disk, true) < 0)
276         return NULL;
277 
278     return g_steal_pointer(&job);
279 }
280 
281 
282 qemuBlockJobData *
283 qemuBlockJobDiskNewCommit(virDomainObj *vm,
284                           virDomainDiskDef *disk,
285                           virStorageSource *topparent,
286                           virStorageSource *top,
287                           virStorageSource *base,
288                           bool delete_imgs,
289                           unsigned int jobflags)
290 {
291     qemuDomainObjPrivate *priv = vm->privateData;
292     g_autoptr(qemuBlockJobData) job = NULL;
293     g_autofree char *jobname = NULL;
294     qemuBlockJobType jobtype = QEMU_BLOCKJOB_TYPE_COMMIT;
295 
296     if (topparent == NULL)
297         jobtype = QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT;
298 
299     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
300         jobname = g_strdup_printf("commit-%s-%s", disk->dst, top->nodeformat);
301     } else {
302         if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
303             return NULL;
304     }
305 
306     if (!(job = qemuBlockJobDataNew(jobtype, jobname)))
307         return NULL;
308 
309     job->data.commit.topparent = topparent;
310     job->data.commit.top = top;
311     job->data.commit.base = base;
312     job->data.commit.deleteCommittedImages = delete_imgs;
313     job->jobflags = jobflags;
314 
315     if (qemuBlockJobRegister(job, vm, disk, true) < 0)
316         return NULL;
317 
318     return g_steal_pointer(&job);
319 }
320 
321 
322 qemuBlockJobData *
323 qemuBlockJobNewCreate(virDomainObj *vm,
324                       virStorageSource *src,
325                       virStorageSource *chain,
326                       bool storage)
327 {
328     g_autoptr(qemuBlockJobData) job = NULL;
329     g_autofree char *jobname = NULL;
330     const char *nodename = src->nodeformat;
331 
332     if (storage)
333         nodename = src->nodestorage;
334 
335     jobname = g_strdup_printf("create-%s", nodename);
336 
337     if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_CREATE, jobname)))
338         return NULL;
339 
340     if (virStorageSourceIsBacking(chain))
341         job->chain = virObjectRef(chain);
342 
343     job->data.create.src = virObjectRef(src);
344 
345     if (qemuBlockJobRegister(job, vm, NULL, true) < 0)
346         return NULL;
347 
348     return g_steal_pointer(&job);
349 }
350 
351 
352 qemuBlockJobData *
353 qemuBlockJobDiskNewCopy(virDomainObj *vm,
354                         virDomainDiskDef *disk,
355                         virStorageSource *mirror,
356                         bool shallow,
357                         bool reuse,
358                         unsigned int jobflags)
359 {
360     qemuDomainObjPrivate *priv = vm->privateData;
361     g_autoptr(qemuBlockJobData) job = NULL;
362     g_autofree char *jobname = NULL;
363 
364     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
365         jobname = g_strdup_printf("copy-%s-%s", disk->dst, disk->src->nodeformat);
366     } else {
367         if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
368             return NULL;
369     }
370 
371     if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_COPY, jobname)))
372         return NULL;
373 
374     job->mirrorChain = virObjectRef(mirror);
375 
376     if (shallow && !reuse)
377         job->data.copy.shallownew = true;
378 
379     job->jobflags = jobflags;
380 
381     if (qemuBlockJobRegister(job, vm, disk, true) < 0)
382         return NULL;
383 
384     return g_steal_pointer(&job);
385 }
386 
387 
388 qemuBlockJobData *
389 qemuBlockJobDiskNewBackup(virDomainObj *vm,
390                           virDomainDiskDef *disk,
391                           virStorageSource *store,
392                           const char *bitmap)
393 {
394     g_autoptr(qemuBlockJobData) job = NULL;
395     g_autofree char *jobname = NULL;
396 
397     jobname = g_strdup_printf("backup-%s-%s", disk->dst, disk->src->nodeformat);
398 
399     if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_BACKUP, jobname)))
400         return NULL;
401 
402     job->data.backup.bitmap = g_strdup(bitmap);
403     job->data.backup.store = virObjectRef(store);
404 
405     /* backup jobs are usually started in bulk via a transaction so the caller
406      * is expected to save the status XML */
407     if (qemuBlockJobRegister(job, vm, disk, false) < 0)
408         return NULL;
409 
410     return g_steal_pointer(&job);
411 }
412 
413 
414 /**
415  * qemuBlockJobDiskGetJob:
416  * @disk: disk definition
417  *
418  * Get a reference to the block job data object associated with @disk.
419  */
420 qemuBlockJobData *
421 qemuBlockJobDiskGetJob(virDomainDiskDef *disk)
422 {
423     qemuBlockJobData *job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;
424 
425     if (!job)
426         return NULL;
427 
428     return virObjectRef(job);
429 }
430 
431 
432 /**
433  * qemuBlockJobStarted:
434  * @job: job data
435  *
436  * Mark @job as started in qemu.
437  */
438 void
439 qemuBlockJobStarted(qemuBlockJobData *job,
440                     virDomainObj *vm)
441 {
442     if (job->state == QEMU_BLOCKJOB_STATE_NEW)
443         job->state = QEMU_BLOCKJOB_STATE_RUNNING;
444 
445     qemuDomainSaveStatus(vm);
446 }
447 
448 
449 /**
450  * qemuBlockJobStartupFinalize:
451  * @job: job being started
452  *
453  * Unregisters and clears the job private data if the job was not started in
454  * qemu (see qemuBlockJobStarted), or just drops the local reference to @job
455  * if it was started.
456  */
457 void
458 qemuBlockJobStartupFinalize(virDomainObj *vm,
459                             qemuBlockJobData *job)
460 {
461     if (!job)
462         return;
463 
464     if (job->state == QEMU_BLOCKJOB_STATE_NEW)
465         qemuBlockJobUnregister(job, vm);
466 
467     virObjectUnref(job);
468 }
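/* Editorial usage sketch (not part of the original code): how callers typically
 * pair the helpers above. Error handling, locking and the actual monitor
 * interaction are omitted and the variable names (baseSource, flags,
 * started_in_qemu, endjob) are placeholders:
 *
 *     qemuBlockJobData *job = NULL;
 *
 *     if (!(job = qemuBlockJobDiskNewPull(vm, disk, baseSource, flags)))
 *         goto endjob;
 *
 *     ...ask qemu to start the job via the monitor...
 *
 *     if (started_in_qemu)
 *         qemuBlockJobStarted(job, vm);
 *
 *  endjob:
 *     qemuBlockJobStartupFinalize(vm, job);
 *
 * qemuBlockJobStartupFinalize() unregisters the job again if qemu never
 * started it and in any case drops the local reference to the job.
 */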
469 
470 
471 bool
472 qemuBlockJobIsRunning(qemuBlockJobData *job)
473 {
474     return job->state == QEMU_BLOCKJOB_STATE_RUNNING ||
475            job->state == QEMU_BLOCKJOB_STATE_READY ||
476            job->state == QEMU_BLOCKJOB_STATE_ABORTING ||
477            job->state == QEMU_BLOCKJOB_STATE_PIVOTING;
478 }
479 
480 
481 /* returns 1 for a job we didn't reconnect to */
482 static int
483 qemuBlockJobRefreshJobsFindInactive(const void *payload,
484                                     const char *name G_GNUC_UNUSED,
485                                     const void *data G_GNUC_UNUSED)
486 {
487     const qemuBlockJobData *job = payload;
488 
489     return !job->reconnected;
490 }
491 
492 
493 int
494 qemuBlockJobRefreshJobs(virQEMUDriver *driver,
495                         virDomainObj *vm)
496 {
497     qemuDomainObjPrivate *priv = vm->privateData;
498     qemuMonitorJobInfo **jobinfo = NULL;
499     size_t njobinfo = 0;
500     qemuBlockJobData *job = NULL;
501     int newstate;
502     size_t i;
503     int ret = -1;
504     int rc;
505 
506     qemuDomainObjEnterMonitor(driver, vm);
507 
508     rc = qemuMonitorGetJobInfo(priv->mon, &jobinfo, &njobinfo);
509 
510     if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
511         goto cleanup;
512 
513     for (i = 0; i < njobinfo; i++) {
514         if (!(job = virHashLookup(priv->blockjobs, jobinfo[i]->id))) {
515             VIR_DEBUG("ignoring untracked job '%s'", jobinfo[i]->id);
516             continue;
517         }
518 
519         /* try cancelling invalid jobs - this works only if the job is not
520          * concluded; in that case the cancellation fails. We let such a job
521          * linger in qemu and just forget about it in libvirt because there's
522          * not much we can do besides killing the VM */
523         if (job->invalidData) {
524 
525             qemuBlockJobMarkBroken(job);
526 
527             qemuDomainObjEnterMonitor(driver, vm);
528 
529             rc = qemuMonitorBlockJobCancel(priv->mon, job->name, true);
530             if (rc == -1 && jobinfo[i]->status == QEMU_MONITOR_JOB_STATUS_CONCLUDED)
531                 VIR_WARN("can't cancel job '%s' with invalid data", job->name);
532 
533             if (qemuDomainObjExitMonitor(driver, vm) < 0)
534                 goto cleanup;
535 
536             if (rc < 0)
537                 qemuBlockJobUnregister(job, vm);
538             else
539                 job->reconnected = true;
540             continue;
541         }
542 
543         if ((newstate = qemuBlockjobConvertMonitorStatus(jobinfo[i]->status)) < 0)
544             continue;
545 
546         if (newstate != job->state) {
547             if ((job->state == QEMU_BLOCKJOB_STATE_FAILED ||
548                  job->state == QEMU_BLOCKJOB_STATE_COMPLETED)) {
549                 /* preserve the old state but allow the job to be bumped to
550                  * execute the finishing steps */
551                 job->newstate = job->state;
552             } else if (newstate == QEMU_BLOCKJOB_STATE_CONCLUDED) {
553                 job->errmsg = g_strdup(jobinfo[i]->error);
554 
555                 if (job->errmsg)
556                     job->newstate = QEMU_BLOCKJOB_STATE_FAILED;
557                 else
558                     job->newstate = QEMU_BLOCKJOB_STATE_COMPLETED;
559             } else if (newstate == QEMU_BLOCKJOB_STATE_READY) {
560                 /* Apply _READY state only if it was not applied before */
561                 if (job->state == QEMU_BLOCKJOB_STATE_NEW ||
562                     job->state == QEMU_BLOCKJOB_STATE_RUNNING)
563                     job->newstate = newstate;
564             }
565             /* don't update the job otherwise */
566         }
567 
568         job->reconnected = true;
569 
570         if (job->newstate != -1)
571             qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
572         /* 'job' may be invalid after this update */
573     }
574 
575     /* remove data for jobs which qemu didn't report (the algorithm is
576      * inefficient, but the possibility of such jobs is very low) */
577     while ((job = virHashSearch(priv->blockjobs, qemuBlockJobRefreshJobsFindInactive, NULL, NULL))) {
578         VIR_WARN("dropping blockjob '%s' untracked by qemu", job->name);
579         qemuBlockJobUnregister(job, vm);
580     }
581 
582     ret = 0;
583 
584  cleanup:
585     for (i = 0; i < njobinfo; i++)
586         qemuMonitorJobInfoFree(jobinfo[i]);
587     VIR_FREE(jobinfo);
588 
589     return ret;
590 }
591 
592 
593 /**
594  * qemuBlockJobEmitEvents:
595  *
596  * Emits the VIR_DOMAIN_EVENT_ID_BLOCK_JOB and VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2
597  * events for a block job. The former event is emitted only for local disks.
598  */
599 static void
600 qemuBlockJobEmitEvents(virQEMUDriver *driver,
601                        virDomainObj *vm,
602                        virDomainDiskDef *disk,
603                        virDomainBlockJobType type,
604                        virConnectDomainEventBlockJobStatus status)
605 {
606     virObjectEvent *event = NULL;
607     virObjectEvent *event2 = NULL;
608 
609     /* don't emit events for jobs without disk */
610     if (!disk)
611         return;
612 
613     /* don't emit events for internal jobs and states */
614     if (type >= VIR_DOMAIN_BLOCK_JOB_TYPE_LAST ||
615         status >= VIR_DOMAIN_BLOCK_JOB_LAST)
616         return;
617 
618     if (virStorageSourceIsLocalStorage(disk->src) &&
619         !virStorageSourceIsEmpty(disk->src)) {
620         event = virDomainEventBlockJobNewFromObj(vm, virDomainDiskGetSource(disk),
621                                                  type, status);
622         virObjectEventStateQueue(driver->domainEventState, event);
623     }
624 
625     event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type, status);
626     virObjectEventStateQueue(driver->domainEventState, event2);
627 }
628 
629 /**
630  * qemuBlockJobCleanStorageSourceRuntime:
631  * @src: storage source to clean from runtime data
632  *
633  * Remove all runtime related data from the storage source.
634  */
635 static void
636 qemuBlockJobCleanStorageSourceRuntime(virStorageSource *src)
637 {
638     src->id = 0;
639     src->detected = false;
640     VIR_FREE(src->relPath);
641     VIR_FREE(src->backingStoreRaw);
642     VIR_FREE(src->nodestorage);
643     VIR_FREE(src->nodeformat);
644     VIR_FREE(src->tlsAlias);
645     VIR_FREE(src->tlsCertdir);
646 }
647 
648 
649 /**
650  * qemuBlockJobRewriteConfigDiskSource:
651  * @vm: domain object
652  * @disk: live definition disk
653  * @newsrc: new source which should also be used for the persistent (config) disk
654  *
655  * For block jobs which modify the source of the running disk we should in most
656  * cases try our best to update the disk source in the config XML as well.
657  *
658  * This helper finds the disk from the persistent definition corresponding to
659  * @disk and updates its source to @newsrc.
660  */
661 static void
662 qemuBlockJobRewriteConfigDiskSource(virDomainObj *vm,
663                                     virDomainDiskDef *disk,
664                                     virStorageSource *newsrc)
665 {
666     virDomainDiskDef *persistDisk = NULL;
667     g_autoptr(virStorageSource) copy = NULL;
668     virStorageSource *n;
669 
670     if (!vm->newDef)
671         return;
672 
673     if (!(persistDisk = virDomainDiskByTarget(vm->newDef, disk->dst)))
674         return;
675 
676     if (!virStorageSourceIsSameLocation(disk->src, persistDisk->src))
677         return;
678 
679     if (!(copy = virStorageSourceCopy(newsrc, true)) ||
680         virStorageSourceInitChainElement(copy, persistDisk->src, true) < 0) {
681         VIR_WARN("Unable to update persistent definition on vm %s after block job",
682                  vm->def->name);
683         return;
684     }
685 
686     for (n = copy; virStorageSourceIsBacking(n); n = n->backingStore) {
687         qemuBlockJobCleanStorageSourceRuntime(n);
688 
689         /* discard any detected backing store */
690         if (virStorageSourceIsBacking(n->backingStore) &&
691             n->backingStore->detected) {
692             virObjectUnref(n->backingStore);
693             n->backingStore = NULL;
694             break;
695         }
696     }
697 
698     virObjectUnref(persistDisk->src);
699     persistDisk->src = g_steal_pointer(&copy);
700 }
701 
702 
703 static void
704 qemuBlockJobEventProcessLegacyCompleted(virQEMUDriver *driver,
705                                         virDomainObj *vm,
706                                         qemuBlockJobData *job,
707                                         int asyncJob)
708 {
709     virDomainDiskDef *disk = job->disk;
710 
711     if (!disk)
712         return;
713 
714     if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_PIVOT) {
715         qemuBlockJobRewriteConfigDiskSource(vm, disk, disk->mirror);
716         /* XXX We want to revoke security labels as well as audit that
717          * revocation, before dropping the original source.  But it gets
718          * tricky if both source and mirror share common backing files (we
719          * want to only revoke the non-shared portion of the chain); so for
720          * now, we leak the access to the original.  */
721         virDomainLockImageDetach(driver->lockManager, vm, disk->src);
722 
723         /* Move secret driver metadata */
724         if (qemuSecurityMoveImageMetadata(driver, vm, disk->src, disk->mirror) < 0) {
725             VIR_WARN("Unable to move disk metadata on "
726                      "vm %s from %s to %s (disk target %s)",
727                      vm->def->name,
728                      NULLSTR(disk->src->path),
729                      NULLSTR(disk->mirror->path),
730                      disk->dst);
731         }
732 
733         virObjectUnref(disk->src);
734         disk->src = disk->mirror;
735     } else {
736         if (disk->mirror) {
737             virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);
738 
739             /* Ideally, we would restore seclabels on the backing chain here
740              * but we don't know whether somebody else is using parts of it.
741              * Remove security driver metadata so that it is not leaked. */
742             qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->mirror);
743 
744             virObjectUnref(disk->mirror);
745         }
746 
747         qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->src);
748     }
749 
750     /* Recompute the cached backing chain to match our
751      * updates.  Better would be storing the chain ourselves
752      * rather than reprobing, but we haven't quite completed
753      * that conversion to use our XML tracking. */
754     disk->mirror = NULL;
755     disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
756     disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
757     disk->src->id = 0;
758     virStorageSourceBackingStoreClear(disk->src);
759     ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, NULL, true));
760     ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob));
761     qemuBlockJobUnregister(job, vm);
762     qemuDomainSaveConfig(vm);
763 }
764 
765 
766 /**
767  * qemuBlockJobEventProcessLegacy:
768  * @driver: qemu driver
769  * @vm: domain
770  * @job: job to process events for
771  *
772  * Update disk's mirror state in response to a block job event
773  * from QEMU. For mirror states that must survive a libvirt
774  * restart, also update the domain's status XML.
775  */
776 static void
777 qemuBlockJobEventProcessLegacy(virQEMUDriver *driver,
778                                virDomainObj *vm,
779                                qemuBlockJobData *job,
780                                int asyncJob)
781 {
782     virDomainDiskDef *disk = job->disk;
783 
784     VIR_DEBUG("disk=%s, mirrorState=%s, type=%d, state=%d, newstate=%d",
785               disk->dst,
786               NULLSTR(virDomainDiskMirrorStateTypeToString(disk->mirrorState)),
787               job->type,
788               job->state,
789               job->newstate);
790 
791     if (job->newstate == -1)
792         return;
793 
794     qemuBlockJobEmitEvents(driver, vm, disk, job->type, job->newstate);
795 
796     job->state = job->newstate;
797     job->newstate = -1;
798 
799     /* If we completed a block pull or commit, then update the XML
800      * to match.  */
801     switch ((virConnectDomainEventBlockJobStatus) job->state) {
802     case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
803         qemuBlockJobEventProcessLegacyCompleted(driver, vm, job, asyncJob);
804         break;
805 
806     case VIR_DOMAIN_BLOCK_JOB_READY:
807         disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
808         qemuDomainSaveStatus(vm);
809         break;
810 
811     case VIR_DOMAIN_BLOCK_JOB_FAILED:
812     case VIR_DOMAIN_BLOCK_JOB_CANCELED:
813         if (disk->mirror) {
814             virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);
815 
816             /* Ideally, we would restore seclabels on the backing chain here
817              * but we don't know whether somebody else is using parts of it.
818              * Remove security driver metadata so that it is not leaked. */
819             qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->mirror);
820 
821             virObjectUnref(disk->mirror);
822             disk->mirror = NULL;
823         }
824         disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
825         disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
826         qemuBlockJobUnregister(job, vm);
827         break;
828 
829     case VIR_DOMAIN_BLOCK_JOB_LAST:
830         break;
831     }
832 }
833 
834 
835 static void
836 qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriver *driver,
837                                              virDomainObj *vm,
838                                              qemuDomainAsyncJob asyncJob,
839                                              virStorageSource *chain)
840 {
841     g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
842 
843     if (!(data = qemuBlockStorageSourceChainDetachPrepareBlockdev(chain)))
844         return;
845 
846     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
847         return;
848 
849     qemuBlockStorageSourceChainDetach(qemuDomainGetMonitor(vm), data);
850     if (qemuDomainObjExitMonitor(driver, vm) < 0)
851         return;
852 
853     qemuDomainStorageSourceChainAccessRevoke(driver, vm, chain);
854 }
855 
856 
857 /**
858  * qemuBlockJobGetConfigDisk:
859  * @vm: domain object
860  * @disk: disk from the running definition
861  * @diskChainBottom: the last relevant element of the backing chain of @disk
862  *
863  * Finds and returns the disk corresponding to @disk in the inactive definition.
864  * The backing chain of the inactive disk must be identical to that of @disk
865  * from the source down to @diskChainBottom. If @diskChainBottom is NULL the
866  * whole backing chains of both @disk and the persistent config definition
867  * equivalent must be identical.
868  */
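/* Editorial illustration (not part of the original code) of the matching rule
 * described above, with @diskChainBottom pointing at image 'B':
 *
 *   live disk:    A -> B -> C
 *   config disk:  A'-> B'-> D
 *
 * The config disk is returned because the two chains refer to the same
 * locations from the source down to 'B'; anything below @diskChainBottom
 * ('C' vs 'D' here) is not compared.
 */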
869 static virDomainDiskDef *
870 qemuBlockJobGetConfigDisk(virDomainObj *vm,
871                           virDomainDiskDef *disk,
872                           virStorageSource *diskChainBottom)
873 {
874     virStorageSource *disksrc = NULL;
875     virStorageSource *cfgsrc = NULL;
876     virDomainDiskDef *ret = NULL;
877 
878     if (!vm->newDef || !disk)
879         return NULL;
880 
881     disksrc = disk->src;
882 
883     if (!(ret = virDomainDiskByTarget(vm->newDef, disk->dst)))
884         return NULL;
885 
886     cfgsrc = ret->src;
887 
888     while (disksrc && cfgsrc) {
889         if (!virStorageSourceIsSameLocation(disksrc, cfgsrc))
890             return NULL;
891 
892         if (diskChainBottom && diskChainBottom == disksrc)
893             return ret;
894 
895         disksrc = disksrc->backingStore;
896         cfgsrc = cfgsrc->backingStore;
897     }
898 
899     if (disksrc || cfgsrc)
900         return NULL;
901 
902     return ret;
903 }
904 
905 
906 /**
907  * qemuBlockJobClearConfigChain:
908  * @vm: domain object
909  * @disk: disk object from running definition of @vm
910  *
911  * When the backing chain definition of the live disk differs from the
912  * definition for the next start config, a block job touching the backing
913  * chain would leave us unable to restore the chain in the next start config
914  * properly.
915  *
916  * This function checks that the source of the running disk definition and the
917  * config disk definition are the same and if so clears the backing chain data.
918  */
919 static void
920 qemuBlockJobClearConfigChain(virDomainObj *vm,
921                              virDomainDiskDef *disk)
922 {
923     virDomainDiskDef *cfgdisk = NULL;
924 
925     if (!vm->newDef || !disk)
926         return;
927 
928     if (!(cfgdisk = virDomainDiskByTarget(vm->newDef, disk->dst)))
929         return;
930 
931     if (!virStorageSourceIsSameLocation(disk->src, cfgdisk->src))
932         return;
933 
934     virObjectUnref(cfgdisk->src->backingStore);
935     cfgdisk->src->backingStore = NULL;
936 }
937 
938 
939 static int
940 qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObj *vm,
941                                              qemuBlockJobData *job,
942                                              qemuDomainAsyncJob asyncJob)
943 {
944     qemuDomainObjPrivate *priv = vm->privateData;
945     g_autoptr(GHashTable) blockNamedNodeData = NULL;
946     g_autoptr(virJSONValue) actions = NULL;
947 
948     if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
949         return -1;
950 
951     if (qemuBlockGetBitmapMergeActions(job->disk->src,
952                                        job->data.pull.base,
953                                        job->disk->src,
954                                        NULL, NULL, NULL,
955                                        &actions,
956                                        blockNamedNodeData) < 0)
957         return -1;
958 
959     if (!actions)
960         return 0;
961 
962     if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
963         return -1;
964 
965     qemuMonitorTransaction(priv->mon, &actions);
966 
967     if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
968         return -1;
969 
970     return 0;
971 }
972 
973 
974 /**
975  * qemuBlockJobProcessEventCompletedPull:
976  * @driver: qemu driver object
977  * @vm: domain object
978  * @job: job data
979  * @asyncJob: qemu asynchronous job type (for monitor interaction)
980  *
981  * This function executes the finalizing steps after a successful block pull job
982  * (block-stream in qemu terminology). The pull job copies all the data from the
983  * images in the backing chain up to the 'base' image. The 'base' image becomes
984  * the backing store of the active top level image. If 'base' was not used,
985  * everything is pulled into the top level image and the top level image ceases
986  * to have a backing store. All intermediate images between the active image
987  * and base image are no longer required and can be unplugged.
988  */
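/* Editorial illustration (not part of the original code) of the backing chain
 * change performed once a pull job completes:
 *
 *   before:              top -> mid1 -> mid2 -> base -> ...
 *   after (with 'base'): top -> base -> ...   ('mid1' and 'mid2' are unplugged)
 *   after (no 'base'):   top                  (everything pulled into 'top')
 */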
989 static void
990 qemuBlockJobProcessEventCompletedPull(virQEMUDriver *driver,
991                                       virDomainObj *vm,
992                                       qemuBlockJobData *job,
993                                       qemuDomainAsyncJob asyncJob)
994 {
995     virStorageSource *base = NULL;
996     virStorageSource *baseparent = NULL;
997     virDomainDiskDef *cfgdisk = NULL;
998     virStorageSource *cfgbase = NULL;
999     virStorageSource *cfgbaseparent = NULL;
1000     virStorageSource *n;
1001     virStorageSource *tmp;
1002 
1003     VIR_DEBUG("pull job '%s' on VM '%s' completed", job->name, vm->def->name);
1004 
1005     /* if the job isn't associated with a disk there's nothing to do */
1006     if (!job->disk)
1007         return;
1008 
1009     if (!(cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.pull.base)))
1010         qemuBlockJobClearConfigChain(vm, job->disk);
1011 
1012     qemuBlockJobProcessEventCompletedPullBitmaps(vm, job, asyncJob);
1013 
1014     /* when pulling if 'base' is right below the top image we don't have to modify it */
1015     if (job->disk->src->backingStore == job->data.pull.base)
1016         return;
1017 
1018     if (job->data.pull.base) {
1019         base = job->data.pull.base;
1020 
1021         if (cfgdisk)
1022             cfgbase = cfgdisk->src->backingStore;
1023 
1024         for (n = job->disk->src->backingStore; n && n != job->data.pull.base; n = n->backingStore) {
1025             /* find the image on top of 'base' */
1026 
1027             if (cfgbase) {
1028                 cfgbaseparent = cfgbase;
1029                 cfgbase = cfgbase->backingStore;
1030             }
1031 
1032             baseparent = n;
1033         }
1034     } else {
1035         /* create terminators for the chain; since we are pulling everything
1036          * into the top image the chain is automatically considered terminated */
1037         base = virStorageSourceNew();
1038 
1039         if (cfgdisk)
1040             cfgbase = virStorageSourceNew();
1041     }
1042 
1043     tmp = job->disk->src->backingStore;
1044     job->disk->src->backingStore = base;
1045     if (baseparent)
1046         baseparent->backingStore = NULL;
1047     qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, tmp);
1048     virObjectUnref(tmp);
1049 
1050     if (cfgdisk) {
1051         tmp = cfgdisk->src->backingStore;
1052         cfgdisk->src->backingStore = cfgbase;
1053         if (cfgbaseparent)
1054             cfgbaseparent->backingStore = NULL;
1055         virObjectUnref(tmp);
1056     }
1057 }
1058 
1059 
1060 /**
1061  * qemuBlockJobDeleteImages:
1062  * @driver: qemu driver object
1063  * @vm: domain object
1064  * @disk: disk object that the chain to be deleted is associated with
1065  * @top: top snapshot of the chain to be deleted
1066  *
1067  * Helper for removing snapshot images.  Intended for callers like
1068  * qemuBlockJobProcessEventCompletedCommit() and
1069  * qemuBlockJobProcessEventCompletedActiveCommit() as it relies on adjustments
1070  * these functions perform on the 'backingStore' chain to function correctly.
1071  *
1072  * TODO look into removing backing store for non-local snapshots too
1073  */
1074 static void
1075 qemuBlockJobDeleteImages(virQEMUDriver *driver,
1076                          virDomainObj *vm,
1077                          virDomainDiskDef *disk,
1078                          virStorageSource *top)
1079 {
1080     virStorageSource *p = top;
1081     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
1082     uid_t uid;
1083     gid_t gid;
1084 
1085     for (; p != NULL; p = p->backingStore) {
1086         if (virStorageSourceGetActualType(p) == VIR_STORAGE_TYPE_FILE) {
1087 
1088             qemuDomainGetImageIds(cfg, vm, p, disk->src, &uid, &gid);
1089 
1090             if (virFileRemove(p->path, uid, gid) < 0) {
1091                 VIR_WARN("Unable to remove snapshot image file '%s' (%s)",
1092                          p->path, g_strerror(errno));
1093             }
1094         }
1095     }
1096 }
1097 
1098 
1099 /**
1100  * qemuBlockJobProcessEventCompletedCommitBitmaps:
1101  *
1102  * Handles the bitmap changes after commit. This returns -1 on monitor failures.
1103  */
1104 static int
1105 qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObj *vm,
1106                                                qemuBlockJobData *job,
1107                                                qemuDomainAsyncJob asyncJob)
1108 {
1109     qemuDomainObjPrivate *priv = vm->privateData;
1110     g_autoptr(GHashTable) blockNamedNodeData = NULL;
1111     g_autoptr(virJSONValue) actions = NULL;
1112     bool active = job->type == QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT;
1113 
1114     if (!active &&
1115         !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
1116         return 0;
1117 
1118     if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
1119         return -1;
1120 
1121     if (qemuBlockBitmapsHandleCommitFinish(job->data.commit.top,
1122                                            job->data.commit.base,
1123                                            active,
1124                                            blockNamedNodeData,
1125                                            &actions) < 0)
1126         return 0;
1127 
1128     if (!actions)
1129         return 0;
1130 
1131     if (!active) {
1132         if (qemuBlockReopenReadWrite(vm, job->data.commit.base, asyncJob) < 0)
1133             return -1;
1134     }
1135 
1136     if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
1137         return -1;
1138 
1139     qemuMonitorTransaction(priv->mon, &actions);
1140 
1141     if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
1142         return -1;
1143 
1144     if (!active) {
1145         if (qemuBlockReopenReadOnly(vm, job->data.commit.base, asyncJob) < 0)
1146             return -1;
1147     }
1148 
1149     return 0;
1150 }
1151 
1152 
1153 /**
1154  * qemuBlockJobProcessEventCompletedCommit:
1155  * @driver: qemu driver object
1156  * @vm: domain object
1157  * @job: job data
1158  * @asyncJob: qemu asynchronous job type (for monitor interaction)
1159  *
1160  * This function executes the finalizing steps after a successful block commit
1161  * job. The commit job moves the blocks from backing chain images starting from
1162  * 'top' into the 'base' image. The overlay of the 'top' image ('topparent')
1163  * then directly references the 'base' image. All intermediate images can be
1164  * removed/deleted.
1165  */
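/* Editorial illustration (not part of the original code) of a completed
 * (non-active) commit of 'top' into 'base':
 *
 *   before:  active -> ... -> topparent -> top -> intermediate -> base -> ...
 *   after:   active -> ... -> topparent -> base -> ...
 *
 * 'top' and the intermediate images are no longer referenced and can be
 * deleted if requested.
 */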
1166 static void
1167 qemuBlockJobProcessEventCompletedCommit(virQEMUDriver *driver,
1168                                         virDomainObj *vm,
1169                                         qemuBlockJobData *job,
1170                                         qemuDomainAsyncJob asyncJob)
1171 {
1172     virStorageSource *baseparent = NULL;
1173     virDomainDiskDef *cfgdisk = NULL;
1174     virStorageSource *cfgnext = NULL;
1175     virStorageSource *cfgtopparent = NULL;
1176     virStorageSource *cfgtop = NULL;
1177     virStorageSource *cfgbase = NULL;
1178     virStorageSource *cfgbaseparent = NULL;
1179     virStorageSource *n;
1180 
1181     VIR_DEBUG("commit job '%s' on VM '%s' completed", job->name, vm->def->name);
1182 
1183     /* if the job isn't associated with a disk there's nothing to do */
1184     if (!job->disk)
1185         return;
1186 
1187     if ((cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.commit.base)))
1188         cfgnext = cfgdisk->src;
1189 
1190     if (!cfgdisk)
1191         qemuBlockJobClearConfigChain(vm, job->disk);
1192 
1193     for (n = job->disk->src; n && n != job->data.commit.base; n = n->backingStore) {
1194         if (cfgnext) {
1195             if (n == job->data.commit.topparent)
1196                 cfgtopparent = cfgnext;
1197 
1198             if (n == job->data.commit.top)
1199                 cfgtop = cfgnext;
1200 
1201             cfgbaseparent = cfgnext;
1202             cfgnext = cfgnext->backingStore;
1203         }
1204         baseparent = n;
1205     }
1206 
1207     if (!n)
1208         return;
1209 
1210     if (qemuBlockJobProcessEventCompletedCommitBitmaps(vm, job, asyncJob) < 0)
1211         return;
1212 
1213     /* revert access to images */
1214     qemuDomainStorageSourceAccessAllow(driver, vm, job->data.commit.base,
1215                                        true, false, false);
1216     if (job->data.commit.topparent != job->disk->src)
1217         qemuDomainStorageSourceAccessAllow(driver, vm, job->data.commit.topparent,
1218                                            true, false, true);
1219 
1220     baseparent->backingStore = NULL;
1221     job->data.commit.topparent->backingStore = job->data.commit.base;
1222 
1223     qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->data.commit.top);
1224 
1225     if (job->data.commit.deleteCommittedImages)
1226         qemuBlockJobDeleteImages(driver, vm, job->disk, job->data.commit.top);
1227 
1228     virObjectUnref(job->data.commit.top);
1229     job->data.commit.top = NULL;
1230 
1231     if (cfgbaseparent) {
1232         cfgbase = g_steal_pointer(&cfgbaseparent->backingStore);
1233 
1234         if (cfgtopparent)
1235             cfgtopparent->backingStore = cfgbase;
1236         else
1237             cfgdisk->src = cfgbase;
1238 
1239         virObjectUnref(cfgtop);
1240     }
1241 }
1242 
1243 
1244 /**
1245  * qemuBlockJobProcessEventCompletedActiveCommit:
1246  * @driver: qemu driver object
1247  * @vm: domain object
1248  * @job: job data
1249  * @asyncJob: qemu asynchronous job type (for monitor interaction)
1250  *
1251  * This function executes the finalizing steps after a successful active layer
1252  * block commit job. The commit job moves the blocks from backing chain images
1253  * starting from the active disk source image into the 'base' image. The disk
1254  * source then changes to the 'base' image. All intermediate images can be
1255  * removed/deleted.
1256  */
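/* Editorial illustration (not part of the original code) of a completed active
 * layer commit of the disk source into 'base':
 *
 *   before:  src (active) -> intermediate -> base -> ...
 *   after:   base -> ...   ('base' becomes the new disk source)
 */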
1257 static void
1258 qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriver *driver,
1259                                               virDomainObj *vm,
1260                                               qemuBlockJobData *job,
1261                                               qemuDomainAsyncJob asyncJob)
1262 {
1263     virStorageSource *baseparent = NULL;
1264     virDomainDiskDef *cfgdisk = NULL;
1265     virStorageSource *cfgnext = NULL;
1266     virStorageSource *cfgtop = NULL;
1267     virStorageSource *cfgbase = NULL;
1268     virStorageSource *cfgbaseparent = NULL;
1269     virStorageSource *n;
1270 
1271     VIR_DEBUG("active commit job '%s' on VM '%s' completed", job->name, vm->def->name);
1272 
1273     /* if the job isn't associated with a disk there's nothing to do */
1274     if (!job->disk)
1275         return;
1276 
1277     if ((cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.commit.base)))
1278         cfgnext = cfgdisk->src;
1279 
1280     for (n = job->disk->src; n && n != job->data.commit.base; n = n->backingStore) {
1281         if (cfgnext) {
1282             if (n == job->data.commit.top)
1283                 cfgtop = cfgnext;
1284 
1285             cfgbaseparent = cfgnext;
1286             cfgnext = cfgnext->backingStore;
1287         }
1288         baseparent = n;
1289     }
1290 
1291     if (!n)
1292         return;
1293 
1294     if (!cfgdisk) {
1295         /* when the config disk chain didn't match but the disk top seems to be
1296          * identical we need to modify the disk source since the active commit
1297          * makes the top level image invalid.
1298          */
1299         qemuBlockJobRewriteConfigDiskSource(vm, job->disk, job->data.commit.base);
1300     } else {
1301         cfgbase = g_steal_pointer(&cfgbaseparent->backingStore);
1302         cfgdisk->src = cfgbase;
1303         cfgdisk->src->readonly = cfgtop->readonly;
1304         virObjectUnref(cfgtop);
1305     }
1306 
1307     /* Move security driver metadata */
1308     if (qemuSecurityMoveImageMetadata(driver, vm, job->disk->src, job->data.commit.base) < 0)
1309         VIR_WARN("Unable to move disk metadata on vm %s", vm->def->name);
1310 
1311     baseparent->backingStore = NULL;
1312     job->disk->src = job->data.commit.base;
1313     job->disk->src->readonly = job->data.commit.top->readonly;
1314 
1315     if (qemuBlockJobProcessEventCompletedCommitBitmaps(vm, job, asyncJob) < 0)
1316         return;
1317 
1318     qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->data.commit.top);
1319 
1320     if (job->data.commit.deleteCommittedImages)
1321         qemuBlockJobDeleteImages(driver, vm, job->disk, job->data.commit.top);
1322 
1323     virObjectUnref(job->data.commit.top);
1324     job->data.commit.top = NULL;
1325     /* the mirror element does not serve a functional purpose for the commit job */
1326     virObjectUnref(job->disk->mirror);
1327     job->disk->mirror = NULL;
1328 }
1329 
1330 
1331 static int
1332 qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObj *vm,
1333                                              qemuBlockJobData *job,
1334                                              qemuDomainAsyncJob asyncJob)
1335 {
1336     qemuDomainObjPrivate *priv = vm->privateData;
1337     g_autoptr(GHashTable) blockNamedNodeData = NULL;
1338     g_autoptr(virJSONValue) actions = NULL;
1339     bool shallow = job->jobflags & VIR_DOMAIN_BLOCK_COPY_SHALLOW;
1340 
1341     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
1342         return 0;
1343 
1344     if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
1345         return -1;
1346 
1347     if (qemuBlockBitmapsHandleBlockcopy(job->disk->src,
1348                                         job->disk->mirror,
1349                                         blockNamedNodeData,
1350                                         shallow,
1351                                         &actions) < 0)
1352         return 0;
1353 
1354     if (!actions)
1355         return 0;
1356 
1357     if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
1358         return -1;
1359 
1360     qemuMonitorTransaction(priv->mon, &actions);
1361 
1362     if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
1363         return -1;
1364 
1365     return 0;
1366 }
1367 
1368 static void
1369 qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriver *driver,
1370                                            virDomainObj *vm,
1371                                            qemuBlockJobData *job,
1372                                            qemuDomainAsyncJob asyncJob)
1373 {
1374     qemuDomainObjPrivate *priv = vm->privateData;
1375     VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name, vm->def->name);
1376 
1377     /* mirror may be NULL for copy job corresponding to migration */
1378     if (!job->disk ||
1379         !job->disk->mirror)
1380         return;
1381 
1382     qemuBlockJobProcessEventCompletedCopyBitmaps(vm, job, asyncJob);
1383 
1384     /* for a shallow copy without reusing an external image the user can either
1385      * specify the backing chain of the destination, in which case libvirt will
1386      * open and use the chain the user provided, or not specify a chain, in
1387      * which case we'll inherit the rest of the chain from the original image */
1388     if (job->data.copy.shallownew &&
1389         !virStorageSourceIsBacking(job->disk->mirror->backingStore))
1390         job->disk->mirror->backingStore = g_steal_pointer(&job->disk->src->backingStore);
1391 
1392     if (job->disk->src->readonly &&
1393         virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
1394         ignore_value(qemuBlockReopenReadOnly(vm, job->disk->mirror, asyncJob));
1395 
1396     qemuBlockJobRewriteConfigDiskSource(vm, job->disk, job->disk->mirror);
1397 
1398     qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->disk->src);
1399     virObjectUnref(job->disk->src);
1400     job->disk->src = g_steal_pointer(&job->disk->mirror);
1401 }
1402 
1403 
1404 static void
1405 qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriver *driver,
1406                                            virDomainObj *vm,
1407                                            qemuBlockJobData *job,
1408                                            qemuDomainAsyncJob asyncJob)
1409 {
1410     VIR_DEBUG("copy job '%s' on VM '%s' aborted", job->name, vm->def->name);
1411 
1412     /* mirror may be NULL for copy job corresponding to migration */
1413     if (!job->disk ||
1414         !job->disk->mirror)
1415         return;
1416 
1417     /* activeWrite bitmap is removed automatically here */
1418     qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->disk->mirror);
1419     virObjectUnref(job->disk->mirror);
1420     job->disk->mirror = NULL;
1421 }
1422 
1423 
1424 static void
1425 qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriver *driver,
1426                                            virDomainObj *vm,
1427                                            qemuBlockJobData *job,
1428                                            qemuDomainAsyncJob asyncJob)
1429 {
1430     qemuDomainObjPrivate *priv = vm->privateData;
1431     virDomainDiskDef *disk = job->disk;
1432 
1433     VIR_DEBUG("active commit job '%s' on VM '%s' failed", job->name, vm->def->name);
1434 
1435     if (!disk)
1436         return;
1437 
1438     if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
1439         return;
1440 
1441     qemuMonitorBitmapRemove(priv->mon,
1442                             disk->mirror->nodeformat,
1443                             "libvirt-tmp-activewrite");
1444 
1445     if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
1446         return;
1447 
1448     /* Ideally, we would make the backing chain read only again (yes, SELinux
1449      * can do that using different labels). But that is not implemented yet and
1450      * not leaking security driver metadata is more important. */
1451     qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->mirror);
1452 
1453     virObjectUnref(disk->mirror);
1454     disk->mirror = NULL;
1455 }
1456 
1457 
1458 static void
1459 qemuBlockJobProcessEventConcludedCreate(virQEMUDriver *driver,
1460                                         virDomainObj *vm,
1461                                         qemuBlockJobData *job,
1462                                         qemuDomainAsyncJob asyncJob)
1463 {
1464     g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;
1465 
1466     /* if there is a synchronous client waiting for this job that means that
1467      * it will handle further hotplug of the created volume and also that
1468      * the 'chain' which was registered is under its control */
1469     if (job->synchronous) {
1470         virObjectUnref(job->chain);
1471         job->chain = NULL;
1472         return;
1473     }
1474 
1475     if (!job->data.create.src)
1476         return;
1477 
1478     if (!(backend = qemuBlockStorageSourceDetachPrepare(job->data.create.src, NULL)))
1479         return;
1480 
1481     /* the format node part was not attached yet, so we don't need to detach it */
1482     backend->formatAttached = false;
1483     if (job->data.create.storage) {
1484         backend->storageAttached = false;
1485         backend->storageSliceAttached = false;
1486         VIR_FREE(backend->encryptsecretAlias);
1487     }
1488 
1489     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
1490         return;
1491 
1492     qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend);
1493 
1494     if (qemuDomainObjExitMonitor(driver, vm) < 0)
1495         return;
1496 
1497     qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.create.src);
1498 }
1499 
1500 
1501 static void
1502 qemuBlockJobProcessEventConcludedBackup(virQEMUDriver *driver,
1503                                         virDomainObj *vm,
1504                                         qemuBlockJobData *job,
1505                                         qemuDomainAsyncJob asyncJob,
1506                                         qemuBlockjobState newstate,
1507                                         unsigned long long progressCurrent,
1508                                         unsigned long long progressTotal)
1509 {
1510     g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;
1511 
1512     qemuBackupNotifyBlockjobEnd(vm, job->disk, newstate, job->errmsg,
1513                                 progressCurrent, progressTotal, asyncJob);
1514 
1515     if (job->data.backup.store &&
1516         !(backend = qemuBlockStorageSourceDetachPrepare(job->data.backup.store, NULL)))
1517         return;
1518 
1519     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
1520         return;
1521 
1522     if (backend)
1523         qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend);
1524 
1525     if (job->data.backup.bitmap)
1526         qemuMonitorBitmapRemove(qemuDomainGetMonitor(vm),
1527                                 job->disk->src->nodeformat,
1528                                 job->data.backup.bitmap);
1529 
1530     if (qemuDomainObjExitMonitor(driver, vm) < 0)
1531         return;
1532 
1533     if (job->data.backup.store)
1534         qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.backup.store);
1535 }
1536 
1537 
1538 static void
1539 qemuBlockJobEventProcessConcludedTransition(qemuBlockJobData *job,
1540                                             virQEMUDriver *driver,
1541                                             virDomainObj *vm,
1542                                             qemuDomainAsyncJob asyncJob,
1543                                             unsigned long long progressCurrent,
1544                                             unsigned long long progressTotal)
1545 {
1546     bool success = job->newstate == QEMU_BLOCKJOB_STATE_COMPLETED;
1547 
1548     switch ((qemuBlockJobType) job->type) {
1549     case QEMU_BLOCKJOB_TYPE_PULL:
1550         if (success)
1551             qemuBlockJobProcessEventCompletedPull(driver, vm, job, asyncJob);
1552         break;
1553 
1554     case QEMU_BLOCKJOB_TYPE_COMMIT:
1555         if (success)
1556             qemuBlockJobProcessEventCompletedCommit(driver, vm, job, asyncJob);
1557         break;
1558 
1559     case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
1560         if (success) {
1561             qemuBlockJobProcessEventCompletedActiveCommit(driver, vm, job, asyncJob);
1562         } else {
1563             qemuBlockJobProcessEventFailedActiveCommit(driver, vm, job, asyncJob);
1564         }
1565         break;
1566 
1567     case QEMU_BLOCKJOB_TYPE_CREATE:
1568         qemuBlockJobProcessEventConcludedCreate(driver, vm, job, asyncJob);
1569         break;
1570 
1571     case QEMU_BLOCKJOB_TYPE_COPY:
1572         if (job->state == QEMU_BLOCKJOB_STATE_PIVOTING && success)
1573             qemuBlockJobProcessEventConcludedCopyPivot(driver, vm, job, asyncJob);
1574         else
1575             qemuBlockJobProcessEventConcludedCopyAbort(driver, vm, job, asyncJob);
1576         break;
1577 
1578     case QEMU_BLOCKJOB_TYPE_BACKUP:
1579         qemuBlockJobProcessEventConcludedBackup(driver, vm, job, asyncJob,
1580                                                 job->newstate, progressCurrent,
1581                                                 progressTotal);
1582         break;
1583 
1584     case QEMU_BLOCKJOB_TYPE_BROKEN:
1585     case QEMU_BLOCKJOB_TYPE_NONE:
1586     case QEMU_BLOCKJOB_TYPE_INTERNAL:
1587     case QEMU_BLOCKJOB_TYPE_LAST:
1588     default:
1589         break;
1590     }
1591 
1592     qemuBlockJobEmitEvents(driver, vm, job->disk, job->type, job->newstate);
1593     job->state = job->newstate;
1594     job->newstate = -1;
1595 }
1596 
1597 
1598 static void
1599 qemuBlockJobEventProcessConcluded(qemuBlockJobData *job,
1600                                   virQEMUDriver *driver,
1601                                   virDomainObj *vm,
1602                                   qemuDomainAsyncJob asyncJob)
1603 {
1604     qemuMonitorJobInfo **jobinfo = NULL;
1605     size_t njobinfo = 0;
1606     size_t i;
1607     bool refreshed = false;
1608     unsigned long long progressCurrent = 0;
1609     unsigned long long progressTotal = 0;
1610 
1611     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
1612         goto cleanup;
1613 
1614     /* we need to fetch the error state as the event does not propagate it */
1615     if (job->newstate == QEMU_BLOCKJOB_STATE_CONCLUDED &&
1616         qemuMonitorGetJobInfo(qemuDomainGetMonitor(vm), &jobinfo, &njobinfo) == 0) {
1617 
1618         for (i = 0; i < njobinfo; i++) {
1619             if (STRNEQ_NULLABLE(job->name, jobinfo[i]->id))
1620                 continue;
1621 
1622             progressCurrent = jobinfo[i]->progressCurrent;
1623             progressTotal = jobinfo[i]->progressTotal;
1624 
1625             job->errmsg = g_strdup(jobinfo[i]->error);
1626 
1627             if (job->errmsg)
1628                 job->newstate = QEMU_BLOCKJOB_STATE_FAILED;
1629             else
1630                 job->newstate = QEMU_BLOCKJOB_STATE_COMPLETED;
1631 
1632             refreshed = true;
1633 
1634             break;
1635         }
1636 
1637         if (i == njobinfo)
1638             VIR_WARN("failed to refresh job '%s'", job->name);
1639     }
1640 
1641     /* dismiss job in qemu */
1642     ignore_value(qemuMonitorJobDismiss(qemuDomainGetMonitor(vm), job->name));
1643 
1644     if (qemuDomainObjExitMonitor(driver, vm) < 0)
1645         goto cleanup;
1646 
1647     if ((job->newstate == QEMU_BLOCKJOB_STATE_COMPLETED ||
1648          job->newstate == QEMU_BLOCKJOB_STATE_FAILED) &&
1649         job->state == QEMU_BLOCKJOB_STATE_ABORTING)
1650         job->newstate = QEMU_BLOCKJOB_STATE_CANCELLED;
1651 
1652     if (refreshed)
1653         qemuDomainSaveStatus(vm);
1654 
1655     VIR_DEBUG("handling job '%s' state '%d' newstate '%d'", job->name, job->state, job->newstate);
1656 
1657     qemuBlockJobEventProcessConcludedTransition(job, driver, vm, asyncJob,
1658                                                 progressCurrent, progressTotal);
1659 
1660     /* unplug the backing chains in case the job inherited them */
1661     if (!job->disk) {
1662         if (job->chain)
1663             qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob,
1664                                                          job->chain);
1665         if (job->mirrorChain)
1666             qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob,
1667                                                          job->mirrorChain);
1668     }
1669 
1670  cleanup:
1671     qemuBlockJobUnregister(job, vm);
1672     qemuDomainSaveConfig(vm);
1673 
1674     for (i = 0; i < njobinfo; i++)
1675         qemuMonitorJobInfoFree(jobinfo[i]);
1676     VIR_FREE(jobinfo);
1677 }
1678 
1679 
1680 static void
1681 qemuBlockJobEventProcess(virQEMUDriver *driver,
1682                          virDomainObj *vm,
1683                          qemuBlockJobData *job,
1684                          qemuDomainAsyncJob asyncJob)
1686 {
1687     switch ((qemuBlockjobState) job->newstate) {
1688     case QEMU_BLOCKJOB_STATE_COMPLETED:
1689     case QEMU_BLOCKJOB_STATE_FAILED:
1690     case QEMU_BLOCKJOB_STATE_CANCELLED:
1691     case QEMU_BLOCKJOB_STATE_CONCLUDED:
1692         if (job->disk) {
1693             job->disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
1694             job->disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
1695         }
1696         qemuBlockJobEventProcessConcluded(job, driver, vm, asyncJob);
1697         break;
1698 
1699     case QEMU_BLOCKJOB_STATE_READY:
1700         /* In certain cases qemu can blip out of and back into the 'ready'
1701          * state for a blockjob. When the job is already past RUNNING, such as
1702          * when pivoting/aborting, this could reset the internally tracked job
1703          * state, thus we ignore the event if the job isn't in the expected state. */
1704         if (job->state == QEMU_BLOCKJOB_STATE_NEW ||
1705             job->state == QEMU_BLOCKJOB_STATE_RUNNING) {
1706             /* mirror may be NULL for copy job corresponding to migration */
1707             if (job->disk) {
1708                 job->disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
1709                 qemuBlockJobEmitEvents(driver, vm, job->disk, job->type, job->newstate);
1710             }
1711             job->state = job->newstate;
1712             qemuDomainSaveStatus(vm);
1713         }
1714         job->newstate = -1;
1715         break;
1716 
1717     case QEMU_BLOCKJOB_STATE_NEW:
1718     case QEMU_BLOCKJOB_STATE_RUNNING:
1719     case QEMU_BLOCKJOB_STATE_LAST:
1720     /* these are never processed as 'newstate' */
1721     case QEMU_BLOCKJOB_STATE_ABORTING:
1722     case QEMU_BLOCKJOB_STATE_PIVOTING:
1723     default:
1724         job->newstate = -1;
1725     }
1726 }
1727 
1728 
1729 /**
1730  * qemuBlockJobUpdate:
1731  * @vm: domain
1732  * @job: job data
1733  * @asyncJob: current qemu asynchronous job type
1734  *
1735  * Update the job and the disk's mirror state in response to a block job
1736  * event recorded in the job's newstate by the qemu block job event handlers.
1737  */
1738 void
1739 qemuBlockJobUpdate(virDomainObj *vm,
1740                    qemuBlockJobData *job,
1741                    int asyncJob)
1742 {
1743     qemuDomainObjPrivate *priv = vm->privateData;
1744 
1745     if (job->newstate == -1)
1746         return;
1747 
1748     if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
1749         qemuBlockJobEventProcess(priv->driver, vm, job, asyncJob);
1750     else
1751         qemuBlockJobEventProcessLegacy(priv->driver, vm, job, asyncJob);
1752 }
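/* A minimal usage sketch of the polling pattern built on this helper: callers
 * waiting for a block job to settle repeatedly let pending events be processed
 * and sleep on the domain condition via virDomainObjWait(); the loop condition
 * below is simplified for illustration and is not a verbatim excerpt:
 *
 *     qemuBlockJobUpdate(vm, job, asyncJob);
 *
 *     while (job->state == QEMU_BLOCKJOB_STATE_RUNNING ||
 *            job->state == QEMU_BLOCKJOB_STATE_ABORTING) {
 *         if (virDomainObjWait(vm) < 0)
 *             break;
 *         qemuBlockJobUpdate(vm, job, asyncJob);
 *     }
 */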
1753 
1754 
1755 /**
1756  * qemuBlockJobSyncBegin:
1757  * @job: block job data
1759  *
1760  * Begin a new synchronous block job for the disk of @job. The synchronous
1761  * block job is ended by a call to qemuBlockJobSyncEnd, or by
1762  * the guest quitting.
1763  *
1764  * During a synchronous block job, a block job event for the job's disk
1765  * will not be processed asynchronously. Instead, it will be
1766  * processed only when qemuBlockJobUpdate or qemuBlockJobSyncEnd
1767  * is called.
1768  */
1769 void
1770 qemuBlockJobSyncBegin(qemuBlockJobData *job)
1771 {
1772     const char *diskdst = NULL;
1773 
1774     if (job->disk)
1775         diskdst = job->disk->dst;
1776 
1777     VIR_DEBUG("disk=%s", NULLSTR(diskdst));
1778     job->synchronous = true;
1779 }
1780 
1781 
1782 /**
1783  * qemuBlockJobSyncEnd:
1784  * @vm: domain
1785  * @job: block job data
1786  *
1787  * End a synchronous block job for the disk associated with @job. Any pending
1788  * block job event for the disk is processed. Note that it is not necessary to
1789  * call this function if the block job failed to start and
1790  * qemuBlockJobStartupFinalize will be called.
1791  */
1792 void
1793 qemuBlockJobSyncEnd(virDomainObj *vm,
1794                     qemuBlockJobData *job,
1795                     int asyncJob)
1796 {
1797     const char *diskdst = NULL;
1798 
1799     if (job->disk)
1800         diskdst = job->disk->dst;
1801 
1802     VIR_DEBUG("disk=%s", NULLSTR(diskdst));
1803     job->synchronous = false;
1804     qemuBlockJobUpdate(vm, job, asyncJob);
1805 }
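/* A minimal sketch of how the synchronous-job bracket is typically used. The
 * job-starting call is a hypothetical placeholder; qemuBlockJobStartupFinalize()
 * is the real helper referenced in the comment above (its signature is assumed):
 *
 *     qemuBlockJobSyncBegin(job);
 *
 *     if (hypotheticalStartJobOnMonitor(vm, job) < 0) {
 *         // the job never reached qemu; startup finalization cleans it up
 *         qemuBlockJobStartupFinalize(vm, job);
 *         return -1;
 *     }
 *
 *     // ... wait for the job, processing its events via qemuBlockJobUpdate() ...
 *
 *     qemuBlockJobSyncEnd(vm, job, asyncJob);
 */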
1806 
1807 
1808 qemuBlockJobData *
1809 qemuBlockJobGetByDisk(virDomainDiskDef *disk)
1810 {
1811     qemuBlockJobData *job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;
1812 
1813     if (!job)
1814         return NULL;
1815 
1816     return virObjectRef(job);
1817 }
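/* Usage sketch: the returned job carries an extra reference that the caller
 * must drop. This assumes g_autoptr() cleanup is available for
 * qemuBlockJobData (plain virObjectUnref() works as well):
 *
 *     g_autoptr(qemuBlockJobData) job = qemuBlockJobGetByDisk(disk);
 *
 *     if (!job)
 *         return 0;   // no block job tracked for this disk
 *
 *     // inspect job->state / job->type while the reference is held
 */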
1818 
1819 
1820 /**
1821  * @monitorstatus: Status of the blockjob from qemu monitor (qemuMonitorJobStatus)
1822  *
1823  * Converts the block job status from the monitor to the one used by
1824  * qemuBlockJobData. If the status is unknown or does not require any handling
1825  * QEMU_BLOCKJOB_STATE_LAST is returned.
1826  */
1827 qemuBlockjobState
1828 qemuBlockjobConvertMonitorStatus(int monitorstatus)
1829 {
1830     qemuBlockjobState ret = QEMU_BLOCKJOB_STATE_LAST;
1831 
1832     switch ((qemuMonitorJobStatus) monitorstatus) {
1833     case QEMU_MONITOR_JOB_STATUS_READY:
1834         ret = QEMU_BLOCKJOB_STATE_READY;
1835         break;
1836 
1837     case QEMU_MONITOR_JOB_STATUS_CONCLUDED:
1838         ret = QEMU_BLOCKJOB_STATE_CONCLUDED;
1839         break;
1840 
1841     case QEMU_MONITOR_JOB_STATUS_UNKNOWN:
1842     case QEMU_MONITOR_JOB_STATUS_CREATED:
1843     case QEMU_MONITOR_JOB_STATUS_RUNNING:
1844     case QEMU_MONITOR_JOB_STATUS_PAUSED:
1845     case QEMU_MONITOR_JOB_STATUS_STANDBY:
1846     case QEMU_MONITOR_JOB_STATUS_WAITING:
1847     case QEMU_MONITOR_JOB_STATUS_PENDING:
1848     case QEMU_MONITOR_JOB_STATUS_ABORTING:
1849     case QEMU_MONITOR_JOB_STATUS_UNDEFINED:
1850     case QEMU_MONITOR_JOB_STATUS_NULL:
1851     case QEMU_MONITOR_JOB_STATUS_LAST:
1852     default:
1853         break;
1854     }
1855 
1856     return ret;
1858 }
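/* Usage sketch, assuming a job list obtained via qemuMonitorGetJobInfo() as in
 * qemuBlockJobEventProcessConcluded() above; the 'status' field name of
 * qemuMonitorJobInfo is an assumption here:
 *
 *     qemuBlockjobState newstate =
 *         qemuBlockjobConvertMonitorStatus(jobinfo[i]->status);
 *
 *     if (newstate != QEMU_BLOCKJOB_STATE_LAST)
 *         job->newstate = newstate;
 */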
1859