xref: /netbsd/external/gpl2/lvm2/dist/lib/cache/lvmcache.c (revision 7c604eea)
1 /*	$NetBSD: lvmcache.c,v 1.1.1.3 2009/12/02 00:26:21 haad Exp $	*/
2 
3 /*
4  * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6  *
7  * This file is part of LVM2.
8  *
9  * This copyrighted material is made available to anyone wishing to use,
10  * modify, copy, or redistribute it subject to the terms and conditions
11  * of the GNU Lesser General Public License v.2.1.
12  *
13  * You should have received a copy of the GNU Lesser General Public License
14  * along with this program; if not, write to the Free Software Foundation,
15  * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16  */
17 
18 #include "lib.h"
19 #include "lvmcache.h"
20 #include "toolcontext.h"
21 #include "dev-cache.h"
22 #include "locking.h"
23 #include "metadata.h"
24 #include "filter.h"
25 #include "filter-persistent.h"
26 #include "memlock.h"
27 #include "str_list.h"
28 #include "format-text.h"
29 #include "format_pool.h"
30 #include "format1.h"
31 
32 static struct dm_hash_table *_pvid_hash = NULL;
33 static struct dm_hash_table *_vgid_hash = NULL;
34 static struct dm_hash_table *_vgname_hash = NULL;
35 static struct dm_hash_table *_lock_hash = NULL;
36 static struct dm_list _vginfos;
37 static int _scanning_in_progress = 0;
38 static int _has_scanned = 0;
39 static int _vgs_locked = 0;
40 static int _vg_global_lock_held = 0;	/* Global lock held when cache wiped? */
41 
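/*
 * Cache layout (summary): _pvid_hash maps PV UUID strings to struct
 * lvmcache_info, _vgid_hash maps VG UUIDs to struct lvmcache_vginfo,
 * _vgname_hash maps a VG name to the first vginfo with that name (duplicate
 * names are chained through vginfo->next), and _lock_hash records which VG
 * names are currently locked.  _vginfos lists every vginfo, with orphan VGs
 * kept at the end of the list.
 */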
42 int lvmcache_init(void)
43 {
44 	dm_list_init(&_vginfos);
45 
46 	if (!(_vgname_hash = dm_hash_create(128)))
47 		return 0;
48 
49 	if (!(_vgid_hash = dm_hash_create(128)))
50 		return 0;
51 
52 	if (!(_pvid_hash = dm_hash_create(128)))
53 		return 0;
54 
55 	if (!(_lock_hash = dm_hash_create(128)))
56 		return 0;
57 
58 	/*
59 	 * Reinitialising the cache clears the internal record of
60 	 * which locks are held.  The global lock can be held during
61 	 * this operation so its state must be restored afterwards.
62 	 */
63 	if (_vg_global_lock_held) {
64 		lvmcache_lock_vgname(VG_GLOBAL, 0);
65 		_vg_global_lock_held = 0;
66 	}
67 
68 	return 1;
69 }
70 
71 /* Volume Group metadata cache functions */
72 static void _free_cached_vgmetadata(struct lvmcache_vginfo *vginfo)
73 {
74 	if (!vginfo || !vginfo->vgmetadata)
75 		return;
76 
77 	dm_free(vginfo->vgmetadata);
78 
79 	vginfo->vgmetadata = NULL;
80 
81 	log_debug("Metadata cache: VG %s wiped.", vginfo->vgname);
82 }
83 
84 /*
85  * Cache VG metadata against the vginfo with matching vgid.
86  */
87 static void _store_metadata(struct volume_group *vg, unsigned precommitted)
88 {
89 	char uuid[64] __attribute((aligned(8)));
90 	struct lvmcache_vginfo *vginfo;
91 	int size;
92 
93 	if (!(vginfo = vginfo_from_vgid((const char *)&vg->id))) {
94 		stack;
95 		return;
96 	}
97 
98 	if (vginfo->vgmetadata)
99 		_free_cached_vgmetadata(vginfo);
100 
101 	if (!(size = export_vg_to_buffer(vg, &vginfo->vgmetadata))) {
102 		stack;
103 		return;
104 	}
105 
106 	vginfo->precommitted = precommitted;
107 
108 	if (!id_write_format((const struct id *)vginfo->vgid, uuid, sizeof(uuid))) {
109 		stack;
110 		return;
111 	}
112 
113 	log_debug("Metadata cache: VG %s (%s) stored (%d bytes%s).",
114 		  vginfo->vgname, uuid, size,
115 		  precommitted ? ", precommitted" : "");
116 }
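/*
 * The cached metadata is the exported text representation produced by
 * export_vg_to_buffer(); lvmcache_get_vg() re-imports it with
 * import_vg_from_buffer(), and _free_cached_vgmetadata() discards it as soon
 * as the cached copy can no longer be trusted.
 */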
117 
118 static void _update_cache_info_lock_state(struct lvmcache_info *info,
119 					  int locked,
120 					  int *cached_vgmetadata_valid)
121 {
122 	int was_locked = (info->status & CACHE_LOCKED) ? 1 : 0;
123 
124 	/*
125 	 * Cache becomes invalid whenever lock state changes unless
126 	 * exclusive VG_GLOBAL is held (i.e. while scanning).
127 	 */
128 	if (!vgname_is_locked(VG_GLOBAL) && (was_locked != locked)) {
129 		info->status |= CACHE_INVALID;
130 		*cached_vgmetadata_valid = 0;
131 	}
132 
133 	if (locked)
134 		info->status |= CACHE_LOCKED;
135 	else
136 		info->status &= ~CACHE_LOCKED;
137 }
138 
139 static void _update_cache_vginfo_lock_state(struct lvmcache_vginfo *vginfo,
140 					    int locked)
141 {
142 	struct lvmcache_info *info;
143 	int cached_vgmetadata_valid = 1;
144 
145 	dm_list_iterate_items(info, &vginfo->infos)
146 		_update_cache_info_lock_state(info, locked,
147 					      &cached_vgmetadata_valid);
148 
149 	if (!cached_vgmetadata_valid)
150 		_free_cached_vgmetadata(vginfo);
151 }
152 
153 static void _update_cache_lock_state(const char *vgname, int locked)
154 {
155 	struct lvmcache_vginfo *vginfo;
156 
157 	if (!(vginfo = vginfo_from_vgname(vgname, NULL)))
158 		return;
159 
160 	_update_cache_vginfo_lock_state(vginfo, locked);
161 }
162 
163 static void _drop_metadata(const char *vgname)
164 {
165 	struct lvmcache_vginfo *vginfo;
166 	struct lvmcache_info *info;
167 
168 	if (!(vginfo = vginfo_from_vgname(vgname, NULL)))
169 		return;
170 
171 	/*
172 	 * Invalidate cached PV labels.
173 	 * If cached precommitted metadata exists that means we
174 	 * already invalidated the PV labels (before caching it)
175 	 * and we must not do it again.
176 	 */
177 
178 	if (!vginfo->precommitted)
179 		dm_list_iterate_items(info, &vginfo->infos)
180 			info->status |= CACHE_INVALID;
181 
182 	_free_cached_vgmetadata(vginfo);
183 }
184 
185 void lvmcache_drop_metadata(const char *vgname)
186 {
187 	/* For VG_ORPHANS, we need to invalidate all labels on orphan PVs. */
188 	if (!strcmp(vgname, VG_ORPHANS)) {
189 		_drop_metadata(FMT_TEXT_ORPHAN_VG_NAME);
190 		_drop_metadata(FMT_LVM1_ORPHAN_VG_NAME);
191 		_drop_metadata(FMT_POOL_ORPHAN_VG_NAME);
192 
193 		/* Indicate that PVs could now be missing from the cache */
194 		init_full_scan_done(0);
195 	} else if (!vgname_is_locked(VG_GLOBAL))
196 		_drop_metadata(vgname);
197 }
198 
199 /*
200  * Ensure vgname2 comes after vgname1 alphabetically.
201  * Special VG names beginning with '#' don't count.
202  */
203 static int _vgname_order_correct(const char *vgname1, const char *vgname2)
204 {
205 	if ((*vgname1 == '#') || (*vgname2 == '#'))
206 		return 1;
207 
208 	if (strcmp(vgname1, vgname2) < 0)
209 		return 1;
210 
211 	return 0;
212 }
213 
214 /*
215  * Ensure VG locks are acquired in alphabetical order.
216  */
217 int lvmcache_verify_lock_order(const char *vgname)
218 {
219 	struct dm_hash_node *n;
220 	const char *vgname2;
221 
222 	if (!_lock_hash)
223 		return_0;
224 
225 	dm_hash_iterate(n, _lock_hash) {
226 		if (!dm_hash_get_data(_lock_hash, n))
227 			return_0;
228 
229 		vgname2 = dm_hash_get_key(_lock_hash, n);
230 
231 		if (!_vgname_order_correct(vgname2, vgname)) {
232 			log_errno(EDEADLK, "Internal error: VG lock %s must "
233 				  "be requested before %s, not after.",
234 				  vgname, vgname2);
235 			return_0;
236 		}
237 	}
238 
239 	return 1;
240 }
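/*
 * Usage sketch (hypothetical caller, not part of this file): a command that
 * needs several VG locks must request them in alphabetical order, e.g.
 *
 *	lvmcache_lock_vgname("vg_a", 0);
 *	lvmcache_lock_vgname("vg_b", 0);
 *
 * Requesting "vg_a" while "vg_b" is already held makes
 * lvmcache_verify_lock_order("vg_a") log an EDEADLK error and return 0.
 */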
241 
242 void lvmcache_lock_vgname(const char *vgname, int read_only __attribute((unused)))
243 {
244 	if (!_lock_hash && !lvmcache_init()) {
245 		log_error("Internal cache initialisation failed");
246 		return;
247 	}
248 
249 	if (dm_hash_lookup(_lock_hash, vgname))
250 		log_error("Internal error: Nested locking attempted on VG %s.",
251 			  vgname);
252 
253 	if (!dm_hash_insert(_lock_hash, vgname, (void *) 1))
254 		log_error("Cache locking failure for %s", vgname);
255 
256 	_update_cache_lock_state(vgname, 1);
257 
258 	if (strcmp(vgname, VG_GLOBAL))
259 		_vgs_locked++;
260 }
261 
262 int vgname_is_locked(const char *vgname)
263 {
264 	if (!_lock_hash)
265 		return 0;
266 
267 	return dm_hash_lookup(_lock_hash, vgname) ? 1 : 0;
268 }
269 
270 void lvmcache_unlock_vgname(const char *vgname)
271 {
272 	if (!dm_hash_lookup(_lock_hash, vgname))
273 		log_error("Internal error: Attempt to unlock unlocked VG %s.",
274 			  vgname);
275 
276 	_update_cache_lock_state(vgname, 0);
277 
278 	dm_hash_remove(_lock_hash, vgname);
279 
280 	/* FIXME Do this per-VG */
281 	if (strcmp(vgname, VG_GLOBAL) && !--_vgs_locked)
282 		dev_close_all();
283 }
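/*
 * _vgs_locked counts every lock except VG_GLOBAL; releasing the last one
 * closes all cached device file descriptors via dev_close_all().
 */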
284 
285 int vgs_locked(void)
286 {
287 	return _vgs_locked;
288 }
289 
290 static void _vginfo_attach_info(struct lvmcache_vginfo *vginfo,
291 				struct lvmcache_info *info)
292 {
293 	if (!vginfo)
294 		return;
295 
296 	info->vginfo = vginfo;
297 	dm_list_add(&vginfo->infos, &info->list);
298 }
299 
300 static void _vginfo_detach_info(struct lvmcache_info *info)
301 {
302 	if (!dm_list_empty(&info->list)) {
303 		dm_list_del(&info->list);
304 		dm_list_init(&info->list);
305 	}
306 
307 	info->vginfo = NULL;
308 }
309 
310 /* If vgid supplied, require a match. */
311 struct lvmcache_vginfo *vginfo_from_vgname(const char *vgname, const char *vgid)
312 {
313 	struct lvmcache_vginfo *vginfo;
314 
315 	if (!vgname)
316 		return vginfo_from_vgid(vgid);
317 
318 	if (!_vgname_hash)
319 		return NULL;
320 
321 	if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
322 		return NULL;
323 
324 	if (vgid)
325 		do
326 			if (!strncmp(vgid, vginfo->vgid, ID_LEN))
327 				return vginfo;
328 		while ((vginfo = vginfo->next));
329 
330 	return vginfo;
331 }
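/*
 * Duplicate VG names can occur (e.g. an exported VG brought in from another
 * host).  Only one vginfo per name lives in _vgname_hash; the others hang
 * off vginfo->next, so callers that know the vgid should pass it to select
 * the right entry.
 */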
332 
333 const struct format_type *fmt_from_vgname(const char *vgname, const char *vgid)
334 {
335 	struct lvmcache_vginfo *vginfo;
336 	struct lvmcache_info *info;
337 	struct label *label;
338 	struct dm_list *devh, *tmp;
339 	struct dm_list devs;
340 	struct device_list *devl;
341 	char vgid_found[ID_LEN + 1] __attribute((aligned(8)));
342 
343 	if (!(vginfo = vginfo_from_vgname(vgname, vgid)))
344 		return NULL;
345 
346 	/* This function is normally called before reading metadata so
347  	 * we check cached labels here. Unfortunately vginfo is volatile. */
348 	dm_list_init(&devs);
349 	dm_list_iterate_items(info, &vginfo->infos) {
350 		if (!(devl = dm_malloc(sizeof(*devl)))) {
351 			log_error("device_list element allocation failed");
352 			return NULL;
353 		}
354 		devl->dev = info->dev;
355 		dm_list_add(&devs, &devl->list);
356 	}
357 
358 	memcpy(vgid_found, vginfo->vgid, sizeof(vgid_found));
359 
360 	dm_list_iterate_safe(devh, tmp, &devs) {
361 		devl = dm_list_item(devh, struct device_list);
362 		label_read(devl->dev, &label, UINT64_C(0));
363 		dm_list_del(&devl->list);
364 		dm_free(devl);
365 	}
366 
367 	/* If vginfo changed, caller needs to rescan */
368 	if (!(vginfo = vginfo_from_vgname(vgname, vgid_found)) ||
369 	    strncmp(vginfo->vgid, vgid_found, ID_LEN))
370 		return NULL;
371 
372 	return vginfo->fmt;
373 }
374 
375 struct lvmcache_vginfo *vginfo_from_vgid(const char *vgid)
376 {
377 	struct lvmcache_vginfo *vginfo;
378 	char id[ID_LEN + 1] __attribute((aligned(8)));
379 
380 	if (!_vgid_hash || !vgid)
381 		return NULL;
382 
383 	/* vgid not necessarily NULL-terminated */
384 	strncpy(&id[0], vgid, ID_LEN);
385 	id[ID_LEN] = '\0';
386 
387 	if (!(vginfo = dm_hash_lookup(_vgid_hash, id)))
388 		return NULL;
389 
390 	return vginfo;
391 }
392 
393 const char *vgname_from_vgid(struct dm_pool *mem, const char *vgid)
394 {
395 	struct lvmcache_vginfo *vginfo;
396 	const char *vgname = NULL;
397 
398 	if ((vginfo = vginfo_from_vgid(vgid)))
399 		vgname = vginfo->vgname;
400 
401 	if (mem && vgname)
402 		return dm_pool_strdup(mem, vgname);
403 
404 	return vgname;
405 }
406 
407 static int _info_is_valid(struct lvmcache_info *info)
408 {
409 	if (info->status & CACHE_INVALID)
410 		return 0;
411 
412 	/*
413 	 * The caller must hold the VG lock to manipulate metadata.
414 	 * In a cluster, remote nodes sometimes read metadata in the
415 	 * knowledge that the controlling node is holding the lock.
416 	 * So if the VG appears to be unlocked here, it should be safe
417 	 * to use the cached value.
418 	 */
419 	if (info->vginfo && !vgname_is_locked(info->vginfo->vgname))
420 		return 1;
421 
422 	if (!(info->status & CACHE_LOCKED))
423 		return 0;
424 
425 	return 1;
426 }
427 
428 static int _vginfo_is_valid(struct lvmcache_vginfo *vginfo)
429 {
430 	struct lvmcache_info *info;
431 
432 	/* Invalid if any info is invalid */
433 	dm_list_iterate_items(info, &vginfo->infos)
434 		if (!_info_is_valid(info))
435 			return 0;
436 
437 	return 1;
438 }
439 
440 /* vginfo is invalid if it does not contain at least one valid info */
441 static int _vginfo_is_invalid(struct lvmcache_vginfo *vginfo)
442 {
443 	struct lvmcache_info *info;
444 
445 	dm_list_iterate_items(info, &vginfo->infos)
446 		if (_info_is_valid(info))
447 			return 0;
448 
449 	return 1;
450 }
451 
452 /*
453  * If valid_only is set, data will only be returned if the cached data is
454  * known still to be valid.
455  */
456 struct lvmcache_info *info_from_pvid(const char *pvid, int valid_only)
457 {
458 	struct lvmcache_info *info;
459 	char id[ID_LEN + 1] __attribute((aligned(8)));
460 
461 	if (!_pvid_hash || !pvid)
462 		return NULL;
463 
464 	strncpy(&id[0], pvid, ID_LEN);
465 	id[ID_LEN] = '\0';
466 
467 	if (!(info = dm_hash_lookup(_pvid_hash, id)))
468 		return NULL;
469 
470 	if (valid_only && !_info_is_valid(info))
471 		return NULL;
472 
473 	return info;
474 }
475 
476 static void _rescan_entry(struct lvmcache_info *info)
477 {
478 	struct label *label;
479 
480 	if (info->status & CACHE_INVALID)
481 		label_read(info->dev, &label, UINT64_C(0));
482 }
483 
484 static int _scan_invalid(void)
485 {
486 	dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _rescan_entry);
487 
488 	return 1;
489 }
490 
491 int lvmcache_label_scan(struct cmd_context *cmd, int full_scan)
492 {
493 	struct label *label;
494 	struct dev_iter *iter;
495 	struct device *dev;
496 	struct format_type *fmt;
497 
498 	int r = 0;
499 
500 	/* Avoid recursion when a PVID can't be found! */
501 	if (_scanning_in_progress)
502 		return 0;
503 
504 	_scanning_in_progress = 1;
505 
506 	if (!_vgname_hash && !lvmcache_init()) {
507 		log_error("Internal cache initialisation failed");
508 		goto out;
509 	}
510 
511 	if (_has_scanned && !full_scan) {
512 		r = _scan_invalid();
513 		goto out;
514 	}
515 
516 	if (full_scan == 2 && !refresh_filters(cmd)) {
517 		log_error("refresh filters failed");
518 		goto out;
519 	}
520 
521 	if (!(iter = dev_iter_create(cmd->filter, (full_scan == 2) ? 1 : 0))) {
522 		log_error("dev_iter creation failed");
523 		goto out;
524 	}
525 
526 	while ((dev = dev_iter_get(iter)))
527 		label_read(dev, &label, UINT64_C(0));
528 
529 	dev_iter_destroy(iter);
530 
531 	_has_scanned = 1;
532 
533 	/* Perform any format-specific scanning e.g. text files */
534 	dm_list_iterate_items(fmt, &cmd->formats) {
535 		if (fmt->ops->scan && !fmt->ops->scan(fmt))
536 			goto out;
537 	}
538 
539 	/*
540 	 * If we are a long-lived process, write out the updated persistent
541 	 * device cache for the benefit of short-lived processes.
542 	 */
543 	if (full_scan == 2 && cmd->is_long_lived && cmd->dump_filter)
544 		persistent_filter_dump(cmd->filter);
545 
546 	r = 1;
547 
548       out:
549 	_scanning_in_progress = 0;
550 
551 	return r;
552 }
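/*
 * Scan levels used above: with full_scan 0 an earlier scan is reused and only
 * entries flagged CACHE_INVALID are re-read; full_scan 1 re-reads the labels
 * of every device passing the filter; full_scan 2 additionally refreshes the
 * filters first and, for long-lived processes, rewrites the persistent
 * filter cache afterwards.
 */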
553 
554 struct volume_group *lvmcache_get_vg(const char *vgid, unsigned precommitted)
555 {
556 	struct lvmcache_vginfo *vginfo;
557 	struct volume_group *vg;
558 	struct format_instance *fid;
559 
560 	if (!vgid || !(vginfo = vginfo_from_vgid(vgid)) || !vginfo->vgmetadata)
561 		return NULL;
562 
563 	if (!_vginfo_is_valid(vginfo))
564 		return NULL;
565 
566 	/*
567 	 * Don't return cached data if either:
568 	 * (i)  precommitted metadata is requested but we don't have it cached
569 	 *      - caller should read it off disk;
570 	 * (ii) live metadata is requested but we have precommitted metadata cached
571 	 *      and no devices are suspended so caller may read it off disk.
572 	 *
573 	 * If live metadata is requested but we have precommitted metadata cached
574 	 * and devices are suspended, we assume this precommitted metadata has
575 	 * already been preloaded and committed so it's OK to return it as live.
576 	 * Note that we do not clear the PRECOMMITTED flag.
577 	 */
578 	if ((precommitted && !vginfo->precommitted) ||
579 	    (!precommitted && vginfo->precommitted && !memlock()))
580 		return NULL;
581 
582 	if (!(fid =  vginfo->fmt->ops->create_instance(vginfo->fmt,
583 						       vginfo->vgname,
584 						       vgid, NULL)))
585 		return_NULL;
586 
587 	if (!(vg = import_vg_from_buffer(vginfo->vgmetadata, fid)) ||
588 	    !vg_validate(vg)) {
589 		_free_cached_vgmetadata(vginfo);
590 		vg_release(vg);
591 		return_NULL;
592 	}
593 
594 	log_debug("Using cached %smetadata for VG %s.",
595 		  vginfo->precommitted ? "pre-committed" : "", vginfo->vgname);
596 
597 	return vg;
598 }
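/*
 * A non-NULL return is a VG freshly imported from the cached text metadata
 * under a new format instance; a NULL return tells the caller to read the
 * metadata from disk instead.
 */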
599 
600 struct dm_list *lvmcache_get_vgids(struct cmd_context *cmd, int full_scan)
601 {
602 	struct dm_list *vgids;
603 	struct lvmcache_vginfo *vginfo;
604 
605 	lvmcache_label_scan(cmd, full_scan);
606 
607 	if (!(vgids = str_list_create(cmd->mem))) {
608 		log_error("vgids list allocation failed");
609 		return NULL;
610 	}
611 
612 	dm_list_iterate_items(vginfo, &_vginfos) {
613 		if (!str_list_add(cmd->mem, vgids,
614 				  dm_pool_strdup(cmd->mem, vginfo->vgid))) {
615 			log_error("strlist allocation failed");
616 			return NULL;
617 		}
618 	}
619 
620 	return vgids;
621 }
622 
623 struct dm_list *lvmcache_get_vgnames(struct cmd_context *cmd, int full_scan)
624 {
625 	struct dm_list *vgnames;
626 	struct lvmcache_vginfo *vginfo;
627 
628 	lvmcache_label_scan(cmd, full_scan);
629 
630 	if (!(vgnames = str_list_create(cmd->mem))) {
631 		log_errno(ENOMEM, "vgnames list allocation failed");
632 		return NULL;
633 	}
634 
635 	dm_list_iterate_items(vginfo, &_vginfos) {
636 		if (!str_list_add(cmd->mem, vgnames,
637 				  dm_pool_strdup(cmd->mem, vginfo->vgname))) {
638 			log_errno(ENOMEM, "strlist allocation failed");
639 			return NULL;
640 		}
641 	}
642 
643 	return vgnames;
644 }
645 
646 struct dm_list *lvmcache_get_pvids(struct cmd_context *cmd, const char *vgname,
647 				const char *vgid)
648 {
649 	struct dm_list *pvids;
650 	struct lvmcache_vginfo *vginfo;
651 	struct lvmcache_info *info;
652 
653 	if (!(pvids = str_list_create(cmd->mem))) {
654 		log_error("pvids list allocation failed");
655 		return NULL;
656 	}
657 
658 	if (!(vginfo = vginfo_from_vgname(vgname, vgid)))
659 		return pvids;
660 
661 	dm_list_iterate_items(info, &vginfo->infos) {
662 		if (!str_list_add(cmd->mem, pvids,
663 				  dm_pool_strdup(cmd->mem, info->dev->pvid))) {
664 			log_error("strlist allocation failed");
665 			return NULL;
666 		}
667 	}
668 
669 	return pvids;
670 }
671 
672 struct device *device_from_pvid(struct cmd_context *cmd, struct id *pvid)
673 {
674 	struct label *label;
675 	struct lvmcache_info *info;
676 
677 	/* Already cached ? */
678 	if ((info = info_from_pvid((char *) pvid, 0))) {
679 		if (label_read(info->dev, &label, UINT64_C(0))) {
680 			info = (struct lvmcache_info *) label->info;
681 			if (id_equal(pvid, (struct id *) &info->dev->pvid))
682 				return info->dev;
683 		}
684 	}
685 
686 	lvmcache_label_scan(cmd, 0);
687 
688 	/* Try again */
689 	if ((info = info_from_pvid((char *) pvid, 0))) {
690 		if (label_read(info->dev, &label, UINT64_C(0))) {
691 			info = (struct lvmcache_info *) label->info;
692 			if (id_equal(pvid, (struct id *) &info->dev->pvid))
693 				return info->dev;
694 		}
695 	}
696 
697 	if (memlock())
698 		return NULL;
699 
700 	lvmcache_label_scan(cmd, 2);
701 
702 	/* Try again */
703 	if ((info = info_from_pvid((char *) pvid, 0))) {
704 		if (label_read(info->dev, &label, UINT64_C(0))) {
705 			info = (struct lvmcache_info *) label->info;
706 			if (id_equal(pvid, (struct id *) &info->dev->pvid))
707 				return info->dev;
708 		}
709 	}
710 
711 	return NULL;
712 }
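/*
 * The lookup above escalates: first any existing cache entry is re-validated
 * with label_read(), then an ordinary label scan is tried, and finally a
 * full scan (full_scan == 2), unless memory is locked, in which case the
 * extra scanning is skipped.
 */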
713 
714 static int _free_vginfo(struct lvmcache_vginfo *vginfo)
715 {
716 	struct lvmcache_vginfo *primary_vginfo, *vginfo2;
717 	int r = 1;
718 
719 	_free_cached_vgmetadata(vginfo);
720 
721 	vginfo2 = primary_vginfo = vginfo_from_vgname(vginfo->vgname, NULL);
722 
723 	if (vginfo == primary_vginfo) {
724 		dm_hash_remove(_vgname_hash, vginfo->vgname);
725 		if (vginfo->next && !dm_hash_insert(_vgname_hash, vginfo->vgname,
726 						    vginfo->next)) {
727 			log_error("_vgname_hash re-insertion for %s failed",
728 				  vginfo->vgname);
729 			r = 0;
730 		}
731 	} else do
732 		if (vginfo2->next == vginfo) {
733 			vginfo2->next = vginfo->next;
734 			break;
735 		}
736  	while ((vginfo2 = primary_vginfo->next));
737 
738 	if (vginfo->vgname)
739 		dm_free(vginfo->vgname);
740 
741 	if (vginfo->creation_host)
742 		dm_free(vginfo->creation_host);
743 
744 	if (*vginfo->vgid && _vgid_hash &&
745 	    vginfo_from_vgid(vginfo->vgid) == vginfo)
746 		dm_hash_remove(_vgid_hash, vginfo->vgid);
747 
748 	dm_list_del(&vginfo->list);
749 
750 	dm_free(vginfo);
751 
752 	return r;
753 }
754 
755 /*
756  * vginfo must be info->vginfo unless info is NULL
757  */
758 static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo)
759 {
760 	if (info)
761 		_vginfo_detach_info(info);
762 
763 	/* vginfo still referenced? */
764 	if (!vginfo || is_orphan_vg(vginfo->vgname) ||
765 	    !dm_list_empty(&vginfo->infos))
766 		return 1;
767 
768 	if (!_free_vginfo(vginfo))
769 		return_0;
770 
771 	return 1;
772 }
773 
774 /* Unused
775 void lvmcache_del(struct lvmcache_info *info)
776 {
777 	if (info->dev->pvid[0] && _pvid_hash)
778 		dm_hash_remove(_pvid_hash, info->dev->pvid);
779 
780 	_drop_vginfo(info, info->vginfo);
781 
782 	info->label->labeller->ops->destroy_label(info->label->labeller,
783 						info->label);
784 	dm_free(info);
785 
786 	return;
787 } */
788 
789 static int _lvmcache_update_pvid(struct lvmcache_info *info, const char *pvid)
790 {
791 	/*
792 	 * Nothing to do if already stored with same pvid.
793 	 */
794 	if (((dm_hash_lookup(_pvid_hash, pvid)) == info) &&
795 	    !strcmp(info->dev->pvid, pvid))
796 		return 1;
797 	if (*info->dev->pvid)
798 		dm_hash_remove(_pvid_hash, info->dev->pvid);
799 	strncpy(info->dev->pvid, pvid, sizeof(info->dev->pvid));
800 	if (!dm_hash_insert(_pvid_hash, pvid, info)) {
801 		log_error("_lvmcache_update: pvid insertion failed: %s", pvid);
802 		return 0;
803 	}
804 
805 	return 1;
806 }
807 
808 /*
809  * vginfo must be info->vginfo unless info is NULL (orphans)
810  */
811 static int _lvmcache_update_vgid(struct lvmcache_info *info,
812 				 struct lvmcache_vginfo *vginfo,
813 				 const char *vgid)
814 {
815 	if (!vgid || !vginfo ||
816 	    !strncmp(vginfo->vgid, vgid, ID_LEN))
817 		return 1;
818 
819 	if (vginfo && *vginfo->vgid)
820 		dm_hash_remove(_vgid_hash, vginfo->vgid);
821 	if (!vgid) {
822 		log_debug("lvmcache: %s: clearing VGID", info ? dev_name(info->dev) : vginfo->vgname);
823 		return 1;
824 	}
825 
826 	strncpy(vginfo->vgid, vgid, ID_LEN);
827 	vginfo->vgid[ID_LEN] = '\0';
828 	if (!dm_hash_insert(_vgid_hash, vginfo->vgid, vginfo)) {
829 		log_error("_lvmcache_update: vgid hash insertion failed: %s",
830 			  vginfo->vgid);
831 		return 0;
832 	}
833 
834 	if (!is_orphan_vg(vginfo->vgname))
835 		log_debug("lvmcache: %s: setting %s VGID to %s",
836 			  dev_name(info->dev), vginfo->vgname,
837 			  vginfo->vgid);
838 
839 	return 1;
840 }
841 
842 static int _insert_vginfo(struct lvmcache_vginfo *new_vginfo, const char *vgid,
843 			  uint32_t vgstatus, const char *creation_host,
844 			  struct lvmcache_vginfo *primary_vginfo)
845 {
846 	struct lvmcache_vginfo *last_vginfo = primary_vginfo;
847 	char uuid_primary[64] __attribute((aligned(8)));
848 	char uuid_new[64] __attribute((aligned(8)));
849 	int use_new = 0;
850 
851 	/* Pre-existing VG takes precedence. Unexported VG takes precedence. */
852 	if (primary_vginfo) {
853 		if (!id_write_format((const struct id *)vgid, uuid_new, sizeof(uuid_new)))
854 			return_0;
855 
856 		if (!id_write_format((const struct id *)&primary_vginfo->vgid, uuid_primary,
857 				     sizeof(uuid_primary)))
858 			return_0;
859 
860 		/*
861 		 * If   Primary not exported, new exported => keep
862 		 * Else Primary exported, new not exported => change
863 		 * Else Primary has hostname for this machine => keep
864 		 * Else Primary has no hostname, new has one => change
865 		 * Else New has hostname for this machine => change
866 		 * Else Keep primary.
867 		 */
868 		if (!(primary_vginfo->status & EXPORTED_VG) &&
869 		    (vgstatus & EXPORTED_VG))
870 			log_error("WARNING: Duplicate VG name %s: "
871 				  "Existing %s takes precedence over "
872 				  "exported %s", new_vginfo->vgname,
873 				  uuid_primary, uuid_new);
874 		else if ((primary_vginfo->status & EXPORTED_VG) &&
875 			   !(vgstatus & EXPORTED_VG)) {
876 			log_error("WARNING: Duplicate VG name %s: "
877 				  "%s takes precedence over exported %s",
878 				  new_vginfo->vgname, uuid_new,
879 				  uuid_primary);
880 			use_new = 1;
881 		} else if (primary_vginfo->creation_host &&
882 			   !strcmp(primary_vginfo->creation_host,
883 				   primary_vginfo->fmt->cmd->hostname))
884 			log_error("WARNING: Duplicate VG name %s: "
885 				  "Existing %s (created here) takes precedence "
886 				  "over %s", new_vginfo->vgname, uuid_primary,
887 				  uuid_new);
888 		else if (!primary_vginfo->creation_host && creation_host) {
889 			log_error("WARNING: Duplicate VG name %s: "
890 				  "%s (with creation_host) takes precedence over %s",
891 				  new_vginfo->vgname, uuid_new,
892 				  uuid_primary);
893 			use_new = 1;
894 		} else if (creation_host &&
895 			   !strcmp(creation_host,
896 				   primary_vginfo->fmt->cmd->hostname)) {
897 			log_error("WARNING: Duplicate VG name %s: "
898 				  "%s (created here) takes precedence over %s",
899 				  new_vginfo->vgname, uuid_new,
900 				  uuid_primary);
901 			use_new = 1;
902 		}
903 
904 		if (!use_new) {
905 			while (last_vginfo->next)
906 				last_vginfo = last_vginfo->next;
907 			last_vginfo->next = new_vginfo;
908 			return 1;
909 		}
910 
911 		dm_hash_remove(_vgname_hash, primary_vginfo->vgname);
912 	}
913 
914 	if (!dm_hash_insert(_vgname_hash, new_vginfo->vgname, new_vginfo)) {
915 		log_error("cache_update: vg hash insertion failed: %s",
916 		  	new_vginfo->vgname);
917 		return 0;
918 	}
919 
920 	if (primary_vginfo)
921 		new_vginfo->next = primary_vginfo;
922 
923 	return 1;
924 }
925 
926 static int _lvmcache_update_vgname(struct lvmcache_info *info,
927 				   const char *vgname, const char *vgid,
928 				   uint32_t vgstatus, const char *creation_host,
929 				   const struct format_type *fmt)
930 {
931 	struct lvmcache_vginfo *vginfo, *primary_vginfo, *orphan_vginfo;
932 	struct lvmcache_info *info2, *info3;
933 	char mdabuf[32];
934 	// struct lvmcache_vginfo  *old_vginfo, *next;
935 
936 	if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname)))
937 		return 1;
938 
939 	/* Remove existing vginfo entry */
940 	if (info)
941 		_drop_vginfo(info, info->vginfo);
942 
943 	/* Get existing vginfo or create new one */
944 	if (!(vginfo = vginfo_from_vgname(vgname, vgid))) {
945 /*** FIXME - vginfo ends up duplicated instead of renamed.
946 		// Renaming?  This lookup fails.
947 		if ((vginfo = vginfo_from_vgid(vgid))) {
948 			next = vginfo->next;
949 			old_vginfo = vginfo_from_vgname(vginfo->vgname, NULL);
950 			if (old_vginfo == vginfo) {
951 				dm_hash_remove(_vgname_hash, old_vginfo->vgname);
952 				if (old_vginfo->next) {
953 					if (!dm_hash_insert(_vgname_hash, old_vginfo->vgname, old_vginfo->next)) {
954 						log_error("vg hash re-insertion failed: %s",
955 							  old_vginfo->vgname);
956 						return 0;
957 					}
958 				}
959 			} else do {
960 				if (old_vginfo->next == vginfo) {
961 					old_vginfo->next = vginfo->next;
962 					break;
963 				}
964 			} while ((old_vginfo = old_vginfo->next));
965 			vginfo->next = NULL;
966 
967 			dm_free(vginfo->vgname);
968 			if (!(vginfo->vgname = dm_strdup(vgname))) {
969 				log_error("cache vgname alloc failed for %s", vgname);
970 				return 0;
971 			}
972 
973 			// Rename so can assume new name does not already exist
974 			if (!dm_hash_insert(_vgname_hash, vginfo->vgname, vginfo->next)) {
975 				log_error("vg hash re-insertion failed: %s",
976 					  vginfo->vgname);
977 		      		return 0;
978 			}
979 		} else {
980 ***/
981 		if (!(vginfo = dm_malloc(sizeof(*vginfo)))) {
982 			log_error("lvmcache_update_vgname: list alloc failed");
983 			return 0;
984 		}
985 		memset(vginfo, 0, sizeof(*vginfo));
986 		if (!(vginfo->vgname = dm_strdup(vgname))) {
987 			dm_free(vginfo);
988 			log_error("cache vgname alloc failed for %s", vgname);
989 			return 0;
990 		}
991 		dm_list_init(&vginfo->infos);
992 
993 		/*
994 		 * If we're scanning and there's an invalidated entry, remove it.
995 		 * Otherwise we risk bogus warnings of duplicate VGs.
996 		 */
997 		while ((primary_vginfo = vginfo_from_vgname(vgname, NULL)) &&
998 		       _scanning_in_progress && _vginfo_is_invalid(primary_vginfo))
999 			dm_list_iterate_items_safe(info2, info3, &primary_vginfo->infos) {
1000 				orphan_vginfo = vginfo_from_vgname(primary_vginfo->fmt->orphan_vg_name, NULL);
1001 				_drop_vginfo(info2, primary_vginfo);
1002 				_vginfo_attach_info(orphan_vginfo, info2);
1003 				if (info2->mdas.n)
1004 					sprintf(mdabuf, " with %u mdas",
1005 						dm_list_size(&info2->mdas));
1006 				else
1007 					mdabuf[0] = '\0';
1008 				log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
1009 					  dev_name(info2->dev),
1010 					  vgname, orphan_vginfo->vgid[0] ? " (" : "",
1011 					  orphan_vginfo->vgid[0] ? orphan_vginfo->vgid : "",
1012 					  orphan_vginfo->vgid[0] ? ")" : "", mdabuf);
1013 		}
1014 
1015 		if (!_insert_vginfo(vginfo, vgid, vgstatus, creation_host,
1016 				    primary_vginfo)) {
1017 			dm_free(vginfo->vgname);
1018 			dm_free(vginfo);
1019 			return 0;
1020 		}
1021 		/* Ensure orphans appear last on list_iterate */
1022 		if (is_orphan_vg(vgname))
1023 			dm_list_add(&_vginfos, &vginfo->list);
1024 		else
1025 			dm_list_add_h(&_vginfos, &vginfo->list);
1026 /***
1027 		}
1028 ***/
1029 	}
1030 
1031 	if (info)
1032 		_vginfo_attach_info(vginfo, info);
1033 	else if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) /* Orphans */
1034 		return_0;
1035 
1036 	_update_cache_vginfo_lock_state(vginfo, vgname_is_locked(vgname));
1037 
1038 	/* FIXME Check consistency of list! */
1039 	vginfo->fmt = fmt;
1040 
1041 	if (info) {
1042 		if (info->mdas.n)
1043 			sprintf(mdabuf, " with %u mdas", dm_list_size(&info->mdas));
1044 		else
1045 			mdabuf[0] = '\0';
1046 		log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
1047 			  dev_name(info->dev),
1048 			  vgname, vginfo->vgid[0] ? " (" : "",
1049 			  vginfo->vgid[0] ? vginfo->vgid : "",
1050 			  vginfo->vgid[0] ? ")" : "", mdabuf);
1051 	} else
1052 		log_debug("lvmcache: initialised VG %s", vgname);
1053 
1054 	return 1;
1055 }
1056 
1057 static int _lvmcache_update_vgstatus(struct lvmcache_info *info, uint32_t vgstatus,
1058 				     const char *creation_host)
1059 {
1060 	if (!info || !info->vginfo)
1061 		return 1;
1062 
1063 	if ((info->vginfo->status & EXPORTED_VG) != (vgstatus & EXPORTED_VG))
1064 		log_debug("lvmcache: %s: VG %s %s exported",
1065 			  dev_name(info->dev), info->vginfo->vgname,
1066 			  vgstatus & EXPORTED_VG ? "now" : "no longer");
1067 
1068 	info->vginfo->status = vgstatus;
1069 
1070 	if (!creation_host)
1071 		return 1;
1072 
1073 	if (info->vginfo->creation_host && !strcmp(creation_host,
1074 						   info->vginfo->creation_host))
1075 		return 1;
1076 
1077 	if (info->vginfo->creation_host)
1078 		dm_free(info->vginfo->creation_host);
1079 
1080 	if (!(info->vginfo->creation_host = dm_strdup(creation_host))) {
1081 		log_error("cache creation host alloc failed for %s",
1082 			  creation_host);
1083 		return 0;
1084 	}
1085 
1086 	log_debug("lvmcache: %s: VG %s: Set creation host to %s.",
1087 		  dev_name(info->dev), info->vginfo->vgname, creation_host);
1088 
1089 	return 1;
1090 }
1091 
1092 int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt)
1093 {
1094 	if (!_lock_hash && !lvmcache_init()) {
1095 		log_error("Internal cache initialisation failed");
1096 		return 0;
1097 	}
1098 
1099 	return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt);
1100 }
1101 
1102 int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
1103 				  const char *vgname, const char *vgid,
1104 				  uint32_t vgstatus, const char *creation_host)
1105 {
1106 	if (!vgname && !info->vginfo) {
1107 		log_error("Internal error: NULL vgname handed to cache");
1108 		/* FIXME Remove this */
1109 		vgname = info->fmt->orphan_vg_name;
1110 		vgid = vgname;
1111 	}
1112 
1113 	/* If PV without mdas is already in a real VG, don't make it orphan */
1114 	if (is_orphan_vg(vgname) && info->vginfo && !dm_list_size(&info->mdas) &&
1115 	    !is_orphan_vg(info->vginfo->vgname) && memlock())
1116 		return 1;
1117 
1118 	/* If moving PV from orphan to real VG, always mark it valid */
1119 	if (!is_orphan_vg(vgname))
1120 		info->status &= ~CACHE_INVALID;
1121 
1122 	if (!_lvmcache_update_vgname(info, vgname, vgid, vgstatus,
1123 				     creation_host, info->fmt) ||
1124 	    !_lvmcache_update_vgid(info, info->vginfo, vgid) ||
1125 	    !_lvmcache_update_vgstatus(info, vgstatus, creation_host))
1126 		return_0;
1127 
1128 	return 1;
1129 }
1130 
1131 int lvmcache_update_vg(struct volume_group *vg, unsigned precommitted)
1132 {
1133 	struct pv_list *pvl;
1134 	struct lvmcache_info *info;
1135 	char pvid_s[ID_LEN + 1] __attribute((aligned(8)));
1136 
1137 	pvid_s[sizeof(pvid_s) - 1] = '\0';
1138 
1139 	dm_list_iterate_items(pvl, &vg->pvs) {
1140 		strncpy(pvid_s, (char *) &pvl->pv->id, sizeof(pvid_s) - 1);
1141 		/* FIXME Could pvl->pv->dev->pvid ever be different? */
1142 		if ((info = info_from_pvid(pvid_s, 0)) &&
1143 		    !lvmcache_update_vgname_and_id(info, vg->name,
1144 						   (char *) &vg->id,
1145 						   vg->status, NULL))
1146 			return_0;
1147 	}
1148 
1149 	/* store text representation of vg to cache */
1150 	if (vg->cmd->current_settings.cache_vgmetadata)
1151 		_store_metadata(vg, precommitted);
1152 
1153 	return 1;
1154 }
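/*
 * Brings the cache into line with the supplied VG: each PV's cache entry is
 * pointed at the VG and, if the cache_vgmetadata setting is enabled, the
 * textual metadata is stored for later reuse by lvmcache_get_vg().
 */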
1155 
1156 struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
1157 				   struct device *dev,
1158 				   const char *vgname, const char *vgid,
1159 				   uint32_t vgstatus)
1160 {
1161 	struct label *label;
1162 	struct lvmcache_info *existing, *info;
1163 	char pvid_s[ID_LEN + 1] __attribute((aligned(8)));
1164 
1165 	if (!_vgname_hash && !lvmcache_init()) {
1166 		log_error("Internal cache initialisation failed");
1167 		return NULL;
1168 	}
1169 
1170 	strncpy(pvid_s, pvid, sizeof(pvid_s));
1171 	pvid_s[sizeof(pvid_s) - 1] = '\0';
1172 
1173 	if (!(existing = info_from_pvid(pvid_s, 0)) &&
1174 	    !(existing = info_from_pvid(dev->pvid, 0))) {
1175 		if (!(label = label_create(labeller)))
1176 			return_NULL;
1177 		if (!(info = dm_malloc(sizeof(*info)))) {
1178 			log_error("lvmcache_info allocation failed");
1179 			label_destroy(label);
1180 			return NULL;
1181 		}
1182 		memset(info, 0, sizeof(*info));
1183 
1184 		label->info = info;
1185 		info->label = label;
1186 		dm_list_init(&info->list);
1187 		info->dev = dev;
1188 	} else {
1189 		if (existing->dev != dev) {
1190 			/* Is the existing entry a duplicate pvid e.g. md ? */
1191 			if (dev_subsystem_part_major(existing->dev) &&
1192 			    !dev_subsystem_part_major(dev)) {
1193 				log_very_verbose("Ignoring duplicate PV %s on "
1194 						 "%s - using %s %s",
1195 						 pvid, dev_name(dev),
1196 						 dev_subsystem_name(existing->dev),
1197 						 dev_name(existing->dev));
1198 				return NULL;
1199 			} else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
1200 				   !dm_is_dm_major(MAJOR(dev->dev))) {
1201 				log_very_verbose("Ignoring duplicate PV %s on "
1202 						 "%s - using dm %s",
1203 						 pvid, dev_name(dev),
1204 						 dev_name(existing->dev));
1205 				return NULL;
1206 			} else if (!dev_subsystem_part_major(existing->dev) &&
1207 				   dev_subsystem_part_major(dev))
1208 				log_very_verbose("Duplicate PV %s on %s - "
1209 						 "using %s %s", pvid,
1210 						 dev_name(existing->dev),
1211 						 dev_subsystem_name(existing->dev),
1212 						 dev_name(dev));
1213 			else if (!dm_is_dm_major(MAJOR(existing->dev->dev)) &&
1214 				 dm_is_dm_major(MAJOR(dev->dev)))
1215 				log_very_verbose("Duplicate PV %s on %s - "
1216 						 "using dm %s", pvid,
1217 						 dev_name(existing->dev),
1218 						 dev_name(dev));
1219 			/* FIXME If both dm, check dependencies */
1220 			//else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
1221 				 //dm_is_dm_major(MAJOR(dev->dev)))
1222 				 //
1223 			else if (!strcmp(pvid_s, existing->dev->pvid))
1224 				log_error("Found duplicate PV %s: using %s not "
1225 					  "%s", pvid, dev_name(dev),
1226 					  dev_name(existing->dev));
1227 		}
1228 		if (strcmp(pvid_s, existing->dev->pvid))
1229 			log_debug("Updating pvid cache to %s (%s) from %s (%s)",
1230 				  pvid_s, dev_name(dev),
1231 				  existing->dev->pvid, dev_name(existing->dev));
1232 		/* Switch over to new preferred device */
1233 		existing->dev = dev;
1234 		info = existing;
1235 		/* Has labeller changed? */
1236 		if (info->label->labeller != labeller) {
1237 			label_destroy(info->label);
1238 			if (!(info->label = label_create(labeller)))
1239 				/* FIXME leaves info without label! */
1240 				return_NULL;
1241 			info->label->info = info;
1242 		}
1243 		label = info->label;
1244 	}
1245 
1246 	info->fmt = (const struct format_type *) labeller->private;
1247 	info->status |= CACHE_INVALID;
1248 
1249 	if (!_lvmcache_update_pvid(info, pvid_s)) {
1250 		if (!existing) {
1251 			dm_free(info);
1252 			label_destroy(label);
1253 		}
1254 		return NULL;
1255 	}
1256 
1257 	if (!lvmcache_update_vgname_and_id(info, vgname, vgid, vgstatus, NULL)) {
1258 		if (!existing) {
1259 			dm_hash_remove(_pvid_hash, pvid_s);
1260 			strcpy(info->dev->pvid, "");
1261 			dm_free(info);
1262 			label_destroy(label);
1263 		}
1264 		return NULL;
1265 	}
1266 
1267 	return info;
1268 }
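/*
 * lvmcache_add() registers a device carrying a PV label.  An unseen PVID gets
 * a fresh info/label pair; a PVID found on more than one device is resolved
 * in favour of subsystem devices (e.g. md) and device-mapper devices before
 * the preferred device is recorded.
 */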
1269 
1270 static void _lvmcache_destroy_entry(struct lvmcache_info *info)
1271 {
1272 	_vginfo_detach_info(info);
1273 	strcpy(info->dev->pvid, "");
1274 	label_destroy(info->label);
1275 	dm_free(info);
1276 }
1277 
1278 static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo *vginfo)
1279 {
1280 	struct lvmcache_vginfo *next;
1281 
1282 	do {
1283 		next = vginfo->next;
1284 		if (!_free_vginfo(vginfo))
1285 			stack;
1286 	} while ((vginfo = next));
1287 }
1288 
1289 static void _lvmcache_destroy_lockname(struct dm_hash_node *n)
1290 {
1291 	char *vgname;
1292 
1293 	if (!dm_hash_get_data(_lock_hash, n))
1294 		return;
1295 
1296 	vgname = dm_hash_get_key(_lock_hash, n);
1297 
1298 	if (!strcmp(vgname, VG_GLOBAL))
1299 		_vg_global_lock_held = 1;
1300 	else
1301 		log_error("Internal error: Volume Group %s was not unlocked",
1302 			  dm_hash_get_key(_lock_hash, n));
1303 }
1304 
1305 void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans)
1306 {
1307 	struct dm_hash_node *n;
1308 	log_verbose("Wiping internal VG cache");
1309 
1310 	_has_scanned = 0;
1311 
1312 	if (_vgid_hash) {
1313 		dm_hash_destroy(_vgid_hash);
1314 		_vgid_hash = NULL;
1315 	}
1316 
1317 	if (_pvid_hash) {
1318 		dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_entry);
1319 		dm_hash_destroy(_pvid_hash);
1320 		_pvid_hash = NULL;
1321 	}
1322 
1323 	if (_vgname_hash) {
1324 		dm_hash_iter(_vgname_hash,
1325 			  (dm_hash_iterate_fn) _lvmcache_destroy_vgnamelist);
1326 		dm_hash_destroy(_vgname_hash);
1327 		_vgname_hash = NULL;
1328 	}
1329 
1330 	if (_lock_hash) {
1331 		dm_hash_iterate(n, _lock_hash)
1332 			_lvmcache_destroy_lockname(n);
1333 		dm_hash_destroy(_lock_hash);
1334 		_lock_hash = NULL;
1335 	}
1336 
1337 	if (!dm_list_empty(&_vginfos))
1338 		log_error("Internal error: _vginfos list should be empty");
1339 	dm_list_init(&_vginfos);
1340 
1341 	if (retain_orphans)
1342 		init_lvmcache_orphans(cmd);
1343 }
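/*
 * Wiping the cache records whether VG_GLOBAL was still locked
 * (_vg_global_lock_held) so a subsequent lvmcache_init() can restore that
 * state, and retain_orphans immediately re-creates the orphan vginfos via
 * init_lvmcache_orphans().
 */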
1344