1 /*	$NetBSD: libdm-iface.c,v 1.1.1.3 2009/12/02 00:26:11 haad Exp $	*/
2 
3 /*
4  * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5  * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
6  *
7  * This file is part of the device-mapper userspace tools.
8  *
9  * This copyrighted material is made available to anyone wishing to use,
10  * modify, copy, or redistribute it subject to the terms and conditions
11  * of the GNU Lesser General Public License v.2.1.
12  *
13  * You should have received a copy of the GNU Lesser General Public License
14  * along with this program; if not, write to the Free Software Foundation,
15  * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16  */
17 
18 #include "dmlib.h"
19 #include "libdm-targets.h"
20 #include "libdm-common.h"
21 
22 #ifdef DM_COMPAT
23 #  include "libdm-compat.h"
24 #endif
25 
26 #include <fcntl.h>
27 #include <dirent.h>
28 #include <sys/ioctl.h>
29 #include <sys/utsname.h>
30 #include <limits.h>
31 
32 #ifdef linux
33 #  include "kdev_t.h"
34 #  include <linux/limits.h>
35 #else
36 #  define MAJOR(x) major((x))
37 #  define MINOR(x) minor((x))
38 #  define MKDEV(x,y) makedev((x),(y))
39 #endif
40 
41 #include "dm-ioctl.h"
42 
43 /*
44  * Ensure build compatibility.
45  * The hard-coded versions here are the highest present
46  * in the _cmd_data arrays.
47  */
48 
49 #if !((DM_VERSION_MAJOR == 1 && DM_VERSION_MINOR >= 0) || \
50       (DM_VERSION_MAJOR == 4 && DM_VERSION_MINOR >= 0))
51 #error The version of dm-ioctl.h included is incompatible.
52 #endif
53 
54 /* FIXME This should be exported in device-mapper.h */
55 #define DM_NAME "device-mapper"
56 
57 #define PROC_MISC "/proc/misc"
58 #define PROC_DEVICES "/proc/devices"
59 #define MISC_NAME "misc"
60 
61 #define NUMBER_OF_MAJORS 4096
62 
63 /* dm ioctl interface major version number for the running kernel */
64 static unsigned _dm_version = DM_VERSION_MAJOR;
65 static unsigned _dm_version_minor = 0;
66 static unsigned _dm_version_patchlevel = 0;
67 static int _log_suppress = 0;
68 
69 /*
70  * If the kernel dm driver only supports one major number
71  * we store it in _dm_device_major.  Otherwise we indicate
72  * which major numbers have been claimed by device-mapper
73  * in _dm_bitset.
74  */
75 static unsigned _dm_multiple_major_support = 1;
76 static dm_bitset_t _dm_bitset = NULL;
77 static uint32_t _dm_device_major = 0;
78 
79 static int _control_fd = -1;
80 static int _version_checked = 0;
81 static int _version_ok = 1;
82 static unsigned _ioctl_buffer_double_factor = 0;
83 
84 
85 /*
86  * Support both the old and new ioctl interface versions to ease the transition.
87  * Clumsy, but only temporary.
88  */
89 #if DM_VERSION_MAJOR == 4 && defined(DM_COMPAT)
90 const int _dm_compat = 1;
91 #else
92 const int _dm_compat = 0;
93 #endif
94 
95 
96 /* *INDENT-OFF* */
97 static struct cmd_data _cmd_data_v4[] = {
98 	{"create",	DM_DEV_CREATE,		{4, 0, 0}},
99 	{"reload",	DM_TABLE_LOAD,		{4, 0, 0}},
100 	{"remove",	DM_DEV_REMOVE,		{4, 0, 0}},
101 	{"remove_all",	DM_REMOVE_ALL,		{4, 0, 0}},
102 	{"suspend",	DM_DEV_SUSPEND,		{4, 0, 0}},
103 	{"resume",	DM_DEV_SUSPEND,		{4, 0, 0}},
104 	{"info",	DM_DEV_STATUS,		{4, 0, 0}},
105 	{"deps",	DM_TABLE_DEPS,		{4, 0, 0}},
106 	{"rename",	DM_DEV_RENAME,		{4, 0, 0}},
107 	{"version",	DM_VERSION,		{4, 0, 0}},
108 	{"status",	DM_TABLE_STATUS,	{4, 0, 0}},
109 	{"table",	DM_TABLE_STATUS,	{4, 0, 0}},
110 	{"waitevent",	DM_DEV_WAIT,		{4, 0, 0}},
111 	{"names",	DM_LIST_DEVICES,	{4, 0, 0}},
112 	{"clear",	DM_TABLE_CLEAR,		{4, 0, 0}},
113 	{"mknodes",	DM_DEV_STATUS,		{4, 0, 0}},
114 #ifdef DM_LIST_VERSIONS
115 	{"versions",	DM_LIST_VERSIONS,	{4, 1, 0}},
116 #endif
117 #ifdef DM_TARGET_MSG
118 	{"message",	DM_TARGET_MSG,		{4, 2, 0}},
119 #endif
120 #ifdef DM_DEV_SET_GEOMETRY
121 	{"setgeometry",	DM_DEV_SET_GEOMETRY,	{4, 6, 0}},
122 #endif
123 };
124 /* *INDENT-ON* */
125 
126 #define ALIGNMENT_V1 sizeof(int)
127 #define ALIGNMENT 8
128 
129 /* FIXME Rejig library to record & use errno instead */
130 #ifndef DM_EXISTS_FLAG
131 #  define DM_EXISTS_FLAG 0x00000004
132 #endif
133 
134 static void *_align(void *ptr, unsigned int a)
135 {
136 	register unsigned long agn = --a;
137 
138 	return (void *) (((unsigned long) ptr + agn) & ~agn);
139 }
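/*
 * Illustrative note: _align() above rounds a pointer up to the next
 * 'a'-byte boundary ('a' must be a power of two).  For example, with
 * ALIGNMENT of 8 an offset of 0x13 becomes 0x18, while an already
 * aligned pointer is returned unchanged.  It is used when marshalling
 * ioctl buffers to pad each dm_target_spec and its params string out
 * to the required alignment.
 */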
140 
141 #ifdef DM_IOCTLS
142 /*
143  * Set number to NULL to populate _dm_bitset - otherwise first
144  * match is returned.
145  */
146 static int _get_proc_number(const char *file, const char *name,
147 			    uint32_t *number)
148 {
149 	FILE *fl;
150 	char nm[256];
151 	int c;
152 	uint32_t num;
153 
154 	if (!(fl = fopen(file, "r"))) {
155 		log_sys_error("fopen", file);
156 		return 0;
157 	}
158 
159 	while (!feof(fl)) {
160 		if (fscanf(fl, "%u %255s\n", &num, &nm[0]) == 2) {
161 			if (!strcmp(name, nm)) {
162 				if (number) {
163 					*number = num;
164 					if (fclose(fl))
165 						log_sys_error("fclose", file);
166 					return 1;
167 				}
168 				dm_bit_set(_dm_bitset, num);
169 			}
170 		} else do {
171 			c = fgetc(fl);
172 		} while (c != EOF && c != '\n');
173 	}
174 	if (fclose(fl))
175 		log_sys_error("fclose", file);
176 
177 	if (number) {
178 		log_error("%s: No entry for %s found", file, name);
179 		return 0;
180 	}
181 
182 	return 1;
183 }
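/*
 * Illustrative example of the /proc lines parsed above (the numbers are
 * whatever the running kernel assigned, not fixed values):
 *
 *	/proc/devices:	"253 device-mapper"	-> *number = 253
 *	/proc/misc:	" 60 device-mapper"	-> *number = 60
 *
 * With a NULL 'number', every matching major found is recorded in
 * _dm_bitset instead.
 */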
184 
185 static int _control_device_number(uint32_t *major, uint32_t *minor)
186 {
187 	if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major) ||
188 	    !_get_proc_number(PROC_MISC, DM_NAME, minor)) {
189 		*major = 0;
190 		return 0;
191 	}
192 
193 	return 1;
194 }
195 
196 /*
197  * Returns 1 if the control node exists and matches; 0 if it is missing
 * or was wrong and has just been removed; -1 if it is wrong and cannot be removed
198  */
199 static int _control_exists(const char *control, uint32_t major, uint32_t minor)
200 {
201 	struct stat buf;
202 
203 	if (stat(control, &buf) < 0) {
204 		if (errno != ENOENT)
205 			log_sys_error("stat", control);
206 		return 0;
207 	}
208 
209 	if (!S_ISCHR(buf.st_mode)) {
210 		log_verbose("%s: Wrong inode type", control);
211 		if (!unlink(control))
212 			return 0;
213 		log_sys_error("unlink", control);
214 		return -1;
215 	}
216 
217 	if (major && buf.st_rdev != MKDEV(major, minor)) {
218 		log_verbose("%s: Wrong device number: (%u, %u) instead of "
219 			    "(%u, %u)", control,
220 			    MAJOR(buf.st_rdev), MINOR(buf.st_rdev),
221 			    major, minor);
222 		if (!unlink(control))
223 			return 0;
224 		log_sys_error("unlink", control);
225 		return -1;
226 	}
227 
228 	return 1;
229 }
230 
231 static int _create_control(const char *control, uint32_t major, uint32_t minor)
232 {
233 	int ret;
234 	mode_t old_umask;
235 
236 	if (!major)
237 		return 0;
238 
239 	old_umask = umask(DM_DEV_DIR_UMASK);
240 	ret = dm_create_dir(dm_dir());
241 	umask(old_umask);
242 
243 	if (!ret)
244 		return 0;
245 
246 	log_verbose("Creating device %s (%u, %u)", control, major, minor);
247 
248 	if (mknod(control, S_IFCHR | S_IRUSR | S_IWUSR,
249 		  MKDEV(major, minor)) < 0)  {
250 		log_sys_error("mknod", control);
251 		return 0;
252 	}
253 
254 #ifdef HAVE_SELINUX
255 	if (!dm_set_selinux_context(control, S_IFCHR)) {
256 		stack;
257 		return 0;
258 	}
259 #endif
260 
261 	return 1;
262 }
263 #endif
264 
265 /*
266  * FIXME Update bitset in long-running process if dm claims new major numbers.
267  */
268 static int _create_dm_bitset(void)
269 {
270 #ifdef DM_IOCTLS
271 	struct utsname uts;
272 
273 	if (_dm_bitset || _dm_device_major)
274 		return 1;
275 
276 	if (uname(&uts))
277 		return 0;
278 
279 	/*
280 	 * 2.6 kernels are limited to one major number.
281 	 * Assume 2.4 kernels are patched not to.
282 	 * FIXME Check _dm_version and _dm_version_minor if 2.6 changes this.
283 	 */
284 	if (!strncmp(uts.release, "2.6.", 4))
285 		_dm_multiple_major_support = 0;
286 
287 	if (!_dm_multiple_major_support) {
288 		if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major))
289 			return 0;
290 		return 1;
291 	}
292 
293 	/* Multiple major numbers supported */
294 	if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS)))
295 		return 0;
296 
297 	if (!_get_proc_number(PROC_DEVICES, DM_NAME, NULL)) {
298 		dm_bitset_destroy(_dm_bitset);
299 		_dm_bitset = NULL;
300 		return 0;
301 	}
302 
303 	return 1;
304 #else
305 	return 0;
306 #endif
307 }
308 
309 int dm_is_dm_major(uint32_t major)
310 {
311 	if (!_create_dm_bitset())
312 		return 0;
313 
314 	if (_dm_multiple_major_support)
315 		return dm_bit(_dm_bitset, major) ? 1 : 0;
316 	else
317 		return (major == _dm_device_major) ? 1 : 0;
318 }
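/*
 * Example (sketch): a caller can use dm_is_dm_major() to check whether a
 * block device belongs to device-mapper before treating it as one.  The
 * device path below is purely illustrative.
 *
 *	struct stat st;
 *
 *	if (!stat("/dev/dm-0", &st) && S_ISBLK(st.st_mode) &&
 *	    dm_is_dm_major(MAJOR(st.st_rdev)))
 *		... it is a device-mapper device ...
 */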
319 
320 static int _open_control(void)
321 {
322 #ifdef DM_IOCTLS
323 	char control[PATH_MAX];
324 	uint32_t major = 0, minor;
325 
326 	if (_control_fd != -1)
327 		return 1;
328 
329 	snprintf(control, sizeof(control), "%s/control", dm_dir());
330 
331 	if (!_control_device_number(&major, &minor))
332 		log_error("Is device-mapper driver missing from kernel?");
333 
334 	if (!_control_exists(control, major, minor) &&
335 	    !_create_control(control, major, minor))
336 		goto error;
337 
338 	if ((_control_fd = open(control, O_RDWR)) < 0) {
339 		log_sys_error("open", control);
340 		goto error;
341 	}
342 
343 	if (!_create_dm_bitset()) {
344 		log_error("Failed to set up list of device-mapper major numbers");
345 		return 0;
346 	}
347 
348 	return 1;
349 
350 error:
351 	log_error("Failure to communicate with kernel device-mapper driver.");
352 	return 0;
353 #else
354 	return 1;
355 #endif
356 }
357 
358 void dm_task_destroy(struct dm_task *dmt)
359 {
360 	struct target *t, *n;
361 
362 	for (t = dmt->head; t; t = n) {
363 		n = t->next;
364 		dm_free(t->params);
365 		dm_free(t->type);
366 		dm_free(t);
367 	}
368 
369 	if (dmt->dev_name)
370 		dm_free(dmt->dev_name);
371 
372 	if (dmt->newname)
373 		dm_free(dmt->newname);
374 
375 	if (dmt->message)
376 		dm_free(dmt->message);
377 
378 	if (dmt->dmi.v4)
379 		dm_free(dmt->dmi.v4);
380 
381 	if (dmt->uuid)
382 		dm_free(dmt->uuid);

	if (dmt->geometry)
		dm_free(dmt->geometry);
383 
384 	dm_free(dmt);
385 }
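/*
 * Typical task lifecycle (minimal sketch; error handling abbreviated,
 * device name purely illustrative):
 *
 *	struct dm_task *dmt;
 *	struct dm_info info;
 *
 *	if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
 *		return 0;
 *	if (!dm_task_set_name(dmt, "vg0-lv0") || !dm_task_run(dmt))
 *		goto out;
 *	if (dm_task_get_info(dmt, &info) && info.exists)
 *		... use info.major, info.minor, info.open_count ...
 * out:
 *	dm_task_destroy(dmt);
 *
 * Every task obtained from dm_task_create() must be released with
 * dm_task_destroy(), whether or not dm_task_run() succeeded.
 */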
386 
387 /*
388  * Protocol Version 1 compatibility functions.
389  */
390 
391 #ifdef DM_COMPAT
392 
393 static int _dm_task_get_driver_version_v1(struct dm_task *dmt, char *version,
394 					  size_t size)
395 {
396 	unsigned int *v;
397 
398 	if (!dmt->dmi.v1) {
399 		version[0] = '\0';
400 		return 0;
401 	}
402 
403 	v = dmt->dmi.v1->version;
404 	snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]);
405 	return 1;
406 }
407 
408 /* Unmarshall the target info returned from a status call */
409 static int _unmarshal_status_v1(struct dm_task *dmt, struct dm_ioctl_v1 *dmi)
410 {
411 	char *outbuf = (char *) dmi + dmi->data_start;
412 	char *outptr = outbuf;
413 	int32_t i;
414 	struct dm_target_spec_v1 *spec;
415 
416 	for (i = 0; i < dmi->target_count; i++) {
417 		spec = (struct dm_target_spec_v1 *) outptr;
418 
419 		if (!dm_task_add_target(dmt, spec->sector_start,
420 					(uint64_t) spec->length,
421 					spec->target_type,
422 					outptr + sizeof(*spec))) {
423 			return 0;
424 		}
425 
426 		outptr = outbuf + spec->next;
427 	}
428 
429 	return 1;
430 }
431 
432 static int _dm_format_dev_v1(char *buf, int bufsize, uint32_t dev_major,
433 			     uint32_t dev_minor)
434 {
435 	int r;
436 
437 	if (bufsize < 8)
438 		return 0;
439 
440 	r = snprintf(buf, bufsize, "%03x:%03x", dev_major, dev_minor);
441 	if (r < 0 || r > bufsize - 1)
442 		return 0;
443 
444 	return 1;
445 }
446 
447 static int _dm_task_get_info_v1(struct dm_task *dmt, struct dm_info *info)
448 {
449 	if (!dmt->dmi.v1)
450 		return 0;
451 
452 	memset(info, 0, sizeof(*info));
453 
454 	info->exists = dmt->dmi.v1->flags & DM_EXISTS_FLAG ? 1 : 0;
455 	if (!info->exists)
456 		return 1;
457 
458 	info->suspended = dmt->dmi.v1->flags & DM_SUSPEND_FLAG ? 1 : 0;
459 	info->read_only = dmt->dmi.v1->flags & DM_READONLY_FLAG ? 1 : 0;
460 	info->target_count = dmt->dmi.v1->target_count;
461 	info->open_count = dmt->dmi.v1->open_count;
462 	info->event_nr = 0;
463 	info->major = MAJOR(dmt->dmi.v1->dev);
464 	info->minor = MINOR(dmt->dmi.v1->dev);
465 	info->live_table = 1;
466 	info->inactive_table = 0;
467 
468 	return 1;
469 }
470 
471 static const char *_dm_task_get_name_v1(const struct dm_task *dmt)
472 {
473 	return (dmt->dmi.v1->name);
474 }
475 
476 static const char *_dm_task_get_uuid_v1(const struct dm_task *dmt)
477 {
478 	return (dmt->dmi.v1->uuid);
479 }
480 
481 static struct dm_deps *_dm_task_get_deps_v1(struct dm_task *dmt)
482 {
483 	log_error("deps version 1 no longer supported by libdevmapper");
484 	return NULL;
485 }
486 
487 static struct dm_names *_dm_task_get_names_v1(struct dm_task *dmt)
488 {
489 	return (struct dm_names *) (((void *) dmt->dmi.v1) +
490 				    dmt->dmi.v1->data_start);
491 }
492 
493 static void *_add_target_v1(struct target *t, void *out, void *end)
494 {
495 	void *out_sp = out;
496 	struct dm_target_spec_v1 sp;
497 	size_t sp_size = sizeof(struct dm_target_spec_v1);
498 	int len;
499 	const char no_space[] = "Ran out of memory building ioctl parameter";
500 
501 	out += sp_size;
502 	if (out >= end) {
503 		log_error(no_space);
504 		return NULL;
505 	}
506 
507 	sp.status = 0;
508 	sp.sector_start = t->start;
509 	sp.length = t->length;
510 	strncpy(sp.target_type, t->type, sizeof(sp.target_type));
511 
512 	len = strlen(t->params);
513 
514 	if ((out + len + 1) >= end) {
515 		log_error(no_space);
516 
517 		log_error("t->params= '%s'", t->params);
518 		return NULL;
519 	}
520 	strcpy((char *) out, t->params);
521 	out += len + 1;
522 
523 	/* align next block */
524 	out = _align(out, ALIGNMENT_V1);
525 
526 	sp.next = out - out_sp;
527 
528 	memcpy(out_sp, &sp, sp_size);
529 
530 	return out;
531 }
532 
533 static struct dm_ioctl_v1 *_flatten_v1(struct dm_task *dmt)
534 {
535 	const size_t min_size = 16 * 1024;
536 	const int (*version)[3];
537 
538 	struct dm_ioctl_v1 *dmi;
539 	struct target *t;
540 	size_t len = sizeof(struct dm_ioctl_v1);
541 	void *b, *e;
542 	int count = 0;
543 
544 	for (t = dmt->head; t; t = t->next) {
545 		len += sizeof(struct dm_target_spec_v1);
546 		len += strlen(t->params) + 1 + ALIGNMENT_V1;
547 		count++;
548 	}
549 
550 	if (count && dmt->newname) {
551 		log_error("targets and newname are incompatible");
552 		return NULL;
553 	}
554 
555 	if (dmt->newname)
556 		len += strlen(dmt->newname) + 1;
557 
558 	/*
559 	 * Give len a minimum size so that we have space to store
560 	 * dependencies or status information.
561 	 */
562 	if (len < min_size)
563 		len = min_size;
564 
565 	if (!(dmi = dm_malloc(len)))
566 		return NULL;
567 
568 	memset(dmi, 0, len);
569 
570 	version = &_cmd_data_v1[dmt->type].version;
571 
572 	dmi->version[0] = (*version)[0];
573 	dmi->version[1] = (*version)[1];
574 	dmi->version[2] = (*version)[2];
575 
576 	dmi->data_size = len;
577 	dmi->data_start = sizeof(struct dm_ioctl_v1);
578 
579 	if (dmt->dev_name)
580 		strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
581 
582 	if (dmt->type == DM_DEVICE_SUSPEND)
583 		dmi->flags |= DM_SUSPEND_FLAG;
584 	if (dmt->read_only)
585 		dmi->flags |= DM_READONLY_FLAG;
586 
587 	if (dmt->minor >= 0) {
588 		if (dmt->major <= 0) {
589 			log_error("Missing major number for persistent device");
590 			goto bad;
591 		}
592 		dmi->flags |= DM_PERSISTENT_DEV_FLAG;
593 		dmi->dev = MKDEV(dmt->major, dmt->minor);
594 	}
595 
596 	if (dmt->uuid)
597 		strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));
598 
599 	dmi->target_count = count;
600 
601 	b = (void *) (dmi + 1);
602 	e = (void *) ((char *) dmi + len);
603 
604 	for (t = dmt->head; t; t = t->next)
605 		if (!(b = _add_target_v1(t, b, e)))
606 			goto bad;
607 
608 	if (dmt->newname)
609 		strcpy(b, dmt->newname);
610 
611 	return dmi;
612 
613       bad:
614 	dm_free(dmi);
615 	return NULL;
616 }
617 
618 static int _dm_names_v1(struct dm_ioctl_v1 *dmi)
619 {
620 	const char *dev_dir = dm_dir();
621 	int r = 1, len;
622 	const char *name;
623 	struct dirent *dirent;
624 	DIR *d;
625 	struct dm_names *names, *old_names = NULL;
626 	void *end = (void *) dmi + dmi->data_size;
627 	struct stat buf;
628 	char path[PATH_MAX];
629 
630 	log_warn("WARNING: Device list may be incomplete with interface "
631 		  "version 1.");
632 	log_warn("Please upgrade your kernel device-mapper driver.");
633 
634 	if (!(d = opendir(dev_dir))) {
635 		log_sys_error("opendir", dev_dir);
636 		return 0;
637 	}
638 
639 	names = (struct dm_names *) ((void *) dmi + dmi->data_start);
640 
641 	names->dev = 0;		/* Flags no data */
642 
643 	while ((dirent = readdir(d))) {
644 		name = dirent->d_name;
645 
646 		if (name[0] == '.' || !strcmp(name, "control"))
647 			continue;
648 
649 		if (old_names)
650 			old_names->next = (uint32_t) ((void *) names -
651 						      (void *) old_names);
652 		snprintf(path, sizeof(path), "%s/%s", dev_dir, name);
653 		if (stat(path, &buf)) {
654 			log_sys_error("stat", path);
655 			continue;
656 		}
657 		if (!S_ISBLK(buf.st_mode))
658 			continue;
659 		names->dev = (uint64_t) buf.st_rdev;
660 		names->next = 0;
661 		len = strlen(name);
662 		if (((void *) (names + 1) + len + 1) >= end) {
663 			log_error("Insufficient buffer space for device list");
664 			r = 0;
665 			break;
666 		}
667 
668 		strcpy(names->name, name);
669 
670 		old_names = names;
671 		names = _align((void *) ++names + len + 1, ALIGNMENT);
672 	}
673 
674 	if (closedir(d))
675 		log_sys_error("closedir", dev_dir);
676 
677 	return r;
678 }
679 
680 static int _dm_task_run_v1(struct dm_task *dmt)
681 {
682 	struct dm_ioctl_v1 *dmi;
683 	unsigned int command;
684 
685 	if (!_open_control())
686 		return 0;
687 
688 	dmi = _flatten_v1(dmt);
689 	if (!dmi) {
690 		log_error("Couldn't create ioctl argument.");
691 		return 0;
692 	}
693 
694 	if ((unsigned) dmt->type >=
695 	    (sizeof(_cmd_data_v1) / sizeof(*_cmd_data_v1))) {
696 		log_error("Internal error: unknown device-mapper task %d",
697 			  dmt->type);
698 		goto bad;
699 	}
700 
701 	command = _cmd_data_v1[dmt->type].cmd;
702 
703 	if (dmt->type == DM_DEVICE_TABLE)
704 		dmi->flags |= DM_STATUS_TABLE_FLAG;
705 
706 	log_debug("dm %s %s %s%s%s [%u]", _cmd_data_v1[dmt->type].name,
707 		  dmi->name, dmi->uuid, dmt->newname ? " " : "",
708 		  dmt->newname ? dmt->newname : "",
709 		  dmi->data_size);
710 	if (dmt->type == DM_DEVICE_LIST) {
711 		if (!_dm_names_v1(dmi))
712 			goto bad;
713 	}
714 #ifdef DM_IOCTLS
715 	else if (ioctl(_control_fd, command, dmi) < 0) {
716 		if (_log_suppress)
717 			log_verbose("device-mapper: %s ioctl failed: %s",
718 				    _cmd_data_v1[dmt->type].name,
719 				    strerror(errno));
720 		else
721 			log_error("device-mapper: %s ioctl failed: %s",
722 				  _cmd_data_v1[dmt->type].name,
723 				  strerror(errno));
724 		goto bad;
725 	}
726 #else /* Userspace alternative for testing */
727 #endif
728 
729 	if (dmi->flags & DM_BUFFER_FULL_FLAG)
730 		/* FIXME Increase buffer size and retry operation (if query) */
731 		log_error("WARNING: libdevmapper buffer too small for data");
732 
733 	switch (dmt->type) {
734 	case DM_DEVICE_CREATE:
735 		add_dev_node(dmt->dev_name, MAJOR(dmi->dev), MINOR(dmi->dev),
736 			     dmt->uid, dmt->gid, dmt->mode, 0);
737 		break;
738 
739 	case DM_DEVICE_REMOVE:
740 		rm_dev_node(dmt->dev_name, 0);
741 		break;
742 
743 	case DM_DEVICE_RENAME:
744 		rename_dev_node(dmt->dev_name, dmt->newname, 0);
745 		break;
746 
747 	case DM_DEVICE_MKNODES:
748 		if (dmi->flags & DM_EXISTS_FLAG)
749 			add_dev_node(dmt->dev_name, MAJOR(dmi->dev),
750 				     MINOR(dmi->dev), dmt->uid,
751 				     dmt->gid, dmt->mode, 0);
752 		else
753 			rm_dev_node(dmt->dev_name, 0);
754 		break;
755 
756 	case DM_DEVICE_STATUS:
757 	case DM_DEVICE_TABLE:
758 		if (!_unmarshal_status_v1(dmt, dmi))
759 			goto bad;
760 		break;
761 
762 	case DM_DEVICE_SUSPEND:
763 	case DM_DEVICE_RESUME:
764 		dmt->type = DM_DEVICE_INFO;
765 		if (!dm_task_run(dmt))
766 			goto bad;
767 		dm_free(dmi);	/* We'll use what info returned */
768 		return 1;
769 	}
770 
771 	dmt->dmi.v1 = dmi;
772 	return 1;
773 
774       bad:
775 	dm_free(dmi);
776 	return 0;
777 }
778 
779 #endif
780 
781 /*
782  * Protocol Version 4 functions.
783  */
784 
785 int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size)
786 {
787 	unsigned *v;
788 
789 #ifdef DM_COMPAT
790 	if (_dm_version == 1)
791 		return _dm_task_get_driver_version_v1(dmt, version, size);
792 #endif
793 
794 	if (!dmt->dmi.v4) {
795 		version[0] = '\0';
796 		return 0;
797 	}
798 
799 	v = dmt->dmi.v4->version;
800 	snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]);
801 	_dm_version_minor = v[1];
802 	_dm_version_patchlevel = v[2];
803 
804 	return 1;
805 }
806 
807 static int _check_version(char *version, size_t size, int log_suppress)
808 {
809 	struct dm_task *task;
810 	int r;
811 
812 	if (!(task = dm_task_create(DM_DEVICE_VERSION))) {
813 		log_error("Failed to get device-mapper version");
814 		version[0] = '\0';
815 		return 0;
816 	}
817 
818 	if (log_suppress)
819 		_log_suppress = 1;
820 
821 	r = dm_task_run(task);
822 	dm_task_get_driver_version(task, version, size);
823 	dm_task_destroy(task);
824 	_log_suppress = 0;
825 
826 	return r;
827 }
828 
829 /*
830  * Find out the device-mapper ioctl interface's major version number the
831  * first time this is called and whether or not we support it.
832  */
833 int dm_check_version(void)
834 {
835 	char libversion[64], dmversion[64];
836 	const char *compat = "";
837 
838 	if (_version_checked)
839 		return _version_ok;
840 
841 	_version_checked = 1;
842 
843 	if (_check_version(dmversion, sizeof(dmversion), _dm_compat))
844 		return 1;
845 
846 	if (!_dm_compat)
847 		goto bad;
848 
849 	log_verbose("device-mapper ioctl protocol version %u failed. "
850 		    "Trying protocol version 1.", _dm_version);
851 	_dm_version = 1;
852 	if (_check_version(dmversion, sizeof(dmversion), 0)) {
853 		log_verbose("Using device-mapper ioctl protocol version 1");
854 		return 1;
855 	}
856 
857 	compat = "(compat)";
858 
859 	dm_get_library_version(libversion, sizeof(libversion));
860 
861 	log_error("Incompatible libdevmapper %s%s and kernel driver %s",
862 		  libversion, compat, dmversion);
863 
864       bad:
865 	_version_ok = 0;
866 	return 0;
867 }
868 
869 int dm_cookie_supported(void)
870 {
871 	return (dm_check_version() &&
872 	        _dm_version >= 4 &&
873 	        _dm_version_minor >= 15);
874 }
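/*
 * Example (sketch): udev-synchronised removal using the cookie interface
 * declared in libdevmapper.h.  The cookie is only armed when
 * dm_cookie_supported() reports a new enough kernel; the device name is
 * illustrative.
 *
 *	uint32_t cookie = 0;
 *	struct dm_task *dmt;
 *
 *	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
 *		return 0;
 *	if (dm_task_set_name(dmt, "vg0-lv0")) {
 *		if (dm_cookie_supported())
 *			dm_task_set_cookie(dmt, &cookie, 0);
 *		if (dm_task_run(dmt) && cookie)
 *			dm_udev_wait(cookie);
 *	}
 *	dm_task_destroy(dmt);
 */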
875 
876 void *dm_get_next_target(struct dm_task *dmt, void *next,
877 			 uint64_t *start, uint64_t *length,
878 			 char **target_type, char **params)
879 {
880 	struct target *t = (struct target *) next;
881 
882 	if (!t)
883 		t = dmt->head;
884 
885 	if (!t)
886 		return NULL;
887 
888 	*start = t->start;
889 	*length = t->length;
890 	*target_type = t->type;
891 	*params = t->params;
892 
893 	return t->next;
894 }
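/*
 * Example (sketch): walking the targets of a DM_DEVICE_TABLE or
 * DM_DEVICE_STATUS task after dm_task_run().  Pass NULL to start the
 * walk; it is finished when NULL is returned again.
 *
 *	void *next = NULL;
 *	uint64_t start, length;
 *	char *target_type = NULL, *params = NULL;
 *
 *	do {
 *		next = dm_get_next_target(dmt, next, &start, &length,
 *					  &target_type, &params);
 *		if (target_type)
 *			log_verbose("%" PRIu64 " %" PRIu64 " %s %s",
 *				    start, length, target_type, params);
 *	} while (next);
 */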
895 
896 /* Unmarshall the target info returned from a status call */
897 static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
898 {
899 	char *outbuf = (char *) dmi + dmi->data_start;
900 	char *outptr = outbuf;
901 	uint32_t i;
902 	struct dm_target_spec *spec;
903 
904 	for (i = 0; i < dmi->target_count; i++) {
905 		spec = (struct dm_target_spec *) outptr;
906 		if (!dm_task_add_target(dmt, spec->sector_start,
907 					spec->length,
908 					spec->target_type,
909 					outptr + sizeof(*spec))) {
910 			return 0;
911 		}
912 
913 		outptr = outbuf + spec->next;
914 	}
915 
916 	return 1;
917 }
918 
919 int dm_format_dev(char *buf, int bufsize, uint32_t dev_major,
920 		  uint32_t dev_minor)
921 {
922 	int r;
923 
924 #ifdef DM_COMPAT
925 	if (_dm_version == 1)
926 		return _dm_format_dev_v1(buf, bufsize, dev_major, dev_minor);
927 #endif
928 
929 	if (bufsize < 8)
930 		return 0;
931 
932 	r = snprintf(buf, (size_t) bufsize, "%u:%u", dev_major, dev_minor);
933 	if (r < 0 || r > bufsize - 1)
934 		return 0;
935 
936 	return 1;
937 }
938 
939 int dm_task_get_info(struct dm_task *dmt, struct dm_info *info)
940 {
941 #ifdef DM_COMPAT
942 	if (_dm_version == 1)
943 		return _dm_task_get_info_v1(dmt, info);
944 #endif
945 
946 	if (!dmt->dmi.v4)
947 		return 0;
948 
949 	memset(info, 0, sizeof(*info));
950 
951 	info->exists = dmt->dmi.v4->flags & DM_EXISTS_FLAG ? 1 : 0;
952 	if (!info->exists)
953 		return 1;
954 
955 	info->suspended = dmt->dmi.v4->flags & DM_SUSPEND_FLAG ? 1 : 0;
956 	info->read_only = dmt->dmi.v4->flags & DM_READONLY_FLAG ? 1 : 0;
957 	info->live_table = dmt->dmi.v4->flags & DM_ACTIVE_PRESENT_FLAG ? 1 : 0;
958 	info->inactive_table = dmt->dmi.v4->flags & DM_INACTIVE_PRESENT_FLAG ?
959 	    1 : 0;
960 	info->target_count = dmt->dmi.v4->target_count;
961 	info->open_count = dmt->dmi.v4->open_count;
962 	info->event_nr = dmt->dmi.v4->event_nr;
963 	info->major = MAJOR(dmt->dmi.v4->dev);
964 	info->minor = MINOR(dmt->dmi.v4->dev);
965 
966 	return 1;
967 }
968 
969 uint32_t dm_task_get_read_ahead(const struct dm_task *dmt, uint32_t *read_ahead)
970 {
971 	const char *dev_name;
972 
973 	*read_ahead = 0;
974 
975 #ifdef DM_COMPAT
976 	/* Not supporting this */
977 	if (_dm_version == 1)
978 		return 1;
979 #endif
980 
981 	if (!dmt->dmi.v4 || !(dmt->dmi.v4->flags & DM_EXISTS_FLAG))
982 		return 0;
983 
984 	if (*dmt->dmi.v4->name)
985 		dev_name = dmt->dmi.v4->name;
986 	else if (dmt->dev_name)
987 		dev_name = dmt->dev_name;
988 	else {
989 		log_error("Get read ahead request failed: device name unrecorded.");
990 		return 0;
991 	}
992 
993 	return get_dev_node_read_ahead(dev_name, read_ahead);
994 }
995 
996 const char *dm_task_get_name(const struct dm_task *dmt)
997 {
998 #ifdef DM_COMPAT
999 	if (_dm_version == 1)
1000 		return _dm_task_get_name_v1(dmt);
1001 #endif
1002 
1003 	return (dmt->dmi.v4->name);
1004 }
1005 
1006 const char *dm_task_get_uuid(const struct dm_task *dmt)
1007 {
1008 #ifdef DM_COMPAT
1009 	if (_dm_version == 1)
1010 		return _dm_task_get_uuid_v1(dmt);
1011 #endif
1012 
1013 	return (dmt->dmi.v4->uuid);
1014 }
1015 
1016 struct dm_deps *dm_task_get_deps(struct dm_task *dmt)
1017 {
1018 #ifdef DM_COMPAT
1019 	if (_dm_version == 1)
1020 		return _dm_task_get_deps_v1(dmt);
1021 #endif
1022 
1023 	return (struct dm_deps *) (((void *) dmt->dmi.v4) +
1024 				   dmt->dmi.v4->data_start);
1025 }
1026 
1027 struct dm_names *dm_task_get_names(struct dm_task *dmt)
1028 {
1029 #ifdef DM_COMPAT
1030 	if (_dm_version == 1)
1031 		return _dm_task_get_names_v1(dmt);
1032 #endif
1033 
1034 	return (struct dm_names *) (((void *) dmt->dmi.v4) +
1035 				    dmt->dmi.v4->data_start);
1036 }
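/*
 * Example (sketch): iterating the packed list returned by a
 * DM_DEVICE_LIST task.  Entries are chained by the byte offset in
 * names->next; a zero 'dev' field means the list is empty and a zero
 * 'next' terminates it.  This is the same walk _lookup_dev_name() and
 * _process_all_v4() use below.
 *
 *	struct dm_names *names;
 *	unsigned next = 0;
 *
 *	if ((names = dm_task_get_names(dmt)) && names->dev) {
 *		do {
 *			names = (struct dm_names *) ((void *) names + next);
 *			log_verbose("%s", names->name);
 *			next = names->next;
 *		} while (next);
 *	}
 */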
1037 
1038 struct dm_versions *dm_task_get_versions(struct dm_task *dmt)
1039 {
1040 	return (struct dm_versions *) (((void *) dmt->dmi.v4) +
1041 				       dmt->dmi.v4->data_start);
1042 }
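/*
 * Example (sketch): listing target versions from a
 * DM_DEVICE_LIST_VERSIONS task.  Entries are chained by the byte offset
 * in 'next'; the final entry has next == 0, so the walk stops when the
 * pointer no longer advances.
 *
 *	struct dm_versions *tgt = dm_task_get_versions(dmt);
 *	struct dm_versions *last;
 *
 *	do {
 *		last = tgt;
 *		log_verbose("%s v%u.%u.%u", tgt->name, tgt->version[0],
 *			    tgt->version[1], tgt->version[2]);
 *		tgt = (struct dm_versions *) ((void *) tgt + tgt->next);
 *	} while (last != tgt);
 */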
1043 
1044 int dm_task_set_ro(struct dm_task *dmt)
1045 {
1046 	dmt->read_only = 1;
1047 	return 1;
1048 }
1049 
1050 int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
1051 			   uint32_t read_ahead_flags)
1052 {
1053 	dmt->read_ahead = read_ahead;
1054 	dmt->read_ahead_flags = read_ahead_flags;
1055 
1056 	return 1;
1057 }
1058 
1059 int dm_task_suppress_identical_reload(struct dm_task *dmt)
1060 {
1061 	dmt->suppress_identical_reload = 1;
1062 	return 1;
1063 }
1064 
1065 int dm_task_set_newname(struct dm_task *dmt, const char *newname)
1066 {
1067 	if (strchr(newname, '/')) {
1068 		log_error("Name \"%s\" invalid. It contains \"/\".", newname);
1069 		return 0;
1070 	}
1071 
1072 	if (strlen(newname) >= DM_NAME_LEN) {
1073 		log_error("Name \"%s\" too long", newname);
1074 		return 0;
1075 	}
1076 
1077 	if (!(dmt->newname = dm_strdup(newname))) {
1078 		log_error("dm_task_set_newname: strdup(%s) failed", newname);
1079 		return 0;
1080 	}
1081 
1082 	return 1;
1083 }
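/*
 * Example (sketch): renaming a mapped device.  Both names below are
 * illustrative.
 *
 *	struct dm_task *dmt;
 *
 *	if (!(dmt = dm_task_create(DM_DEVICE_RENAME)))
 *		return 0;
 *	if (dm_task_set_name(dmt, "old_name") &&
 *	    dm_task_set_newname(dmt, "new_name") &&
 *	    dm_task_run(dmt))
 *		... renamed; the node under dm_dir() is moved as well ...
 *	dm_task_destroy(dmt);
 */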
1084 
1085 int dm_task_set_message(struct dm_task *dmt, const char *message)
1086 {
1087 	if (!(dmt->message = dm_strdup(message))) {
1088 		log_error("dm_task_set_message: strdup(%s) failed", message);
1089 		return 0;
1090 	}
1091 
1092 	return 1;
1093 }
1094 
1095 int dm_task_set_sector(struct dm_task *dmt, uint64_t sector)
1096 {
1097 	dmt->sector = sector;
1098 
1099 	return 1;
1100 }
1101 
1102 int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, const char *sectors, const char *start)
1103 {
1104 	size_t len = strlen(cylinders) + 1 + strlen(heads) + 1 + strlen(sectors) + 1 + strlen(start) + 1;
1105 
1106 	if (!(dmt->geometry = dm_malloc(len))) {
1107 		log_error("dm_task_set_geometry: dm_malloc failed");
1108 		return 0;
1109 	}
1110 
1111 	if (sprintf(dmt->geometry, "%s %s %s %s", cylinders, heads, sectors, start) < 0) {
1112 		log_error("dm_task_set_geometry: sprintf failed");
1113 		return 0;
1114 	}
1115 
1116 	return 1;
1117 }
1118 
1119 int dm_task_no_flush(struct dm_task *dmt)
1120 {
1121 	dmt->no_flush = 1;
1122 
1123 	return 1;
1124 }
1125 
1126 int dm_task_no_open_count(struct dm_task *dmt)
1127 {
1128 	dmt->no_open_count = 1;
1129 
1130 	return 1;
1131 }
1132 
1133 int dm_task_skip_lockfs(struct dm_task *dmt)
1134 {
1135 	dmt->skip_lockfs = 1;
1136 
1137 	return 1;
1138 }
1139 
1140 int dm_task_query_inactive_table(struct dm_task *dmt)
1141 {
1142 	dmt->query_inactive_table = 1;
1143 
1144 	return 1;
1145 }
1146 
1147 int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr)
1148 {
1149 	dmt->event_nr = event_nr;
1150 
1151 	return 1;
1152 }
1153 
1154 struct target *create_target(uint64_t start, uint64_t len, const char *type,
1155 			     const char *params)
1156 {
1157 	struct target *t = dm_malloc(sizeof(*t));
1158 
1159 	if (!t) {
1160 		log_error("create_target: malloc(%" PRIsize_t ") failed",
1161 			  sizeof(*t));
1162 		return NULL;
1163 	}
1164 
1165 	memset(t, 0, sizeof(*t));
1166 
1167 	if (!(t->params = dm_strdup(params))) {
1168 		log_error("create_target: strdup(params) failed");
1169 		goto bad;
1170 	}
1171 
1172 	if (!(t->type = dm_strdup(type))) {
1173 		log_error("create_target: strdup(type) failed");
1174 		goto bad;
1175 	}
1176 
1177 	t->start = start;
1178 	t->length = len;
1179 	return t;
1180 
1181       bad:
1182 	dm_free(t->params);
1183 	dm_free(t->type);
1184 	dm_free(t);
1185 	return NULL;
1186 }
1187 
1188 static void *_add_target(struct target *t, void *out, void *end)
1189 {
1190 	void *out_sp = out;
1191 	struct dm_target_spec sp;
1192 	size_t sp_size = sizeof(struct dm_target_spec);
1193 	int len;
1194 	const char no_space[] = "Ran out of memory building ioctl parameter";
1195 
1196 	out += sp_size;
1197 	if (out >= end) {
1198 		log_error(no_space);
1199 		return NULL;
1200 	}
1201 
1202 	sp.status = 0;
1203 	sp.sector_start = t->start;
1204 	sp.length = t->length;
1205 	strncpy(sp.target_type, t->type, sizeof(sp.target_type));
1206 
1207 	len = strlen(t->params);
1208 
1209 	if ((out + len + 1) >= end) {
1210 		log_error(no_space);
1211 
1212 		log_error("t->params= '%s'", t->params);
1213 		return NULL;
1214 	}
1215 	strcpy((char *) out, t->params);
1216 	out += len + 1;
1217 
1218 	/* align next block */
1219 	out = _align(out, ALIGNMENT);
1220 
1221 	sp.next = out - out_sp;
1222 	memcpy(out_sp, &sp, sp_size);
1223 
1224 	return out;
1225 }
1226 
1227 static int _lookup_dev_name(uint64_t dev, char *buf, size_t len)
1228 {
1229 	struct dm_names *names;
1230 	unsigned next = 0;
1231 	struct dm_task *dmt;
1232 	int r = 0;
1233 
1234 	if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
1235 		return 0;
1236 
1237 	if (!dm_task_run(dmt))
1238 		goto out;
1239 
1240 	if (!(names = dm_task_get_names(dmt)))
1241 		goto out;
1242 
1243 	if (!names->dev)
1244 		goto out;
1245 
1246 	do {
1247 		names = (void *) names + next;
1248 		if (names->dev == dev) {
1249 			strncpy(buf, names->name, len);
1250 			r = 1;
1251 			break;
1252 		}
1253 		next = names->next;
1254 	} while (next);
1255 
1256       out:
1257 	dm_task_destroy(dmt);
1258 	return r;
1259 }
1260 
1261 static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count)
1262 {
1263 	const size_t min_size = 16 * 1024;
1264 	const int (*version)[3];
1265 
1266 	struct dm_ioctl *dmi;
1267 	struct target *t;
1268 	struct dm_target_msg *tmsg;
1269 	size_t len = sizeof(struct dm_ioctl);
1270 	void *b, *e;
1271 	int count = 0;
1272 
1273 	for (t = dmt->head; t; t = t->next) {
1274 		len += sizeof(struct dm_target_spec);
1275 		len += strlen(t->params) + 1 + ALIGNMENT;
1276 		count++;
1277 	}
1278 
1279 	if (count && (dmt->sector || dmt->message)) {
1280 		log_error("targets and message are incompatible");
1281 		return NULL;
1282 	}
1283 
1284 	if (count && dmt->newname) {
1285 		log_error("targets and newname are incompatible");
1286 		return NULL;
1287 	}
1288 
1289 	if (count && dmt->geometry) {
1290 		log_error("targets and geometry are incompatible");
1291 		return NULL;
1292 	}
1293 
1294 	if (dmt->newname && (dmt->sector || dmt->message)) {
1295 		log_error("message and newname are incompatible");
1296 		return NULL;
1297 	}
1298 
1299 	if (dmt->newname && dmt->geometry) {
1300 		log_error("geometry and newname are incompatible");
1301 		return NULL;
1302 	}
1303 
1304 	if (dmt->geometry && (dmt->sector || dmt->message)) {
1305 		log_error("geometry and message are incompatible");
1306 		return NULL;
1307 	}
1308 
1309 	if (dmt->sector && !dmt->message) {
1310 		log_error("message is required with sector");
1311 		return NULL;
1312 	}
1313 
1314 	if (dmt->newname)
1315 		len += strlen(dmt->newname) + 1;
1316 
1317 	if (dmt->message)
1318 		len += sizeof(struct dm_target_msg) + strlen(dmt->message) + 1;
1319 
1320 	if (dmt->geometry)
1321 		len += strlen(dmt->geometry) + 1;
1322 
1323 	/*
1324 	 * Give len a minimum size so that we have space to store
1325 	 * dependencies or status information.
1326 	 */
1327 	if (len < min_size)
1328 		len = min_size;
1329 
1330 	/* Increase buffer size if repeating because buffer was too small */
1331 	while (repeat_count--)
1332 		len *= 2;
1333 
1334 	if (!(dmi = dm_malloc(len)))
1335 		return NULL;
1336 
1337 	memset(dmi, 0, len);
1338 
1339 	version = &_cmd_data_v4[dmt->type].version;
1340 
1341 	dmi->version[0] = (*version)[0];
1342 	dmi->version[1] = (*version)[1];
1343 	dmi->version[2] = (*version)[2];
1344 
1345 	dmi->data_size = len;
1346 	dmi->data_start = sizeof(struct dm_ioctl);
1347 
1348 	if (dmt->minor >= 0) {
1349 		if (dmt->major <= 0) {
1350 			log_error("Missing major number for persistent device.");
1351 			goto bad;
1352 		}
1353 
1354 		if (!_dm_multiple_major_support && dmt->allow_default_major_fallback &&
1355 		    dmt->major != _dm_device_major) {
1356 			log_verbose("Overriding major number of %" PRIu32
1357 				    " with %" PRIu32 " for persistent device.",
1358 				    dmt->major, _dm_device_major);
1359 			dmt->major = _dm_device_major;
1360 		}
1361 
1362 		dmi->flags |= DM_PERSISTENT_DEV_FLAG;
1363 		dmi->dev = MKDEV(dmt->major, dmt->minor);
1364 	}
1365 
1366 	/* Does driver support device number referencing? */
1367 	if (_dm_version_minor < 3 && !dmt->dev_name && !dmt->uuid && dmi->dev) {
1368 		if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) {
1369 			log_error("Unable to find name for device (%" PRIu32
1370 				  ":%" PRIu32 ")", dmt->major, dmt->minor);
1371 			goto bad;
1372 		}
1373 		log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s "
1374 			    "for compatibility with old kernel",
1375 			    dmt->major, dmt->minor, dmi->name);
1376 	}
1377 
1378 	/* FIXME Until resume ioctl supplies name, use dev_name for readahead */
1379 	if (dmt->dev_name && (dmt->type != DM_DEVICE_RESUME || dmt->minor < 0 ||
1380 			      dmt->major < 0))
1381 		strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
1382 
1383 	if (dmt->uuid)
1384 		strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));
1385 
1386 	if (dmt->type == DM_DEVICE_SUSPEND)
1387 		dmi->flags |= DM_SUSPEND_FLAG;
1388 	if (dmt->no_flush)
1389 		dmi->flags |= DM_NOFLUSH_FLAG;
1390 	if (dmt->read_only)
1391 		dmi->flags |= DM_READONLY_FLAG;
1392 	if (dmt->skip_lockfs)
1393 		dmi->flags |= DM_SKIP_LOCKFS_FLAG;
1394 	if (dmt->query_inactive_table) {
1395 		if (_dm_version_minor < 16)
1396 			log_warn("WARNING: Inactive table query unsupported "
1397 				 "by kernel.  It will use live table.");
1398 		dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;
1399 	}
1400 
1401 	dmi->target_count = count;
1402 	dmi->event_nr = dmt->event_nr;
1403 
1404 	b = (void *) (dmi + 1);
1405 	e = (void *) ((char *) dmi + len);
1406 
1407 	for (t = dmt->head; t; t = t->next)
1408 		if (!(b = _add_target(t, b, e)))
1409 			goto bad;
1410 
1411 	if (dmt->newname)
1412 		strcpy(b, dmt->newname);
1413 
1414 	if (dmt->message) {
1415 		tmsg = (struct dm_target_msg *) b;
1416 		tmsg->sector = dmt->sector;
1417 		strcpy(tmsg->message, dmt->message);
1418 	}
1419 
1420 	if (dmt->geometry)
1421 		strcpy(b, dmt->geometry);
1422 
1423 	return dmi;
1424 
1425       bad:
1426 	dm_free(dmi);
1427 	return NULL;
1428 }
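/*
 * Layout of the buffer _flatten() builds (illustrative):
 *
 *	+--------------------------+ <- dmi
 *	| struct dm_ioctl          |    data_start = sizeof(struct dm_ioctl)
 *	+--------------------------+ <- dmi + data_start
 *	| dm_target_spec #0        |
 *	| params string + '\0'     |    padded to ALIGNMENT; spec->next holds
 *	+--------------------------+    the offset to the following spec
 *	| dm_target_spec #1 ...    |
 *	+--------------------------+
 *	| newname / dm_target_msg  |    only for the task types that carry
 *	| / geometry string        |    them (target_count is then 0)
 *	+--------------------------+ <- dmi + data_size (>= 16k, doubled on
 *	                                each DM_BUFFER_FULL retry)
 */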
1429 
1430 static int _process_mapper_dir(struct dm_task *dmt)
1431 {
1432 	struct dirent *dirent;
1433 	DIR *d;
1434 	const char *dir;
1435 	int r = 1;
1436 
1437 	dir = dm_dir();
1438 	if (!(d = opendir(dir))) {
1439 		log_sys_error("opendir", dir);
1440 		return 0;
1441 	}
1442 
1443 	while ((dirent = readdir(d))) {
1444 		if (!strcmp(dirent->d_name, ".") ||
1445 		    !strcmp(dirent->d_name, "..") ||
1446 		    !strcmp(dirent->d_name, "control"))
1447 			continue;
1448 		dm_task_set_name(dmt, dirent->d_name);
1449 		dm_task_run(dmt);
1450 	}
1451 
1452 	if (closedir(d))
1453 		log_sys_error("closedir", dir);
1454 
1455 	return r;
1456 }
1457 
1458 static int _process_all_v4(struct dm_task *dmt)
1459 {
1460 	struct dm_task *task;
1461 	struct dm_names *names;
1462 	unsigned next = 0;
1463 	int r = 1;
1464 
1465 	if (!(task = dm_task_create(DM_DEVICE_LIST)))
1466 		return 0;
1467 
1468 	if (!dm_task_run(task)) {
1469 		r = 0;
1470 		goto out;
1471 	}
1472 
1473 	if (!(names = dm_task_get_names(task))) {
1474 		r = 0;
1475 		goto out;
1476 	}
1477 
1478 	if (!names->dev)
1479 		goto out;
1480 
1481 	do {
1482 		names = (void *) names + next;
1483 		if (!dm_task_set_name(dmt, names->name)) {
1484 			r = 0;
1485 			goto out;
1486 		}
1487 		if (!dm_task_run(dmt))
1488 			r = 0;
1489 		next = names->next;
1490 	} while (next);
1491 
1492       out:
1493 	dm_task_destroy(task);
1494 	return r;
1495 }
1496 
1497 static int _mknodes_v4(struct dm_task *dmt)
1498 {
1499 	(void) _process_mapper_dir(dmt);
1500 
1501 	return _process_all_v4(dmt);
1502 }
1503 
1504 /*
1505  * If an operation that uses a cookie fails, decrement the udev
1506  * synchronisation semaphore ourselves, since udev will never get to do it.
1507  */
1508 static int _udev_complete(struct dm_task *dmt)
1509 {
1510 	uint32_t cookie;
1511 
1512 	if (dmt->cookie_set) {
1513 		/* strip flags from the cookie and use cookie magic instead */
1514 		cookie = (dmt->event_nr & ~DM_UDEV_FLAGS_MASK) |
1515 			  (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT);
1516 		return dm_udev_complete(cookie);
1517 	}
1518 
1519 	return 1;
1520 }
1521 
1522 static int _create_and_load_v4(struct dm_task *dmt)
1523 {
1524 	struct dm_task *task;
1525 	int r;
1526 
1527 	/* Use new task struct to create the device */
1528 	if (!(task = dm_task_create(DM_DEVICE_CREATE))) {
1529 		log_error("Failed to create device-mapper task struct");
1530 		_udev_complete(dmt);
1531 		return 0;
1532 	}
1533 
1534 	/* Copy across relevant fields */
1535 	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1536 		dm_task_destroy(task);
1537 		_udev_complete(dmt);
1538 		return 0;
1539 	}
1540 
1541 	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
1542 		dm_task_destroy(task);
1543 		_udev_complete(dmt);
1544 		return 0;
1545 	}
1546 
1547 	task->major = dmt->major;
1548 	task->minor = dmt->minor;
1549 	task->uid = dmt->uid;
1550 	task->gid = dmt->gid;
1551 	task->mode = dmt->mode;
1552 	/* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
1553 	task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
1554 	task->cookie_set = dmt->cookie_set;
1555 
1556 	r = dm_task_run(task);
1557 	dm_task_destroy(task);
1558 	if (!r) {
1559 		_udev_complete(dmt);
1560 		return 0;
1561 	}
1562 
1563 	/* Next load the table */
1564 	if (!(task = dm_task_create(DM_DEVICE_RELOAD))) {
1565 		log_error("Failed to create device-mapper task struct");
1566 		_udev_complete(dmt);
1567 		return 0;
1568 	}
1569 
1570 	/* Copy across relevant fields */
1571 	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1572 		dm_task_destroy(task);
1573 		_udev_complete(dmt);
1574 		return 0;
1575 	}
1576 
1577 	task->read_only = dmt->read_only;
1578 	task->head = dmt->head;
1579 	task->tail = dmt->tail;
1580 
1581 	r = dm_task_run(task);
1582 
1583 	task->head = NULL;
1584 	task->tail = NULL;
1585 	dm_task_destroy(task);
1586 	if (!r) {
1587 		_udev_complete(dmt);
1588 		goto revert;
1589 	}
1590 
1591 	/* Use the original structure last so the info will be correct */
1592 	dmt->type = DM_DEVICE_RESUME;
1593 	dm_free(dmt->uuid);
1594 	dmt->uuid = NULL;
1595 
1596 	r = dm_task_run(dmt);
1597 
1598 	if (r)
1599 		return r;
1600 
1601       revert:
1602  	dmt->type = DM_DEVICE_REMOVE;
1603 	dm_free(dmt->uuid);
1604 	dmt->uuid = NULL;
1605 	dmt->cookie_set = 0;
1606 
1607 	if (!dm_task_run(dmt))
1608 		log_error("Failed to revert device creation.");
1609 
1610 	return r;
1611 }
1612 
1613 uint64_t dm_task_get_existing_table_size(struct dm_task *dmt)
1614 {
1615 	return dmt->existing_table_size;
1616 }
1617 
1618 static int _reload_with_suppression_v4(struct dm_task *dmt)
1619 {
1620 	struct dm_task *task;
1621 	struct target *t1, *t2;
1622 	int r;
1623 
1624 	/* New task to get existing table information */
1625 	if (!(task = dm_task_create(DM_DEVICE_TABLE))) {
1626 		log_error("Failed to create device-mapper task struct");
1627 		return 0;
1628 	}
1629 
1630 	/* Copy across relevant fields */
1631 	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1632 		dm_task_destroy(task);
1633 		return 0;
1634 	}
1635 
1636 	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
1637 		dm_task_destroy(task);
1638 		return 0;
1639 	}
1640 
1641 	task->major = dmt->major;
1642 	task->minor = dmt->minor;
1643 
1644 	r = dm_task_run(task);
1645 
1646 	if (!r) {
1647 		dm_task_destroy(task);
1648 		return r;
1649 	}
1650 
1651 	/* Store existing table size */
1652 	t2 = task->head;
1653 	while (t2 && t2->next)
1654 		t2 = t2->next;
1655 	dmt->existing_table_size = t2 ? t2->start + t2->length : 0;
1656 
1657 	if (((task->dmi.v4->flags & DM_READONLY_FLAG) ? 1 : 0) != dmt->read_only)
1658 		goto no_match;
1659 
1660 	t1 = dmt->head;
1661 	t2 = task->head;
1662 
1663 	while (t1 && t2) {
1664 		while (*t2->params && t2->params[strlen(t2->params) - 1] == ' ')
1665 			t2->params[strlen(t2->params) - 1] = '\0';
1666 		if ((t1->start != t2->start) ||
1667 		    (t1->length != t2->length) ||
1668 		    (strcmp(t1->type, t2->type)) ||
1669 		    (strcmp(t1->params, t2->params)))
1670 			goto no_match;
1671 		t1 = t1->next;
1672 		t2 = t2->next;
1673 	}
1674 
1675 	if (!t1 && !t2) {
1676 		dmt->dmi.v4 = task->dmi.v4;
1677 		task->dmi.v4 = NULL;
1678 		dm_task_destroy(task);
1679 		return 1;
1680 	}
1681 
1682 no_match:
1683 	dm_task_destroy(task);
1684 
1685 	/* Now do the original reload */
1686 	dmt->suppress_identical_reload = 0;
1687 	r = dm_task_run(dmt);
1688 
1689 	return r;
1690 }
1691 
1692 static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
1693 				     unsigned repeat_count)
1694 {
1695 	struct dm_ioctl *dmi;
1696 
1697 	dmi = _flatten(dmt, repeat_count);
1698 	if (!dmi) {
1699 		log_error("Couldn't create ioctl argument.");
1700 		return NULL;
1701 	}
1702 
1703 	if (dmt->type == DM_DEVICE_TABLE)
1704 		dmi->flags |= DM_STATUS_TABLE_FLAG;
1705 
1706 	dmi->flags |= DM_EXISTS_FLAG;	/* FIXME */
1707 
1708 	if (dmt->no_open_count)
1709 		dmi->flags |= DM_SKIP_BDGET_FLAG;
1710 
1711 	/*
1712 	 * Prevent udev vs. libdevmapper race when processing nodes and
1713 	 * symlinks. This can happen when the udev rules are installed and
1714 	 * udev synchronisation code is enabled in libdevmapper but the
1715 	 * software using libdevmapper does not make use of it (by not calling
1716 	 * dm_task_set_cookie before). We need to instruct the udev rules not
1717 	 * to be applied at all in this situation so we can gracefully fall back
1718 	 * to libdevmapper's node and symlink creation code.
1719 	 */
1720 	if (dm_udev_get_sync_support() && !dmt->cookie_set &&
1721 	    (dmt->type == DM_DEVICE_RESUME ||
1722 	     dmt->type == DM_DEVICE_REMOVE ||
1723 	     dmt->type == DM_DEVICE_RENAME)) {
1724 		log_debug("Cookie value is not set while trying to call "
1725 			  "DM_DEVICE_RESUME, DM_DEVICE_REMOVE or DM_DEVICE_RENAME "
1726 			  "ioctl. Please, consider using libdevmapper's udev "
1727 			  "synchronisation interface or disable it explicitly "
1728 			  "by calling dm_udev_set_sync_support(0).");
1729 		log_debug("Switching off device-mapper and all subsystem related "
1730 			  "udev rules. Falling back to libdevmapper node creation.");
1731 		/*
1732 		 * Disable general dm and subsystem rules but keep dm disk rules
1733 		 * unless they were explicitly disabled beforehand. We need /dev/disk content
1734 		 * for the software that expects it.
1735 		*/
1736 		dmi->event_nr |= (DM_UDEV_DISABLE_DM_RULES_FLAG |
1737 				  DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG) <<
1738 				 DM_UDEV_FLAGS_SHIFT;
1739 	}
1740 
1741 	log_debug("dm %s %s %s%s%s %s%.0d%s%.0d%s"
1742 		  "%s%c%c%s%s %.0" PRIu64 " %s [%u]",
1743 		  _cmd_data_v4[dmt->type].name,
1744 		  dmi->name, dmi->uuid, dmt->newname ? " " : "",
1745 		  dmt->newname ? dmt->newname : "",
1746 		  dmt->major > 0 ? "(" : "",
1747 		  dmt->major > 0 ? dmt->major : 0,
1748 		  dmt->major > 0 ? ":" : "",
1749 		  dmt->minor > 0 ? dmt->minor : 0,
1750 		  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
1751 		  dmt->major > 0 ? ") " : "",
1752 		  dmt->no_open_count ? 'N' : 'O',
1753 		  dmt->no_flush ? 'N' : 'F',
1754 		  dmt->skip_lockfs ? "S " : "",
1755 		  dmt->query_inactive_table ? "I " : "",
1756 		  dmt->sector, dmt->message ? dmt->message : "",
1757 		  dmi->data_size);
1758 #ifdef DM_IOCTLS
1759 	if (ioctl(_control_fd, command, dmi) < 0) {
1760 		if (errno == ENXIO && ((dmt->type == DM_DEVICE_INFO) ||
1761 				       (dmt->type == DM_DEVICE_MKNODES) ||
1762 				       (dmt->type == DM_DEVICE_STATUS)))
1763 			dmi->flags &= ~DM_EXISTS_FLAG;	/* FIXME */
1764 		else {
1765 			if (_log_suppress)
1766 				log_verbose("device-mapper: %s ioctl "
1767 					    "failed: %s",
1768 				    	    _cmd_data_v4[dmt->type].name,
1769 					    strerror(errno));
1770 			else
1771 				log_error("device-mapper: %s ioctl "
1772 					  "failed: %s",
1773 				    	   _cmd_data_v4[dmt->type].name,
1774 					  strerror(errno));
1775 			dm_free(dmi);
1776 			return NULL;
1777 		}
1778 	}
1779 #else /* Userspace alternative for testing */
1780 #endif
1781 	return dmi;
1782 }
1783 
1784 void dm_task_update_nodes(void)
1785 {
1786 	update_devs();
1787 }
1788 
1789 int dm_task_run(struct dm_task *dmt)
1790 {
1791 	struct dm_ioctl *dmi;
1792 	unsigned command;
1793 	int check_udev;
1794 
1795 #ifdef DM_COMPAT
1796 	if (_dm_version == 1)
1797 		return _dm_task_run_v1(dmt);
1798 #endif
1799 
1800 	if ((unsigned) dmt->type >=
1801 	    (sizeof(_cmd_data_v4) / sizeof(*_cmd_data_v4))) {
1802 		log_error("Internal error: unknown device-mapper task %d",
1803 			  dmt->type);
1804 		return 0;
1805 	}
1806 
1807 	command = _cmd_data_v4[dmt->type].cmd;
1808 
1809 	/* Old-style creation had a table supplied */
1810 	if (dmt->type == DM_DEVICE_CREATE && dmt->head)
1811 		return _create_and_load_v4(dmt);
1812 
1813 	if (dmt->type == DM_DEVICE_MKNODES && !dmt->dev_name &&
1814 	    !dmt->uuid && dmt->major <= 0)
1815 		return _mknodes_v4(dmt);
1816 
1817 	if ((dmt->type == DM_DEVICE_RELOAD) && dmt->suppress_identical_reload)
1818 		return _reload_with_suppression_v4(dmt);
1819 
1820 	if (!_open_control()) {
1821 		_udev_complete(dmt);
1822 		return 0;
1823 	}
1824 
1825 	/* FIXME Detect and warn if cookie set but should not be. */
1826 repeat_ioctl:
1827 	if (!(dmi = _do_dm_ioctl(dmt, command, _ioctl_buffer_double_factor))) {
1828 		_udev_complete(dmt);
1829 		return 0;
1830 	}
1831 
1832 	if (dmi->flags & DM_BUFFER_FULL_FLAG) {
1833 		switch (dmt->type) {
1834 		case DM_DEVICE_LIST_VERSIONS:
1835 		case DM_DEVICE_LIST:
1836 		case DM_DEVICE_DEPS:
1837 		case DM_DEVICE_STATUS:
1838 		case DM_DEVICE_TABLE:
1839 		case DM_DEVICE_WAITEVENT:
1840 			_ioctl_buffer_double_factor++;
1841 			dm_free(dmi);
1842 			goto repeat_ioctl;
1843 		default:
1844 			log_error("WARNING: libdevmapper buffer too small for data");
1845 		}
1846 	}
1847 
1848 	check_udev = dmt->cookie_set &&
1849 		     !(dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
1850 		       DM_UDEV_DISABLE_DM_RULES_FLAG);
1851 
1852 	switch (dmt->type) {
1853 	case DM_DEVICE_CREATE:
1854 		if (dmt->dev_name && *dmt->dev_name)
1855 			add_dev_node(dmt->dev_name, MAJOR(dmi->dev),
1856 				     MINOR(dmi->dev), dmt->uid, dmt->gid,
1857 				     dmt->mode, check_udev);
1858 		break;
1859 	case DM_DEVICE_REMOVE:
1860 		/* FIXME Kernel needs to fill in dmi->name */
1861 		if (dmt->dev_name)
1862 			rm_dev_node(dmt->dev_name, check_udev);
1863 		break;
1864 
1865 	case DM_DEVICE_RENAME:
1866 		/* FIXME Kernel needs to fill in dmi->name */
1867 		if (dmt->dev_name)
1868 			rename_dev_node(dmt->dev_name, dmt->newname,
1869 					check_udev);
1870 		break;
1871 
1872 	case DM_DEVICE_RESUME:
1873 		/* FIXME Kernel needs to fill in dmi->name */
1874 		set_dev_node_read_ahead(dmt->dev_name, dmt->read_ahead,
1875 					dmt->read_ahead_flags);
1876 		break;
1877 
1878 	case DM_DEVICE_MKNODES:
1879 		if (dmi->flags & DM_EXISTS_FLAG)
1880 			add_dev_node(dmi->name, MAJOR(dmi->dev),
1881 				     MINOR(dmi->dev), dmt->uid,
1882 				     dmt->gid, dmt->mode, 0);
1883 		else if (dmt->dev_name)
1884 			rm_dev_node(dmt->dev_name, 0);
1885 		break;
1886 
1887 	case DM_DEVICE_STATUS:
1888 	case DM_DEVICE_TABLE:
1889 	case DM_DEVICE_WAITEVENT:
1890 		if (!_unmarshal_status(dmt, dmi))
1891 			goto bad;
1892 		break;
1893 	}
1894 
1895 	/* Was structure reused? */
1896 	if (dmt->dmi.v4)
1897 		dm_free(dmt->dmi.v4);
1898 	dmt->dmi.v4 = dmi;
1899 	return 1;
1900 
1901       bad:
1902 	dm_free(dmi);
1903 	return 0;
1904 }
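/*
 * Example (sketch): creating a device together with its table.  Because
 * targets are attached to a DM_DEVICE_CREATE task, dm_task_run() routes
 * the request through _create_and_load_v4() above (create, load table,
 * then resume).  The name, size and underlying device are illustrative.
 *
 *	struct dm_task *dmt;
 *
 *	if (!(dmt = dm_task_create(DM_DEVICE_CREATE)))
 *		return 0;
 *	if (dm_task_set_name(dmt, "example-dev") &&
 *	    dm_task_add_target(dmt, 0, 204800, "linear", "/dev/sda1 0") &&
 *	    dm_task_run(dmt))
 *		... device is active with a 204800-sector linear mapping ...
 *	dm_task_destroy(dmt);
 */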
1905 
1906 void dm_lib_release(void)
1907 {
1908 	if (_control_fd != -1) {
1909 		close(_control_fd);
1910 		_control_fd = -1;
1911 	}
1912 	update_devs();
1913 }
1914 
1915 void dm_pools_check_leaks(void);
1916 
1917 void dm_lib_exit(void)
1918 {
1919 	dm_lib_release();
1920 	if (_dm_bitset)
1921 		dm_bitset_destroy(_dm_bitset);
1922 	_dm_bitset = NULL;
1923 	dm_pools_check_leaks();
1924 	dm_dump_memory();
1925 	_version_ok = 1;
1926 	_version_checked = 0;
1927 }
1928