1 /*	$NetBSD: disk_rep.c,v 1.1.1.2 2009/12/02 00:26:50 haad Exp $	*/
2 
3 /*
4  * Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
5  * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6  *
7  * This file is part of LVM2.
8  *
9  * This copyrighted material is made available to anyone wishing to use,
10  * modify, copy, or redistribute it subject to the terms and conditions
11  * of the GNU Lesser General Public License v.2.1.
12  *
13  * You should have received a copy of the GNU Lesser General Public License
14  * along with this program; if not, write to the Free Software Foundation,
15  * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16  */
17 
18 #include "lib.h"
19 #include "label.h"
20 #include "metadata.h"
21 #include "lvmcache.h"
22 #include "filter.h"
23 #include "xlate.h"
24 #include "disk_rep.h"
25 
26 #include <assert.h>
27 
28 /* FIXME: memcpy might not be portable */
#define CPIN_8(x, y, z) do {memcpy((x), (y), (z));} while (0)
#define CPOUT_8(x, y, z) do {memcpy((y), (x), (z));} while (0)
#define CPIN_16(x, y) do {(x) = xlate16_be((y));} while (0)
#define CPOUT_16(x, y) do {(y) = xlate16_be((x));} while (0)
#define CPIN_32(x, y) do {(x) = xlate32_be((y));} while (0)
#define CPOUT_32(x, y) do {(y) = xlate32_be((x));} while (0)
#define CPIN_64(x, y) do {(x) = xlate64_be((y));} while (0)
#define CPOUT_64(x, y) do {(y) = xlate64_be((x));} while (0)
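
/*
 * All multi-byte fields of an on-disk pool label are stored big-endian.
 * The CPIN_n()/CPOUT_n() macros convert a single field between disk and
 * host byte order.  A minimal sketch (illustrative only; "disk" stands
 * for a mapped raw label and is not a symbol in this file):
 *
 *	struct pool_disk host, *disk;
 *
 *	CPIN_32(host.pl_version, disk->pl_version);	// disk BE -> host
 *	CPOUT_32(host.pl_version, disk->pl_version);	// host -> disk BE
 */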
37 
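/*
 * Read the first 512-byte sector of @dev and hand it to
 * read_pool_label() to parse and cache the pool label found there.
 */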
static int _read_pool_disk(const struct format_type *fmt, struct device *dev,
			   struct dm_pool *mem __attribute__((unused)),
			   struct pool_list *pl,
			   const char *vg_name __attribute__((unused)))
{
	char buf[512] __attribute__((aligned(8)));
43 
44 	/* FIXME: Need to check the cache here first */
45 	if (!dev_read(dev, UINT64_C(0), 512, buf)) {
46 		log_very_verbose("Failed to read PV data from %s",
47 				 dev_name(dev));
48 		return 0;
49 	}
50 
51 	if (!read_pool_label(pl, fmt->labeller, dev, buf, NULL))
52 		return_0;
53 
54 	return 1;
55 }
56 
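/*
 * Add @data to the list at @head.  If a PV with the same uuid is
 * already listed, a duplicate on a plain device is ignored, while a
 * duplicate on a subsystem device (e.g. an MD device) replaces the
 * existing entry.
 */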
57 static void _add_pl_to_list(struct dm_list *head, struct pool_list *data)
58 {
59 	struct pool_list *pl;
60 
61 	dm_list_iterate_items(pl, head) {
62 		if (id_equal(&data->pv_uuid, &pl->pv_uuid)) {
			char uuid[ID_LEN + 7] __attribute__((aligned(8)));
64 
65 			id_write_format(&pl->pv_uuid, uuid, ID_LEN + 7);
66 
67 			if (!dev_subsystem_part_major(data->dev)) {
68 				log_very_verbose("Ignoring duplicate PV %s on "
69 						 "%s", uuid,
70 						 dev_name(data->dev));
71 				return;
72 			}
73 			log_very_verbose("Duplicate PV %s - using %s %s",
74 					 uuid, dev_subsystem_name(data->dev),
75 					 dev_name(data->dev));
76 			dm_list_del(&pl->list);
77 			break;
78 		}
79 	}
80 	dm_list_add(head, &data->list);
81 }
82 
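/*
 * Parse the on-disk pool label in @buf (the first sector of @dev),
 * register the PV with lvmcache and fill in @pl.  If @label is
 * non-NULL it is pointed at the cached label on success.
 */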
83 int read_pool_label(struct pool_list *pl, struct labeller *l,
84 		    struct device *dev, char *buf, struct label **label)
85 {
86 	struct lvmcache_info *info;
87 	struct id pvid;
88 	struct id vgid;
	char uuid[ID_LEN + 7] __attribute__((aligned(8)));
90 	struct pool_disk *pd = &pl->pd;
91 
92 	pool_label_in(pd, buf);
93 
94 	get_pool_pv_uuid(&pvid, pd);
95 	id_write_format(&pvid, uuid, ID_LEN + 7);
96 	log_debug("Calculated uuid %s for %s", uuid, dev_name(dev));
97 
98 	get_pool_vg_uuid(&vgid, pd);
99 	id_write_format(&vgid, uuid, ID_LEN + 7);
100 	log_debug("Calculated uuid %s for %s", uuid, pd->pl_pool_name);
101 
102 	if (!(info = lvmcache_add(l, (char *) &pvid, dev, pd->pl_pool_name,
103 				  (char *) &vgid, 0)))
104 		return_0;
105 	if (label)
106 		*label = info->label;
107 
	/* pl_blocks was already converted to host order by pool_label_in */
	info->device_size = pd->pl_blocks << SECTOR_SHIFT;
109 	dm_list_init(&info->mdas);
110 
111 	info->status &= ~CACHE_INVALID;
112 
113 	pl->dev = dev;
114 	pl->pv = NULL;
115 	memcpy(&pl->pv_uuid, &pvid, sizeof(pvid));
116 
117 	return 1;
118 }
119 
120 /**
121  * pool_label_out - copies a pool_label_t into a char buffer
122  * @pl: ptr to a pool_label_t struct
123  * @buf: ptr to raw space where label info will be copied
124  *
125  * This function is important because it takes care of all of
126  * the endian issues when copying to disk.  This way, when
127  * machines of different architectures are used, they will
128  * be able to interpret ondisk labels correctly.  Always use
129  * this function before writing to disk.
130  */
131 void pool_label_out(struct pool_disk *pl, void *buf)
132 {
133 	struct pool_disk *bufpl = (struct pool_disk *) buf;
134 
135 	CPOUT_64(pl->pl_magic, bufpl->pl_magic);
136 	CPOUT_64(pl->pl_pool_id, bufpl->pl_pool_id);
137 	CPOUT_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
138 	CPOUT_32(pl->pl_version, bufpl->pl_version);
139 	CPOUT_32(pl->pl_subpools, bufpl->pl_subpools);
140 	CPOUT_32(pl->pl_sp_id, bufpl->pl_sp_id);
141 	CPOUT_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
142 	CPOUT_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
143 	CPOUT_32(pl->pl_sp_type, bufpl->pl_sp_type);
144 	CPOUT_64(pl->pl_blocks, bufpl->pl_blocks);
145 	CPOUT_32(pl->pl_striping, bufpl->pl_striping);
146 	CPOUT_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
147 	CPOUT_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
148 	CPOUT_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
149 	CPOUT_32(pl->pl_minor, bufpl->pl_minor);
150 	CPOUT_32(pl->pl_padding, bufpl->pl_padding);
151 	CPOUT_8(pl->pl_reserve, bufpl->pl_reserve, 184);
152 }
153 
154 /**
155  * pool_label_in - copies a char buffer into a pool_label_t
156  * @pl: ptr to a pool_label_t struct
157  * @buf: ptr to raw space where label info is copied from
158  *
159  * This function is important because it takes care of all of
160  * the endian issues when information from disk is about to be
161  * used.  This way, when machines of different architectures
162  * are used, they will be able to interpret ondisk labels
163  * correctly.  Always use this function before using labels that
164  * were read from disk.
165  */
166 void pool_label_in(struct pool_disk *pl, void *buf)
167 {
168 	struct pool_disk *bufpl = (struct pool_disk *) buf;
169 
170 	CPIN_64(pl->pl_magic, bufpl->pl_magic);
171 	CPIN_64(pl->pl_pool_id, bufpl->pl_pool_id);
172 	CPIN_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
173 	CPIN_32(pl->pl_version, bufpl->pl_version);
174 	CPIN_32(pl->pl_subpools, bufpl->pl_subpools);
175 	CPIN_32(pl->pl_sp_id, bufpl->pl_sp_id);
176 	CPIN_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
177 	CPIN_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
178 	CPIN_32(pl->pl_sp_type, bufpl->pl_sp_type);
179 	CPIN_64(pl->pl_blocks, bufpl->pl_blocks);
180 	CPIN_32(pl->pl_striping, bufpl->pl_striping);
181 	CPIN_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
182 	CPIN_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
183 	CPIN_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
184 	CPIN_32(pl->pl_minor, bufpl->pl_minor);
185 	CPIN_32(pl->pl_padding, bufpl->pl_padding);
186 	CPIN_8(pl->pl_reserve, bufpl->pl_reserve, 184);
187 }
188 
189 static char _calc_char(unsigned int id)
190 {
191 	/*
192 	 * [0-9A-Za-z!#] - 64 printable chars (6-bits)
193 	 */
194 
	if (id < 10)
		return '0' + id;
	if (id < 36)
		return 'A' + (id - 10);
	if (id < 62)
		return 'a' + (id - 36);
	if (id == 62)
		return '!';
	if (id == 63)
		return '#';
205 
206 	return '%';
207 }
208 
209 void get_pool_uuid(char *uuid, uint64_t poolid, uint32_t spid, uint32_t devid)
210 {
211 	int i;
	unsigned mask = 0x003F;	/* selects the low six bits */

	assert(ID_LEN == 32);
	memset(uuid, 0, ID_LEN);
	strcat(uuid, "POOL0000000000");

	/* We grab the entire 64 bits (+2 that get shifted in) */
	for (i = 13; i < 24; i++) {
		uuid[i] = _calc_char(((unsigned) poolid) & mask);
		poolid = poolid >> 6;
	}

	/* We grab the entire 32 bits (+4 that get shifted in) */
	for (i = 24; i < 30; i++) {
		uuid[i] = _calc_char((unsigned) (spid & mask));
		spid = spid >> 6;
	}

	/*
	 * Since we can only have 128 devices, we only worry about the
	 * last 12 bits
	 */
	for (i = 30; i < 32; i++) {
		uuid[i] = _calc_char((unsigned) (devid & mask));
		devid = devid >> 6;
	}
}
240 
241 static int _read_vg_pds(const struct format_type *fmt, struct dm_pool *mem,
242 			struct lvmcache_vginfo *vginfo, struct dm_list *head,
243 			uint32_t *devcount)
244 {
245 	struct lvmcache_info *info;
246 	struct pool_list *pl = NULL;
247 	struct dm_pool *tmpmem;
248 
249 	uint32_t sp_count = 0;
250 	uint32_t *sp_devs = NULL;
251 	uint32_t i;
252 
	/* FIXME: maybe should return a different error on memory
	 * allocation failure */
255 	if (!(tmpmem = dm_pool_create("pool read_vg", 512)))
256 		return_0;
257 
	dm_list_iterate_items(info, &vginfo->infos) {
		/* Skip cache entries that carry no device */
		if (!info->dev)
			continue;

		if (!(pl = read_pool_disk(fmt, info->dev, mem, vginfo->vgname)))
			break;

		/*
		 * We need to keep track of the total expected number
		 * of devices per subpool
		 */
		if (!sp_count) {
			sp_count = pl->pd.pl_subpools;
269 			if (!(sp_devs =
270 			      dm_pool_zalloc(tmpmem,
271 					  sizeof(uint32_t) * sp_count))) {
272 				log_error("Unable to allocate %d 32-bit uints",
273 					  sp_count);
274 				dm_pool_destroy(tmpmem);
275 				return 0;
276 			}
277 		}
278 		/*
279 		 * watch out for a pool label with a different subpool
280 		 * count than the original - give up if it does
281 		 */
282 		if (sp_count != pl->pd.pl_subpools)
283 			break;
284 
285 		_add_pl_to_list(head, pl);
286 
287 		if (sp_count > pl->pd.pl_sp_id && sp_devs[pl->pd.pl_sp_id] == 0)
288 			sp_devs[pl->pd.pl_sp_id] = pl->pd.pl_sp_devs;
289 	}
290 
291 	*devcount = 0;
292 	for (i = 0; i < sp_count; i++)
293 		*devcount += sp_devs[i];
294 
295 	dm_pool_destroy(tmpmem);
296 
297 	if (pl && *pl->pd.pl_pool_name)
298 		return 1;
299 
	return 0;
}
303 
304 int read_pool_pds(const struct format_type *fmt, const char *vg_name,
305 		  struct dm_pool *mem, struct dm_list *pdhead)
306 {
307 	struct lvmcache_vginfo *vginfo;
308 	uint32_t totaldevs;
309 	int full_scan = -1;
310 
311 	do {
312 		/*
313 		 * If the cache scanning doesn't work, this will never work
314 		 */
315 		if (vg_name && (vginfo = vginfo_from_vgname(vg_name, NULL)) &&
316 		    vginfo->infos.n) {
317 
318 			if (_read_vg_pds(fmt, mem, vginfo, pdhead, &totaldevs)) {
319 				/*
320 				 * If we found all the devices we were
321 				 * expecting, return success
322 				 */
323 				if (dm_list_size(pdhead) == totaldevs)
324 					return 1;
325 
326 				/*
327 				 * accept partial pool if we've done a full
328 				 * rescan of the cache
329 				 */
330 				if (full_scan > 0)
331 					return 1;
332 			}
333 		}
334 		/* Failed */
335 		dm_list_init(pdhead);
336 
337 		full_scan++;
338 		if (full_scan > 1) {
339 			log_debug("No devices for vg %s found in cache",
340 				  vg_name);
341 			return 0;
342 		}
343 		lvmcache_label_scan(fmt->cmd, full_scan);
344 
	} while (1);
}
348 
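/*
 * Open @dev, read its pool label into a pool_list newly allocated from
 * @mem, and close the device again.  Returns NULL on failure.
 */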
349 struct pool_list *read_pool_disk(const struct format_type *fmt,
350 				 struct device *dev, struct dm_pool *mem,
351 				 const char *vg_name)
352 {
353 	struct pool_list *pl;
354 
	if (!dev_open(dev))
		return_NULL;

	if (!(pl = dm_pool_zalloc(mem, sizeof(*pl)))) {
		log_error("Unable to allocate pool list structure");
		if (!dev_close(dev))
			stack;
		return NULL;
	}

	if (!_read_pool_disk(fmt, dev, mem, pl, vg_name)) {
		stack;
		pl = NULL;
	}

	if (!dev_close(dev))
		stack;

	return pl;
}
372