/*
 * resource.c
 *
 * Code for reading blobs and resources, including compressed WIM resources.
 */

/*
 * Copyright (C) 2012, 2013, 2015 Eric Biggers
 *
 * This file is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this file; if not, see http://www.gnu.org/licenses/.
 */

#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include "wimlib/alloca.h"
#include "wimlib/assert.h"
#include "wimlib/bitops.h"
#include "wimlib/blob_table.h"
#include "wimlib/endianness.h"
#include "wimlib/error.h"
#include "wimlib/file_io.h"
#include "wimlib/ntfs_3g.h"
#include "wimlib/resource.h"
#include "wimlib/sha1.h"
#include "wimlib/wim.h"
#include "wimlib/win32.h"

/*
 *                         Compressed WIM resources
 *
 * A compressed resource in a WIM consists of a sequence of chunks.  Each chunk
 * decompresses to the same size except possibly for the last, which
 * decompresses to the remaining size.  Chunks that did not compress to less
 * than their original size are stored uncompressed.
 *
 * We support three variations on this resource format, independently of the
 * compression type and chunk size, which can vary as well:
 *
 * - Original resource format: immediately before the compressed chunks, the
 *   "chunk table" provides the offset, in bytes relative to the end of the
 *   chunk table, of the start of each compressed chunk, except for the first
 *   chunk, which is omitted as it always has an offset of 0.  Chunk table
 *   entries are 32-bit for resources < 4 GiB uncompressed and 64-bit for
 *   resources >= 4 GiB uncompressed.
 *
 * - Solid resource format (distinguished by the use of WIM_RESHDR_FLAG_SOLID
 *   instead of WIM_RESHDR_FLAG_COMPRESSED): similar to the original format, but
 *   the resource begins with a 16-byte header which specifies the uncompressed
 *   size of the resource, the compression type, and the chunk size.  (In the
 *   original format, these values were instead determined from outside the
 *   resource itself, from the blob table and the WIM file header.)  In
 *   addition, in this format the entries in the chunk table contain compressed
 *   chunk sizes rather than offsets.  As a consequence, the chunk table entries
 *   are always 32-bit and there is an entry for chunk 0.
 *
 * - Pipable resource format (wimlib extension; all resources in a pipable WIM
 *   have this format): similar to the original format, but the chunk table is
 *   at the end of the resource rather than the beginning, and each compressed
 *   chunk is prefixed with its compressed size as a 32-bit integer.  This
 *   format allows a resource to be written without rewinding.
 */
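
/*
 * A minimal illustrative sketch of the chunk geometry described above (for
 * exposition only; not used by the code below, and the variable names are
 * hypothetical):
 *
 *	// Every chunk but the last decompresses to exactly chunk_size bytes.
 *	u64 num_chunks  = (uncompressed_size + chunk_size - 1) / chunk_size;
 *
 *	// Original format: 32-bit entries below 4 GiB uncompressed, 64-bit
 *	// otherwise; the first chunk has no entry since its offset is 0.
 *	u64 entry_size  = (uncompressed_size < ((u64)1 << 32)) ? 4 : 8;
 *	u64 num_entries = num_chunks - 1;
 *
 *	// Solid format: entries are always 32-bit and hold compressed chunk
 *	// sizes, one per chunk (including chunk 0).
 */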


struct data_range {
	u64 offset;
	u64 size;
};

/*
 * Read data from a compressed WIM resource.
 *
 * @rdesc
 *	Description of the compressed WIM resource to read from.
 * @ranges
 *	Nonoverlapping, nonempty ranges of the uncompressed resource data to
 *	read, sorted by increasing offset.
 * @num_ranges
 *	Number of ranges in @ranges; must be at least 1.
 * @cb
 *	Structure which provides the consume_chunk callback into which to feed
 *	the data being read.  Each call provides the next chunk of the requested
 *	data, uncompressed.  Each chunk will be nonempty and will not cross
 *	range boundaries but otherwise will be of unspecified size.
 *
 * Possible return values:
 *
 *	WIMLIB_ERR_SUCCESS (0)
 *	WIMLIB_ERR_READ			  (errno set)
 *	WIMLIB_ERR_UNEXPECTED_END_OF_FILE (errno set to EINVAL)
 *	WIMLIB_ERR_NOMEM		  (errno set to ENOMEM)
 *	WIMLIB_ERR_DECOMPRESSION	  (errno set to EINVAL)
 *	WIMLIB_ERR_INVALID_CHUNK_SIZE	  (errno set to EINVAL)
 *
 *	or other error code returned by the callback function.
 */
static int
read_compressed_wim_resource(const struct wim_resource_descriptor * const rdesc,
			     const struct data_range * const ranges,
			     const size_t num_ranges,
			     const struct consume_chunk_callback *cb)
{
	int ret;
	u64 *chunk_offsets = NULL;
	u8 *ubuf = NULL;
	void *cbuf = NULL;
	bool chunk_offsets_malloced = false;
	bool ubuf_malloced = false;
	bool cbuf_malloced = false;
	struct wimlib_decompressor *decompressor = NULL;

	/* Sanity checks  */
	wimlib_assert(num_ranges != 0);
	for (size_t i = 0; i < num_ranges; i++) {
		wimlib_assert(ranges[i].offset + ranges[i].size > ranges[i].offset &&
			      ranges[i].offset + ranges[i].size <= rdesc->uncompressed_size);
	}
	for (size_t i = 0; i < num_ranges - 1; i++)
		wimlib_assert(ranges[i].offset + ranges[i].size <= ranges[i + 1].offset);

	/* Get the offsets of the first and last bytes of the read.  */
	const u64 first_offset = ranges[0].offset;
	const u64 last_offset = ranges[num_ranges - 1].offset + ranges[num_ranges - 1].size - 1;

	/* Get the file descriptor for the WIM.  */
	struct filedes * const in_fd = &rdesc->wim->in_fd;

	/* Determine if we're reading a pipable resource from a pipe or not.  */
	const bool is_pipe_read = (rdesc->is_pipable && !filedes_is_seekable(in_fd));

	/* Determine if the chunk table is in an alternate format.  */
	const bool alt_chunk_table = (rdesc->flags & WIM_RESHDR_FLAG_SOLID)
					&& !is_pipe_read;

	/* Get the maximum size of uncompressed chunks in this resource, which
	 * we require to be a power of 2.  */
	u64 cur_read_offset = rdesc->offset_in_wim;
	int ctype = rdesc->compression_type;
	u32 chunk_size = rdesc->chunk_size;
	if (alt_chunk_table) {
		/* Alternate chunk table format.  Its header specifies the chunk
		 * size and compression format.  Note: it could be read here;
		 * however, the relevant data was already loaded into @rdesc by
		 * read_blob_table().  */
		cur_read_offset += sizeof(struct alt_chunk_table_header_disk);
	}

	if (unlikely(!is_power_of_2(chunk_size))) {
		ERROR("Invalid compressed resource: "
		      "expected power-of-2 chunk size (got %"PRIu32")",
		      chunk_size);
		ret = WIMLIB_ERR_INVALID_CHUNK_SIZE;
		errno = EINVAL;
		goto out_cleanup;
	}

	/* Get valid decompressor.  */
	if (likely(ctype == rdesc->wim->decompressor_ctype &&
		   chunk_size == rdesc->wim->decompressor_max_block_size))
	{
		/* Cached decompressor.  */
		decompressor = rdesc->wim->decompressor;
		rdesc->wim->decompressor_ctype = WIMLIB_COMPRESSION_TYPE_NONE;
		rdesc->wim->decompressor = NULL;
	} else {
		ret = wimlib_create_decompressor(ctype, chunk_size,
						 &decompressor);
		if (unlikely(ret)) {
			if (ret != WIMLIB_ERR_NOMEM)
				errno = EINVAL;
			goto out_cleanup;
		}
	}

	const u32 chunk_order = bsr32(chunk_size);

	/* Calculate the total number of chunks the resource is divided into.  */
	const u64 num_chunks = (rdesc->uncompressed_size + chunk_size - 1) >> chunk_order;

	/* Calculate the 0-based indices of the first and last chunks containing
	 * data that needs to be passed to the callback.  */
	const u64 first_needed_chunk = first_offset >> chunk_order;
	const u64 last_needed_chunk = last_offset >> chunk_order;

	/* Calculate the 0-based index of the first chunk that actually needs to
	 * be read.  This is normally first_needed_chunk, but for pipe reads we
	 * must always start from the 0th chunk.  */
	const u64 read_start_chunk = (is_pipe_read ? 0 : first_needed_chunk);

	/* Calculate the number of chunk offsets that are needed for the chunks
	 * being read.  */
	const u64 num_needed_chunk_offsets =
		last_needed_chunk - read_start_chunk + 1 +
		(last_needed_chunk < num_chunks - 1);

	/* Calculate the number of entries in the chunk table.  Normally, it's
	 * one less than the number of chunks, since the first chunk has no
	 * entry.  But in the alternate chunk table format, the chunk entries
	 * contain chunk sizes, not offsets, and there is one per chunk.  */
	const u64 num_chunk_entries = (alt_chunk_table ? num_chunks : num_chunks - 1);

	/* Set the size of each chunk table entry based on the resource's
	 * uncompressed size.  */
	const u64 chunk_entry_size = get_chunk_entry_size(rdesc->uncompressed_size,
							  alt_chunk_table);

	/* Calculate the size of the chunk table in bytes.  */
	const u64 chunk_table_size = num_chunk_entries * chunk_entry_size;

	/* Calculate the size of the chunk table in bytes, including the header
	 * in the case of the alternate chunk table format.  */
	const u64 chunk_table_full_size =
		(alt_chunk_table) ? chunk_table_size + sizeof(struct alt_chunk_table_header_disk)
				  : chunk_table_size;

	if (!is_pipe_read) {
		/* Read the needed chunk table entries into memory and use them
		 * to initialize the chunk_offsets array.  */

		u64 first_chunk_entry_to_read;
		u64 num_chunk_entries_to_read;

		if (alt_chunk_table) {
			/* The alternate chunk table contains chunk sizes, not
			 * offsets, so we must always read all preceding entries
			 * in order to determine offsets.  */
			first_chunk_entry_to_read = 0;
			num_chunk_entries_to_read = last_needed_chunk + 1;
		} else {

			num_chunk_entries_to_read = last_needed_chunk - read_start_chunk + 1;

			/* The first chunk has no explicit chunk table entry.  */
			if (read_start_chunk == 0) {
				num_chunk_entries_to_read--;
				first_chunk_entry_to_read = 0;
			} else {
				first_chunk_entry_to_read = read_start_chunk - 1;
			}

			/* Unless we're reading the final chunk of the resource,
			 * we need the offset of the chunk following the last
			 * needed chunk so that the compressed size of the last
			 * needed chunk can be computed.  */
			if (last_needed_chunk < num_chunks - 1)
				num_chunk_entries_to_read++;
		}

		const u64 chunk_offsets_alloc_size =
			max(num_chunk_entries_to_read,
			    num_needed_chunk_offsets) * sizeof(chunk_offsets[0]);

		if (unlikely((size_t)chunk_offsets_alloc_size != chunk_offsets_alloc_size)) {
			errno = ENOMEM;
			goto oom;
		}

		if (likely(chunk_offsets_alloc_size <= STACK_MAX)) {
			chunk_offsets = alloca(chunk_offsets_alloc_size);
		} else {
			chunk_offsets = MALLOC(chunk_offsets_alloc_size);
			if (unlikely(!chunk_offsets))
				goto oom;
			chunk_offsets_malloced = true;
		}

		const size_t chunk_table_size_to_read =
			num_chunk_entries_to_read * chunk_entry_size;

		const u64 file_offset_of_needed_chunk_entries =
			cur_read_offset
			+ (first_chunk_entry_to_read * chunk_entry_size)
			+ (rdesc->is_pipable ? (rdesc->size_in_wim - chunk_table_size) : 0);

		void * const chunk_table_data =
			(u8*)chunk_offsets +
			chunk_offsets_alloc_size -
			chunk_table_size_to_read;

		ret = full_pread(in_fd, chunk_table_data, chunk_table_size_to_read,
				 file_offset_of_needed_chunk_entries);
		if (unlikely(ret))
			goto read_error;

		/* Now fill in chunk_offsets from the entries we have read in
		 * chunk_table_data.  We break aliasing rules here to avoid
		 * having to allocate yet another array.  */
		typedef le64 _may_alias_attribute aliased_le64_t;
		typedef le32 _may_alias_attribute aliased_le32_t;
		u64 * chunk_offsets_p = chunk_offsets;

		if (alt_chunk_table) {
			u64 cur_offset = 0;
			aliased_le32_t *raw_entries = chunk_table_data;

			for (size_t i = 0; i < num_chunk_entries_to_read; i++) {
				u32 entry = le32_to_cpu(raw_entries[i]);
				if (i >= read_start_chunk)
					*chunk_offsets_p++ = cur_offset;
				cur_offset += entry;
			}
			if (last_needed_chunk < num_chunks - 1)
				*chunk_offsets_p = cur_offset;
		} else {
			if (read_start_chunk == 0)
				*chunk_offsets_p++ = 0;

			if (chunk_entry_size == 4) {
				aliased_le32_t *raw_entries = chunk_table_data;
				for (size_t i = 0; i < num_chunk_entries_to_read; i++)
					*chunk_offsets_p++ = le32_to_cpu(raw_entries[i]);
			} else {
				aliased_le64_t *raw_entries = chunk_table_data;
				for (size_t i = 0; i < num_chunk_entries_to_read; i++)
					*chunk_offsets_p++ = le64_to_cpu(raw_entries[i]);
			}
		}

		/* Set offset to beginning of first chunk to read.  */
		cur_read_offset += chunk_offsets[0];
		if (rdesc->is_pipable)
			cur_read_offset += read_start_chunk * sizeof(struct pwm_chunk_hdr);
		else
			cur_read_offset += chunk_table_size;
	}

	/* Allocate buffer for holding the uncompressed data of each chunk.  */
	if (chunk_size <= STACK_MAX) {
		ubuf = alloca(chunk_size);
	} else {
		ubuf = MALLOC(chunk_size);
		if (unlikely(!ubuf))
			goto oom;
		ubuf_malloced = true;
	}

	/* Allocate a temporary buffer for reading compressed chunks, each of
	 * which can be at most @chunk_size - 1 bytes.  This excludes compressed
	 * chunks that are a full @chunk_size bytes, which are actually stored
	 * uncompressed.  */
	if (chunk_size - 1 <= STACK_MAX) {
		cbuf = alloca(chunk_size - 1);
	} else {
		cbuf = MALLOC(chunk_size - 1);
		if (unlikely(!cbuf))
			goto oom;
		cbuf_malloced = true;
	}

	/* Set current data range.  */
	const struct data_range *cur_range = ranges;
	const struct data_range * const end_range = &ranges[num_ranges];
	u64 cur_range_pos = cur_range->offset;
	u64 cur_range_end = cur_range->offset + cur_range->size;

	/* Read and process each needed chunk.  */
	for (u64 i = read_start_chunk; i <= last_needed_chunk; i++) {

		/* Calculate uncompressed size of next chunk.  */
		u32 chunk_usize;
		if ((i == num_chunks - 1) && (rdesc->uncompressed_size & (chunk_size - 1)))
			chunk_usize = (rdesc->uncompressed_size & (chunk_size - 1));
		else
			chunk_usize = chunk_size;

		/* Calculate compressed size of next chunk.  */
		u32 chunk_csize;
		if (is_pipe_read) {
			struct pwm_chunk_hdr chunk_hdr;

			ret = full_pread(in_fd, &chunk_hdr,
					 sizeof(chunk_hdr), cur_read_offset);
			if (unlikely(ret))
				goto read_error;
			chunk_csize = le32_to_cpu(chunk_hdr.compressed_size);
		} else {
			if (i == num_chunks - 1) {
				chunk_csize = rdesc->size_in_wim -
					      chunk_table_full_size -
					      chunk_offsets[i - read_start_chunk];
				if (rdesc->is_pipable)
					chunk_csize -= num_chunks * sizeof(struct pwm_chunk_hdr);
			} else {
				chunk_csize = chunk_offsets[i + 1 - read_start_chunk] -
					      chunk_offsets[i - read_start_chunk];
			}
		}
		if (unlikely(chunk_csize == 0 || chunk_csize > chunk_usize)) {
			ERROR("Invalid chunk size in compressed resource!");
			errno = EINVAL;
			ret = WIMLIB_ERR_DECOMPRESSION;
			goto out_cleanup;
		}
		if (rdesc->is_pipable)
			cur_read_offset += sizeof(struct pwm_chunk_hdr);

		/* Offsets in the uncompressed resource at which this chunk
		 * starts and ends.  */
		const u64 chunk_start_offset = i << chunk_order;
		const u64 chunk_end_offset = chunk_start_offset + chunk_usize;

		if (chunk_end_offset <= cur_range_pos) {

			/* The next range does not require data in this chunk,
			 * so skip it.  */
			cur_read_offset += chunk_csize;
			if (is_pipe_read) {
				u8 dummy;

				ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
				if (unlikely(ret))
					goto read_error;
			}
		} else {

			/* Read the chunk and feed data to the callback
			 * function.  */
			u8 *read_buf;

			if (chunk_csize == chunk_usize)
				read_buf = ubuf;
			else
				read_buf = cbuf;

			ret = full_pread(in_fd,
					 read_buf,
					 chunk_csize,
					 cur_read_offset);
			if (unlikely(ret))
				goto read_error;

			if (read_buf == cbuf) {
				ret = wimlib_decompress(cbuf,
							chunk_csize,
							ubuf,
							chunk_usize,
							decompressor);
				if (unlikely(ret)) {
					ERROR("Failed to decompress data!");
					ret = WIMLIB_ERR_DECOMPRESSION;
					errno = EINVAL;
					goto out_cleanup;
				}
			}
			cur_read_offset += chunk_csize;

			/* At least one range requires data in this chunk.  */
			do {
				size_t start, end, size;

				/* Calculate how many bytes of data should be
				 * sent to the callback function, taking into
				 * account that data sent to the callback
				 * function must not overlap range boundaries.
				 */
				start = cur_range_pos - chunk_start_offset;
				end = min(cur_range_end, chunk_end_offset) - chunk_start_offset;
				size = end - start;

				ret = consume_chunk(cb, &ubuf[start], size);
				if (unlikely(ret))
					goto out_cleanup;

				cur_range_pos += size;
				if (cur_range_pos == cur_range_end) {
					/* Advance to next range.  */
					if (++cur_range == end_range) {
						cur_range_pos = ~0ULL;
					} else {
						cur_range_pos = cur_range->offset;
						cur_range_end = cur_range->offset + cur_range->size;
					}
				}
			} while (cur_range_pos < chunk_end_offset);
		}
	}

	if (is_pipe_read &&
	    last_offset == rdesc->uncompressed_size - 1 &&
	    chunk_table_size)
	{
		u8 dummy;
		/* If reading a pipable resource from a pipe and the full data
		 * was requested, skip the chunk table at the end so that the
		 * file descriptor is fully clear of the resource after this
		 * returns.  */
		cur_read_offset += chunk_table_size;
		ret = full_pread(in_fd, &dummy, 1, cur_read_offset - 1);
		if (unlikely(ret))
			goto read_error;
	}
	ret = 0;

out_cleanup:
	if (decompressor) {
		wimlib_free_decompressor(rdesc->wim->decompressor);
		rdesc->wim->decompressor = decompressor;
		rdesc->wim->decompressor_ctype = ctype;
		rdesc->wim->decompressor_max_block_size = chunk_size;
	}
	if (chunk_offsets_malloced)
		FREE(chunk_offsets);
	if (ubuf_malloced)
		FREE(ubuf);
	if (cbuf_malloced)
		FREE(cbuf);
	return ret;

oom:
	ERROR("Out of memory while reading compressed WIM resource");
	ret = WIMLIB_ERR_NOMEM;
	goto out_cleanup;

read_error:
	ERROR_WITH_ERRNO("Error reading data from WIM file");
	goto out_cleanup;
}

/* Read raw data from a file descriptor at the specified offset, feeding the
 * data in nonempty chunks into the specified callback function.  */
static int
read_raw_file_data(struct filedes *in_fd, u64 offset, u64 size,
		   const struct consume_chunk_callback *cb,
		   const tchar *filename)
{
	u8 buf[BUFFER_SIZE];
	size_t bytes_to_read;
	int ret;

	while (size) {
		bytes_to_read = min(sizeof(buf), size);
		ret = full_pread(in_fd, buf, bytes_to_read, offset);
		if (unlikely(ret))
			goto read_error;
		ret = consume_chunk(cb, buf, bytes_to_read);
		if (unlikely(ret))
			return ret;
		size -= bytes_to_read;
		offset += bytes_to_read;
	}
	return 0;

read_error:
	if (!filename) {
		ERROR_WITH_ERRNO("Error reading data from WIM file");
	} else if (ret == WIMLIB_ERR_UNEXPECTED_END_OF_FILE) {
		ERROR("\"%"TS"\": File was concurrently truncated", filename);
		ret = WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
	} else {
		ERROR_WITH_ERRNO("\"%"TS"\": Error reading data", filename);
	}
	return ret;
}

/* A consume_chunk implementation which simply concatenates all chunks into an
 * in-memory buffer.  */
static int
bufferer_cb(const void *chunk, size_t size, void *_ctx)
{
	void **buf_p = _ctx;

	*buf_p = mempcpy(*buf_p, chunk, size);
	return 0;
}

/*
 * Read @size bytes at @offset in the WIM resource described by @rdesc and feed
 * the data into the @cb callback function.
 *
 * @offset and @size are assumed to have already been validated against the
 * resource's uncompressed size.
 *
 * Returns 0 on success; or the first nonzero value returned by the callback
 * function; or a nonzero wimlib error code with errno set as well.
 */
static int
read_partial_wim_resource(const struct wim_resource_descriptor *rdesc,
			  const u64 offset, const u64 size,
			  const struct consume_chunk_callback *cb)
{
	if (rdesc->flags & (WIM_RESHDR_FLAG_COMPRESSED |
			    WIM_RESHDR_FLAG_SOLID))
	{
		/* Compressed resource  */
		if (unlikely(!size))
			return 0;
		struct data_range range = {
			.offset = offset,
			.size = size,
		};
		return read_compressed_wim_resource(rdesc, &range, 1, cb);
	}

	/* Uncompressed resource  */
	return read_raw_file_data(&rdesc->wim->in_fd,
				  rdesc->offset_in_wim + offset,
				  size, cb, NULL);
}

/* Read the specified range of uncompressed data from the specified blob, which
 * must be located in a WIM file, into the specified buffer.  */
int
read_partial_wim_blob_into_buf(const struct blob_descriptor *blob,
			       u64 offset, size_t size, void *buf)
{
	struct consume_chunk_callback cb = {
		.func	= bufferer_cb,
		.ctx	= &buf,
	};
	return read_partial_wim_resource(blob->rdesc,
					 blob->offset_in_res + offset,
					 size,
					 &cb);
}

static int
noop_cb(const void *chunk, size_t size, void *_ctx)
{
	return 0;
}

/* Skip over the data of the specified WIM resource.  */
int
skip_wim_resource(const struct wim_resource_descriptor *rdesc)
{
	static const struct consume_chunk_callback cb = {
		.func = noop_cb,
	};
	return read_partial_wim_resource(rdesc, 0,
					 rdesc->uncompressed_size, &cb);
}

static int
read_wim_blob_prefix(const struct blob_descriptor *blob, u64 size,
		     const struct consume_chunk_callback *cb)
{
	return read_partial_wim_resource(blob->rdesc, blob->offset_in_res,
					 size, cb);
}

/* This function handles reading blob data that is located in an external file,
 * such as a file that has been added to the WIM image through execution of a
 * wimlib_add_command.
 *
 * This assumes the file can be accessed using the standard POSIX open(),
 * read(), and close().  On Windows this will not necessarily be the case (since
 * the file may need FILE_FLAG_BACKUP_SEMANTICS to be opened, or the file may be
 * encrypted), so Windows uses its own code for its equivalent case.  */
static int
read_file_on_disk_prefix(const struct blob_descriptor *blob, u64 size,
			 const struct consume_chunk_callback *cb)
{
	int ret;
	int raw_fd;
	struct filedes fd;

	raw_fd = topen(blob->file_on_disk, O_BINARY | O_RDONLY);
	if (unlikely(raw_fd < 0)) {
		ERROR_WITH_ERRNO("Can't open \"%"TS"\"", blob->file_on_disk);
		return WIMLIB_ERR_OPEN;
	}
	filedes_init(&fd, raw_fd);
	ret = read_raw_file_data(&fd, 0, size, cb, blob->file_on_disk);
	filedes_close(&fd);
	return ret;
}

#ifdef WITH_FUSE
static int
read_staging_file_prefix(const struct blob_descriptor *blob, u64 size,
			 const struct consume_chunk_callback *cb)
{
	int raw_fd;
	struct filedes fd;
	int ret;

	raw_fd = openat(blob->staging_dir_fd, blob->staging_file_name,
			O_RDONLY | O_NOFOLLOW);
	if (unlikely(raw_fd < 0)) {
		ERROR_WITH_ERRNO("Can't open staging file \"%s\"",
				 blob->staging_file_name);
		return WIMLIB_ERR_OPEN;
	}
	filedes_init(&fd, raw_fd);
	ret = read_raw_file_data(&fd, 0, size, cb, blob->staging_file_name);
	filedes_close(&fd);
	return ret;
}
#endif

/* This function handles the trivial case of reading blob data that is, in fact,
 * already located in an in-memory buffer.  */
static int
read_buffer_prefix(const struct blob_descriptor *blob,
		   u64 size, const struct consume_chunk_callback *cb)
{
	if (unlikely(!size))
		return 0;
	return consume_chunk(cb, blob->attached_buffer, size);
}

typedef int (*read_blob_prefix_handler_t)(const struct blob_descriptor *blob,
					  u64 size,
					  const struct consume_chunk_callback *cb);

/*
 * Read the first @size bytes from a generic "blob", which may be located in any
 * one of several locations, such as in a WIM resource (possibly compressed), in
 * an external file, or directly in an in-memory buffer.  The blob data will be
 * fed to @cb in chunks that are nonempty but otherwise are of unspecified size.
 *
 * Returns 0 on success; nonzero on error.  A nonzero value will be returned if
 * the blob data cannot be successfully read (for a number of different reasons,
 * depending on the blob location), or if @cb returned nonzero in which case
 * that error code will be returned.
 */
static int
read_blob_prefix(const struct blob_descriptor *blob, u64 size,
		 const struct consume_chunk_callback *cb)
{
	static const read_blob_prefix_handler_t handlers[] = {
		[BLOB_IN_WIM] = read_wim_blob_prefix,
		[BLOB_IN_FILE_ON_DISK] = read_file_on_disk_prefix,
		[BLOB_IN_ATTACHED_BUFFER] = read_buffer_prefix,
	#ifdef WITH_FUSE
		[BLOB_IN_STAGING_FILE] = read_staging_file_prefix,
	#endif
	#ifdef WITH_NTFS_3G
		[BLOB_IN_NTFS_VOLUME] = read_ntfs_attribute_prefix,
	#endif
	#ifdef __WIN32__
		[BLOB_IN_WINDOWS_FILE] = read_windows_file_prefix,
	#endif
	};
	wimlib_assert(blob->blob_location < ARRAY_LEN(handlers)
		      && handlers[blob->blob_location] != NULL);
	wimlib_assert(size <= blob->size);
	return handlers[blob->blob_location](blob, size, cb);
}

struct blob_chunk_ctx {
	const struct blob_descriptor *blob;
	const struct read_blob_callbacks *cbs;
	u64 offset;
};

static int
consume_blob_chunk(const void *chunk, size_t size, void *_ctx)
{
	struct blob_chunk_ctx *ctx = _ctx;
	int ret;

	ret = call_continue_blob(ctx->blob, ctx->offset, chunk, size, ctx->cbs);
	ctx->offset += size;
	return ret;
}

/* Read the full data of the specified blob, passing the data into the specified
 * callbacks (all of which are optional).  */
int
read_blob_with_cbs(struct blob_descriptor *blob,
		   const struct read_blob_callbacks *cbs)
{
	int ret;
	struct blob_chunk_ctx ctx = {
		.blob = blob,
		.offset = 0,
		.cbs = cbs,
	};
	struct consume_chunk_callback cb = {
		.func = consume_blob_chunk,
		.ctx = &ctx,
	};

	ret = call_begin_blob(blob, cbs);
	if (unlikely(ret))
		return ret;

	ret = read_blob_prefix(blob, blob->size, &cb);

	return call_end_blob(blob, ret, cbs);
}

/* Read the full uncompressed data of the specified blob into the specified
 * buffer, which must have space for at least blob->size bytes.  The SHA-1
 * message digest is *not* checked.  */
int
read_blob_into_buf(const struct blob_descriptor *blob, void *buf)
{
	struct consume_chunk_callback cb = {
		.func	= bufferer_cb,
		.ctx	= &buf,
	};
	return read_blob_prefix(blob, blob->size, &cb);
}

/* Retrieve the full uncompressed data of the specified blob.  A buffer large
 * enough to hold the data is allocated and returned in @buf_ret.  The SHA-1
 * message digest is *not* checked.  */
int
read_blob_into_alloc_buf(const struct blob_descriptor *blob, void **buf_ret)
{
	int ret;
	void *buf;

	if (unlikely((size_t)blob->size != blob->size)) {
		ERROR("Can't read %"PRIu64" byte blob into memory", blob->size);
		return WIMLIB_ERR_NOMEM;
	}

	buf = MALLOC(blob->size);
	if (unlikely(!buf))
		return WIMLIB_ERR_NOMEM;

	ret = read_blob_into_buf(blob, buf);
	if (unlikely(ret)) {
		FREE(buf);
		return ret;
	}

	*buf_ret = buf;
	return 0;
}

/* Retrieve the full uncompressed data of a WIM resource specified as a raw
 * `wim_reshdr' and the corresponding WIM file.  A buffer large enough to hold
 * the data is allocated and returned in @buf_ret.  */
int
wim_reshdr_to_data(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   void **buf_ret)
{
	struct wim_resource_descriptor rdesc;
	struct blob_descriptor blob;

	wim_reshdr_to_desc_and_blob(reshdr, wim, &rdesc, &blob);

	return read_blob_into_alloc_buf(&blob, buf_ret);
}

/* Calculate the SHA-1 message digest of the uncompressed data of the specified
 * WIM resource.  */
int
wim_reshdr_to_hash(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   u8 hash[SHA1_HASH_SIZE])
{
	struct wim_resource_descriptor rdesc;
	struct blob_descriptor blob;
	int ret;

	wim_reshdr_to_desc_and_blob(reshdr, wim, &rdesc, &blob);
	blob.unhashed = 1;

	ret = sha1_blob(&blob);
	if (unlikely(ret))
		return ret;

	copy_hash(hash, blob.hash);
	return 0;
}

struct blobifier_context {
	struct read_blob_callbacks cbs;
	struct blob_descriptor *cur_blob;
	struct blob_descriptor *next_blob;
	u64 cur_blob_offset;
	struct blob_descriptor *final_blob;
	size_t list_head_offset;
};

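/* Step from @blob to the next blob descriptor in the list, given the byte
 * offset of the embedded 'struct list_head' within each blob descriptor
 * (the usual container-of pattern, spelled out with explicit pointer
 * arithmetic).  */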
static struct blob_descriptor *
next_blob(struct blob_descriptor *blob, size_t list_head_offset)
{
	struct list_head *cur;

	cur = (struct list_head*)((u8*)blob + list_head_offset);

	return (struct blob_descriptor*)((u8*)cur->next - list_head_offset);
}

/*
 * A consume_chunk implementation that translates raw resource data into blobs,
 * calling the begin_blob, continue_blob, and end_blob callbacks as appropriate.
 */
static int
blobifier_cb(const void *chunk, size_t size, void *_ctx)
{
	struct blobifier_context *ctx = _ctx;
	int ret;

	wimlib_assert(ctx->cur_blob != NULL);
	wimlib_assert(size <= ctx->cur_blob->size - ctx->cur_blob_offset);

	if (ctx->cur_blob_offset == 0) {
		/* Starting a new blob.  */
		ret = call_begin_blob(ctx->cur_blob, &ctx->cbs);
		if (ret)
			return ret;
	}

	ret = call_continue_blob(ctx->cur_blob, ctx->cur_blob_offset,
				 chunk, size, &ctx->cbs);
	ctx->cur_blob_offset += size;
	if (ret)
		return ret;

	if (ctx->cur_blob_offset == ctx->cur_blob->size) {
		/* Finished reading all the data for a blob.  */

		ctx->cur_blob_offset = 0;

		ret = call_end_blob(ctx->cur_blob, 0, &ctx->cbs);
		if (ret)
			return ret;

		/* Advance to next blob.  */
		ctx->cur_blob = ctx->next_blob;
		if (ctx->cur_blob != NULL) {
			if (ctx->cur_blob != ctx->final_blob)
				ctx->next_blob = next_blob(ctx->cur_blob,
							   ctx->list_head_offset);
			else
				ctx->next_blob = NULL;
		}
	}
	return 0;
}

struct hasher_context {
	SHA_CTX sha_ctx;
	int flags;
	struct read_blob_callbacks cbs;
};

/* Callback for starting to read a blob while calculating its SHA-1 message
 * digest.  */
static int
hasher_begin_blob(struct blob_descriptor *blob, void *_ctx)
{
	struct hasher_context *ctx = _ctx;

	sha1_init(&ctx->sha_ctx);

	return call_begin_blob(blob, &ctx->cbs);
}

/*
 * A continue_blob() implementation that continues calculating the SHA-1 message
 * digest of the blob being read, then optionally passes the data on to another
 * continue_blob() implementation.  This allows checking the SHA-1 message
 * digest of a blob being extracted, for example.
 */
static int
hasher_continue_blob(const struct blob_descriptor *blob, u64 offset,
		     const void *chunk, size_t size, void *_ctx)
{
	struct hasher_context *ctx = _ctx;

	sha1_update(&ctx->sha_ctx, chunk, size);

	return call_continue_blob(blob, offset, chunk, size, &ctx->cbs);
}

static int
report_sha1_mismatch_error(const struct blob_descriptor *blob,
			   const u8 actual_hash[SHA1_HASH_SIZE])
{
	tchar expected_hashstr[SHA1_HASH_SIZE * 2 + 1];
	tchar actual_hashstr[SHA1_HASH_SIZE * 2 + 1];

	wimlib_assert(blob->blob_location != BLOB_NONEXISTENT);
	wimlib_assert(blob->blob_location != BLOB_IN_ATTACHED_BUFFER);

	sprint_hash(blob->hash, expected_hashstr);
	sprint_hash(actual_hash, actual_hashstr);

	if (blob_is_in_file(blob)) {
		ERROR("A file was concurrently modified!\n"
		      "        Path: \"%"TS"\"\n"
		      "        Expected SHA-1: %"TS"\n"
		      "        Actual SHA-1: %"TS"\n",
		      blob_file_path(blob), expected_hashstr, actual_hashstr);
		return WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
	} else if (blob->blob_location == BLOB_IN_WIM) {
		const struct wim_resource_descriptor *rdesc = blob->rdesc;
		ERROR("A WIM resource is corrupted!\n"
		      "        WIM file: \"%"TS"\"\n"
		      "        Blob uncompressed size: %"PRIu64"\n"
		      "        Resource offset in WIM: %"PRIu64"\n"
		      "        Resource uncompressed size: %"PRIu64"\n"
		      "        Resource size in WIM: %"PRIu64"\n"
		      "        Resource flags: 0x%x%"TS"\n"
		      "        Resource compression type: %"TS"\n"
		      "        Resource compression chunk size: %"PRIu32"\n"
		      "        Expected SHA-1: %"TS"\n"
		      "        Actual SHA-1: %"TS"\n",
		      rdesc->wim->filename,
		      blob->size,
		      rdesc->offset_in_wim,
		      rdesc->uncompressed_size,
		      rdesc->size_in_wim,
		      (unsigned int)rdesc->flags,
		      (rdesc->is_pipable ? T(", pipable") : T("")),
		      wimlib_get_compression_type_string(
						rdesc->compression_type),
		      rdesc->chunk_size,
		      expected_hashstr, actual_hashstr);
		return WIMLIB_ERR_INVALID_RESOURCE_HASH;
	} else {
		ERROR("File data was concurrently modified!\n"
		      "        Location ID: %d\n"
		      "        Expected SHA-1: %"TS"\n"
		      "        Actual SHA-1: %"TS"\n",
		      (int)blob->blob_location,
		      expected_hashstr, actual_hashstr);
		return WIMLIB_ERR_CONCURRENT_MODIFICATION_DETECTED;
	}
}

/* Callback for finishing reading a blob while calculating its SHA-1 message
 * digest.  */
static int
hasher_end_blob(struct blob_descriptor *blob, int status, void *_ctx)
{
	struct hasher_context *ctx = _ctx;
	u8 hash[SHA1_HASH_SIZE];
	int ret;

	if (unlikely(status)) {
		/* Error occurred; the full blob may not have been read.  */
		ret = status;
		goto out_next_cb;
	}

	/* Retrieve the final SHA-1 message digest.  */
	sha1_final(hash, &ctx->sha_ctx);

	/* Set the SHA-1 message digest of the blob, or compare the calculated
	 * value with stored value.  */
	if (blob->unhashed) {
		if (ctx->flags & COMPUTE_MISSING_BLOB_HASHES)
			copy_hash(blob->hash, hash);
	} else if ((ctx->flags & VERIFY_BLOB_HASHES) &&
		   unlikely(!hashes_equal(hash, blob->hash)))
	{
		ret = report_sha1_mismatch_error(blob, hash);
		goto out_next_cb;
	}
	ret = 0;
out_next_cb:
	return call_end_blob(blob, ret, &ctx->cbs);
}

/* Read the full data of the specified blob, passing the data into the specified
 * callbacks (all of which are optional) and either checking or computing the
 * SHA-1 message digest of the blob.  */
int
read_blob_with_sha1(struct blob_descriptor *blob,
		    const struct read_blob_callbacks *cbs)
{
	struct hasher_context hasher_ctx = {
		.flags = VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES,
		.cbs = *cbs,
	};
	struct read_blob_callbacks hasher_cbs = {
		.begin_blob	= hasher_begin_blob,
		.continue_blob	= hasher_continue_blob,
		.end_blob	= hasher_end_blob,
		.ctx		= &hasher_ctx,
	};
	return read_blob_with_cbs(blob, &hasher_cbs);
}

static int
read_blobs_in_solid_resource(struct blob_descriptor *first_blob,
			     struct blob_descriptor *last_blob,
			     size_t blob_count,
			     size_t list_head_offset,
			     const struct read_blob_callbacks *sink_cbs)
{
	struct data_range *ranges;
	bool ranges_malloced;
	struct blob_descriptor *cur_blob;
	size_t i;
	int ret;
	u64 ranges_alloc_size;

	/* Set up the data ranges array (one range per blob to read); this way
	 * read_compressed_wim_resource() does not need to be aware of blobs.
	 */

	ranges_alloc_size = (u64)blob_count * sizeof(ranges[0]);

	if (unlikely((size_t)ranges_alloc_size != ranges_alloc_size))
		goto oom;

	if (ranges_alloc_size <= STACK_MAX) {
		ranges = alloca(ranges_alloc_size);
		ranges_malloced = false;
	} else {
		ranges = MALLOC(ranges_alloc_size);
		if (unlikely(!ranges))
			goto oom;
		ranges_malloced = true;
	}

	for (i = 0, cur_blob = first_blob;
	     i < blob_count;
	     i++, cur_blob = next_blob(cur_blob, list_head_offset))
	{
		ranges[i].offset = cur_blob->offset_in_res;
		ranges[i].size = cur_blob->size;
	}

	struct blobifier_context blobifier_ctx = {
		.cbs			= *sink_cbs,
		.cur_blob		= first_blob,
		.next_blob		= next_blob(first_blob, list_head_offset),
		.cur_blob_offset	= 0,
		.final_blob		= last_blob,
		.list_head_offset	= list_head_offset,
	};
	struct consume_chunk_callback cb = {
		.func	= blobifier_cb,
		.ctx	= &blobifier_ctx,
	};

	ret = read_compressed_wim_resource(first_blob->rdesc, ranges,
					   blob_count, &cb);

	if (ranges_malloced)
		FREE(ranges);

	if (unlikely(ret && blobifier_ctx.cur_blob_offset != 0)) {
		ret = call_end_blob(blobifier_ctx.cur_blob, ret,
				    &blobifier_ctx.cbs);
	}
	return ret;

oom:
	ERROR("Too many blobs in one resource!");
	return WIMLIB_ERR_NOMEM;
}

/*
 * Read a list of blobs, each of which may be in any supported location (e.g.
 * in a WIM or in an external file).  This function optimizes the case where
 * multiple blobs are combined into a single solid compressed WIM resource by
 * reading the blobs in sequential order, only decompressing the solid resource
 * one time.
 *
 * @blob_list
 *	List of blobs to read.
 * @list_head_offset
 *	Offset of the `struct list_head' within each `struct blob_descriptor'
 *	that makes up the @blob_list.
 * @cbs
 *	Callback functions to accept the blob data.
 * @flags
 *	Bitwise OR of zero or more of the following flags:
 *
 *	VERIFY_BLOB_HASHES:
 *		For all blobs being read that have already had SHA-1 message
 *		digests computed, calculate the SHA-1 message digest of the read
 *		data and compare it with the previously computed value.  If they
 *		do not match, return WIMLIB_ERR_INVALID_RESOURCE_HASH.
 *
 *	COMPUTE_MISSING_BLOB_HASHES
 *		For all blobs being read that have not yet had their SHA-1
 *		message digests computed, calculate and save their SHA-1 message
 *		digests.
 *
 *	BLOB_LIST_ALREADY_SORTED
 *		@blob_list is already sorted in sequential order for reading.
 *
 * The callback functions are allowed to delete the current blob from the list
 * if necessary.
 *
 * Returns 0 on success; a nonzero error code on failure.  Failure can occur due
 * to an error reading the data or due to an error status being returned by any
 * of the callback functions.
 */
int
read_blob_list(struct list_head *blob_list, size_t list_head_offset,
	       const struct read_blob_callbacks *cbs, int flags)
{
	int ret;
	struct list_head *cur, *next;
	struct blob_descriptor *blob;
	struct hasher_context *hasher_ctx;
	struct read_blob_callbacks *sink_cbs;

	if (!(flags & BLOB_LIST_ALREADY_SORTED)) {
		ret = sort_blob_list_by_sequential_order(blob_list,
							 list_head_offset);
		if (ret)
			return ret;
	}

	if (flags & (VERIFY_BLOB_HASHES | COMPUTE_MISSING_BLOB_HASHES)) {
		hasher_ctx = alloca(sizeof(*hasher_ctx));
		*hasher_ctx = (struct hasher_context) {
			.flags	= flags,
			.cbs	= *cbs,
		};
		sink_cbs = alloca(sizeof(*sink_cbs));
		*sink_cbs = (struct read_blob_callbacks) {
			.begin_blob	= hasher_begin_blob,
			.continue_blob	= hasher_continue_blob,
			.end_blob	= hasher_end_blob,
			.ctx		= hasher_ctx,
		};
	} else {
		sink_cbs = (struct read_blob_callbacks *)cbs;
	}

	for (cur = blob_list->next, next = cur->next;
	     cur != blob_list;
	     cur = next, next = cur->next)
	{
		blob = (struct blob_descriptor*)((u8*)cur - list_head_offset);

		if (blob->blob_location == BLOB_IN_WIM &&
		    blob->size != blob->rdesc->uncompressed_size)
		{
			struct blob_descriptor *blob_next, *blob_last;
			struct list_head *next2;
			size_t blob_count;

			/* The next blob is a proper sub-sequence of a WIM
			 * resource.  See if there are other blobs in the same
			 * resource that need to be read.  Since
			 * sort_blob_list_by_sequential_order() sorted the blobs
			 * by offset in the WIM, this can be determined by
			 * simply scanning forward in the list.  */

			blob_last = blob;
			blob_count = 1;
			for (next2 = next;
			     next2 != blob_list
			     && (blob_next = (struct blob_descriptor*)
						((u8*)next2 - list_head_offset),
				 blob_next->blob_location == BLOB_IN_WIM
				 && blob_next->rdesc == blob->rdesc);
			     next2 = next2->next)
			{
				blob_last = blob_next;
				blob_count++;
			}
			if (blob_count > 1) {
				/* Reading multiple blobs combined into a single
				 * WIM resource.  They are in the blob list,
				 * sorted by offset; @blob specifies the first
				 * blob in the resource that needs to be read
				 * and @blob_last specifies the last blob in the
				 * resource that needs to be read.  */
				next = next2;
				ret = read_blobs_in_solid_resource(blob, blob_last,
								   blob_count,
								   list_head_offset,
								   sink_cbs);
				if (ret)
					return ret;
				continue;
			}
		}

		ret = read_blob_with_cbs(blob, sink_cbs);
		if (unlikely(ret && ret != BEGIN_BLOB_STATUS_SKIP_BLOB))
			return ret;
	}
	return 0;
}

static int
extract_chunk_to_fd(const void *chunk, size_t size, void *_fd)
{
	struct filedes *fd = _fd;
	int ret = full_write(fd, chunk, size);
	if (unlikely(ret))
		ERROR_WITH_ERRNO("Error writing to file descriptor");
	return ret;
}

static int
extract_blob_chunk_to_fd(const struct blob_descriptor *blob, u64 offset,
			 const void *chunk, size_t size, void *_fd)
{
	return extract_chunk_to_fd(chunk, size, _fd);
}

/* Extract the first @size bytes of the specified blob to the specified file
 * descriptor.  This does *not* check the SHA-1 message digest.  */
int
extract_blob_prefix_to_fd(struct blob_descriptor *blob, u64 size,
			  struct filedes *fd)
{
	struct consume_chunk_callback cb = {
		.func	= extract_chunk_to_fd,
		.ctx	= fd,
	};
	return read_blob_prefix(blob, size, &cb);
}

/* Extract the full uncompressed contents of the specified blob to the specified
 * file descriptor.  This checks the SHA-1 message digest.  */
int
extract_blob_to_fd(struct blob_descriptor *blob, struct filedes *fd)
{
	struct read_blob_callbacks cbs = {
		.continue_blob	= extract_blob_chunk_to_fd,
		.ctx		= fd,
	};
	return read_blob_with_sha1(blob, &cbs);
}

/* Calculate the SHA-1 message digest of a blob and store it in @blob->hash.  */
int
sha1_blob(struct blob_descriptor *blob)
{
	static const struct read_blob_callbacks cbs = {
	};
	return read_blob_with_sha1(blob, &cbs);
}

/*
 * Convert a short WIM resource header to a stand-alone WIM resource descriptor.
 *
 * Note: for solid resources some fields still need to be overridden.
 */
void
wim_reshdr_to_desc(const struct wim_reshdr *reshdr, WIMStruct *wim,
		   struct wim_resource_descriptor *rdesc)
{
	rdesc->wim = wim;
	rdesc->offset_in_wim = reshdr->offset_in_wim;
	rdesc->size_in_wim = reshdr->size_in_wim;
	rdesc->uncompressed_size = reshdr->uncompressed_size;
	INIT_LIST_HEAD(&rdesc->blob_list);
	rdesc->flags = reshdr->flags;
	rdesc->is_pipable = wim_is_pipable(wim);
	if (rdesc->flags & WIM_RESHDR_FLAG_COMPRESSED) {
		rdesc->compression_type = wim->compression_type;
		rdesc->chunk_size = wim->chunk_size;
	} else {
		rdesc->compression_type = WIMLIB_COMPRESSION_TYPE_NONE;
		rdesc->chunk_size = 0;
	}
}

/*
 * Convert the short WIM resource header @reshdr to a stand-alone WIM resource
 * descriptor @rdesc, then set @blob to consist of that entire resource.  This
 * should only be used for non-solid resources!
 */
void
wim_reshdr_to_desc_and_blob(const struct wim_reshdr *reshdr, WIMStruct *wim,
			    struct wim_resource_descriptor *rdesc,
			    struct blob_descriptor *blob)
{
	wim_reshdr_to_desc(reshdr, wim, rdesc);
	blob->size = rdesc->uncompressed_size;
	blob_set_is_located_in_wim_resource(blob, rdesc, 0);
}

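/*
 * Sketch of the on-disk resource header layout implied by the accessors
 * below (descriptive only; the authoritative definition is 'struct
 * wim_reshdr_disk' in the headers):
 *
 *	u8   size_in_wim[7];	// 56-bit little-endian size of the resource
 *				// as stored in the WIM file
 *	u8   flags;		// WIM_RESHDR_FLAG_* bits
 *	le64 offset_in_wim;	// byte offset of the resource in the WIM file
 *	le64 uncompressed_size;	// size of the resource after decompression
 */
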
/* Import a WIM resource header from the on-disk format.  */
void
get_wim_reshdr(const struct wim_reshdr_disk *disk_reshdr,
	       struct wim_reshdr *reshdr)
{
	reshdr->offset_in_wim = le64_to_cpu(disk_reshdr->offset_in_wim);
	reshdr->size_in_wim = (((u64)disk_reshdr->size_in_wim[0] <<  0) |
			       ((u64)disk_reshdr->size_in_wim[1] <<  8) |
			       ((u64)disk_reshdr->size_in_wim[2] << 16) |
			       ((u64)disk_reshdr->size_in_wim[3] << 24) |
			       ((u64)disk_reshdr->size_in_wim[4] << 32) |
			       ((u64)disk_reshdr->size_in_wim[5] << 40) |
			       ((u64)disk_reshdr->size_in_wim[6] << 48));
	reshdr->uncompressed_size = le64_to_cpu(disk_reshdr->uncompressed_size);
	reshdr->flags = disk_reshdr->flags;
}

/* Export a WIM resource header to the on-disk format.  */
void
put_wim_reshdr(const struct wim_reshdr *reshdr,
	       struct wim_reshdr_disk *disk_reshdr)
{
	disk_reshdr->size_in_wim[0] = reshdr->size_in_wim  >>  0;
	disk_reshdr->size_in_wim[1] = reshdr->size_in_wim  >>  8;
	disk_reshdr->size_in_wim[2] = reshdr->size_in_wim  >> 16;
	disk_reshdr->size_in_wim[3] = reshdr->size_in_wim  >> 24;
	disk_reshdr->size_in_wim[4] = reshdr->size_in_wim  >> 32;
	disk_reshdr->size_in_wim[5] = reshdr->size_in_wim  >> 40;
	disk_reshdr->size_in_wim[6] = reshdr->size_in_wim  >> 48;
	disk_reshdr->flags = reshdr->flags;
	disk_reshdr->offset_in_wim = cpu_to_le64(reshdr->offset_in_wim);
	disk_reshdr->uncompressed_size = cpu_to_le64(reshdr->uncompressed_size);
}