xref: /freebsd/sys/geom/uzip/g_uzip.c (revision 148a8da8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004 Max Khon
5  * Copyright (c) 2014 Juniper Networks, Inc.
6  * Copyright (c) 2006-2016 Maxim Sobolev <sobomax@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/bio.h>
36 #include <sys/endian.h>
37 #include <sys/errno.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
43 #include <sys/systm.h>
44 #include <sys/kthread.h>
45 
46 #include <geom/geom.h>
47 
48 #include <geom/uzip/g_uzip.h>
49 #include <geom/uzip/g_uzip_cloop.h>
50 #include <geom/uzip/g_uzip_softc.h>
51 #include <geom/uzip/g_uzip_dapi.h>
52 #include <geom/uzip/g_uzip_zlib.h>
53 #include <geom/uzip/g_uzip_lzma.h>
54 #include <geom/uzip/g_uzip_wrkthr.h>
55 
56 #include "opt_geom.h"
57 
/* Malloc type used for all of geom_uzip's own allocations (softc, TOC, cache). */
MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures");

/* Advertise the feature so userland can probe for it via kern.features. */
FEATURE(geom_uzip, "GEOM read-only compressed disks support");
61 
/*
 * One TOC (table of contents) entry: where a compressed cluster lives
 * on the backing provider and how many compressed bytes it occupies.
 */
struct g_uzip_blk {
        uint64_t offset;	/* byte offset of compressed data on media */
        uint32_t blen;		/* compressed length; 0 = all-zero cluster */
        unsigned char last:1;	/* set on the highest-offset (last) cluster */
        unsigned char padded:1;	/* blen was trimmed down to dcp->max_blen */
#define BLEN_UNDEF      UINT32_MAX	/* blen not yet computed by the TOC parser */
};
69 
#ifndef ABS
#define	ABS(a)			((a) < 0 ? -(a) : (a))
#endif

/*
 * True when cluster mcn lies within ilen clusters of bcn; ilen may be
 * negative to look backwards.  Always false when bcn is BLEN_UNDEF
 * (i.e. the debug cluster sysctl is unset).
 */
#define BLK_IN_RANGE(mcn, bcn, ilen)	\
    (((bcn) != BLEN_UNDEF) && ( \
	((ilen) >= 0 && (mcn >= bcn) && (mcn <= ((intmax_t)(bcn) + (ilen)))) || \
	((ilen) < 0 && (mcn <= bcn) && (mcn >= ((intmax_t)(bcn) + (ilen)))) \
    ))

#ifdef GEOM_UZIP_DEBUG
# define GEOM_UZIP_DBG_DEFAULT	3
#else
# define GEOM_UZIP_DBG_DEFAULT	0
#endif

/* Debug verbosity levels, in increasing order of chattiness. */
#define	GUZ_DBG_ERR	1
#define	GUZ_DBG_INFO	2
#define	GUZ_DBG_IO	3
#define	GUZ_DBG_TOC	4

/* Providers we create are named "<parent>.uzip". */
#define	GUZ_DEV_SUFX	".uzip"
#define	GUZ_DEV_NAME(p)	(p GUZ_DEV_SUFX)
93 
/*
 * Glob patterns controlling which providers are tasted: a provider is
 * considered only if it matches attach_to and does not match
 * noattach_to (which by default excludes our own "*.uzip" providers).
 */
static char g_uzip_attach_to[MAXPATHLEN] = {"*"};
static char g_uzip_noattach_to[MAXPATHLEN] = {GUZ_DEV_NAME("*")};
TUNABLE_STR("kern.geom.uzip.attach_to", g_uzip_attach_to,
    sizeof(g_uzip_attach_to));
TUNABLE_STR("kern.geom.uzip.noattach_to", g_uzip_noattach_to,
    sizeof(g_uzip_noattach_to));

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, uzip, CTLFLAG_RW, 0, "GEOM_UZIP stuff");
/* Global verbosity threshold for the DPRINTF* macros (GUZ_DBG_*). */
static u_int g_uzip_debug = GEOM_UZIP_DBG_DEFAULT;
SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug, CTLFLAG_RWTUN, &g_uzip_debug, 0,
    "Debug level (0-4)");
/* Focused debugging around a single cluster number; BLEN_UNDEF = disabled. */
static u_int g_uzip_debug_block = BLEN_UNDEF;
SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug_block, CTLFLAG_RWTUN,
    &g_uzip_debug_block, 0, "Debug operations around specific cluster#");
109 
/*
 * Debug logging helpers.  "a" is a fully parenthesized printf() argument
 * list.  DPRINTF fires when the debug level is at least lvl; DPRINTF_BLK
 * additionally fires when cluster cn is within +/-8 clusters of
 * kern.geom.uzip.debug_block; DPRINTF_BRNG fires when the debug cluster
 * falls inside [bcn, ecn).
 *
 * Wrapped in do { } while (0) so each expands to exactly one statement;
 * the previous bare if/KASSERT+if bodies were unsafe under an un-braced
 * if/else at the call site (dangling-else hazard).
 */
#define	DPRINTF(lvl, a)		\
	do { \
		if ((lvl) <= g_uzip_debug) { \
			printf a; \
		} \
	} while (0)
#define	DPRINTF_BLK(lvl, cn, a)	\
	do { \
		if ((lvl) <= g_uzip_debug || \
		    BLK_IN_RANGE(cn, g_uzip_debug_block, 8) || \
		    BLK_IN_RANGE(cn, g_uzip_debug_block, -8)) { \
			printf a; \
		} \
	} while (0)
#define	DPRINTF_BRNG(lvl, bcn, ecn, a) \
	do { \
		KASSERT(bcn < ecn, ("DPRINTF_BRNG: invalid range (%ju, %ju)", \
		    (uintmax_t)bcn, (uintmax_t)ecn)); \
		if (((lvl) <= g_uzip_debug) || \
		    BLK_IN_RANGE(g_uzip_debug_block, bcn, \
		     (intmax_t)ecn - (intmax_t)bcn)) { \
			printf a; \
		} \
	} while (0)
128 
#define	UZIP_CLASS_NAME	"UZIP"

/*
 * Maximum allowed valid block size (to prevent foot-shooting)
 */
#define	MAX_BLKSZ	(MAXPHYS)

/* cloop images begin with a shell interpreter line; used as the magic. */
static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";

/* Forward declarations for the read-completion and worker-thread hooks. */
static void g_uzip_read_done(struct bio *bp);
static void g_uzip_do(struct g_uzip_softc *, struct bio *bp);
140 
/*
 * Final teardown of a softc: synchronously stop the worker thread, then
 * release every resource the softc owns.  gp may be NULL; it is used
 * only for the request-statistics printout.
 */
static void
g_uzip_softc_free(struct g_uzip_softc *sc, struct g_geom *gp)
{

	if (gp != NULL) {
		DPRINTF(GUZ_DBG_INFO, ("%s: %d requests, %d cached\n",
		    gp->name, sc->req_total, sc->req_cached));
	}

	/*
	 * Ask the worker thread to exit and wait (under queue_mtx) until
	 * it acknowledges with GUZ_EXITING; the timeout guards against a
	 * wakeup racing the sleep.
	 */
	mtx_lock(&sc->queue_mtx);
	sc->wrkthr_flags |= GUZ_SHUTDOWN;
	wakeup(sc);
	while (!(sc->wrkthr_flags & GUZ_EXITING)) {
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "guzfree",
		    hz / 10);
	}
	mtx_unlock(&sc->queue_mtx);

	/* Worker is gone; it is now safe to free everything it used. */
	sc->dcp->free(sc->dcp);
	free(sc->toc, M_GEOM_UZIP);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
	free(sc->last_buf, M_GEOM_UZIP);
	free(sc, M_GEOM_UZIP);
}
166 
/*
 * Try to serve the head of request bp from the one-cluster cache of the
 * most recently decompressed cluster (sc->last_buf / sc->last_blk,
 * guarded by sc->last_mtx).
 *
 * On a hit, copies what the cache covers, advances bio_completed and
 * bio_resid, and delivers bp if that exhausted it.  Returns 1 when bp
 * was delivered, 0 when the caller still has to read more data.
 */
static int
g_uzip_cached(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	off_t ofs;
	size_t blk, blkofs, usz;

	sc = gp->softc;
	/* First byte the request still needs. */
	ofs = bp->bio_offset + bp->bio_completed;
	blk = ofs / sc->blksz;
	mtx_lock(&sc->last_mtx);
	if (blk == sc->last_blk) {
		blkofs = ofs % sc->blksz;
		/* Bytes the cached cluster can contribute. */
		usz = sc->blksz - blkofs;
		if (bp->bio_resid < usz)
			usz = bp->bio_resid;
		memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs,
		    usz);
		sc->req_cached++;
		/* Drop the lock before logging/bookkeeping. */
		mtx_unlock(&sc->last_mtx);

		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: offset=%jd: got %jd bytes "
		    "from cache\n", __func__, gp->name, bp, (intmax_t)ofs,
		    (intmax_t)usz));

		bp->bio_completed += usz;
		bp->bio_resid -= usz;

		if (bp->bio_resid == 0) {
			/* Whole request satisfied from the cache. */
			g_io_deliver(bp, 0);
			return (1);
		}
	} else
		mtx_unlock(&sc->last_mtx);

	return (0);
}
204 
/* Byte offset just past the compressed data of TOC entry bi. */
#define BLK_ENDS(sc, bi)	((sc)->toc[(bi)].offset + \
    (sc)->toc[(bi)].blen)

/* True when entry bi starts exactly where entry bi-1 ends (contiguous). */
#define BLK_IS_CONT(sc, bi)	(BLK_ENDS((sc), (bi) - 1) == \
    (sc)->toc[(bi)].offset)
/* True when entry bi is a Nil (all-zero, zero-length) cluster. */
#define	BLK_IS_NIL(sc, bi)	((sc)->toc[(bi)].blen == 0)

/* Round a TOC offset down to the provider's sector grid for the read. */
#define TOFF_2_BOFF(sc, pp, bi)	    ((sc)->toc[(bi)].offset - \
    (sc)->toc[(bi)].offset % (pp)->sectorsize)
/* Sector-rounded physical read length ending at TOC entry ei. */
#define	TLEN_2_BLEN(sc, pp, bp, ei) roundup(BLK_ENDS((sc), (ei)) - \
    (bp)->bio_offset, (pp)->sectorsize)
216 
/*
 * Advance request bp: serve it from the cluster cache if possible, fill
 * any leading all-zero ("Nil") clusters inline, then clone bp and issue
 * one physical read covering as many contiguous compressed clusters as
 * fit in MAXPHYS.  Decompression happens later in g_uzip_do().
 *
 * Returns 1 when bp was completed (delivered with success or error),
 * 0 when a physical read is in flight and completion comes later.
 */
static int
g_uzip_request(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t ofs, start_blk_ofs;
	size_t i, start_blk, end_blk, zsize;

	if (g_uzip_cached(gp, bp) != 0)
		return (1);

	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Resume from wherever previous passes over this bio left off. */
	ofs = bp->bio_offset + bp->bio_completed;
	start_blk = ofs / sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	end_blk = howmany(ofs + bp->bio_resid, sc->blksz);
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	for (; BLK_IS_NIL(sc, start_blk) && start_blk < end_blk; start_blk++) {
		/* Fill in any leading Nil blocks */
		start_blk_ofs = ofs % sc->blksz;
		zsize = MIN(sc->blksz - start_blk_ofs, bp->bio_resid);
		DPRINTF_BLK(GUZ_DBG_IO, start_blk, ("%s/%s: %p/%ju: "
		    "filling %ju zero bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed, (uintmax_t)zsize));
		bzero(bp->bio_data + bp->bio_completed, zsize);
		bp->bio_completed += zsize;
		bp->bio_resid -= zsize;
		ofs += zsize;
	}

	if (start_blk == end_blk) {
		KASSERT(bp->bio_resid == 0, ("bp->bio_resid is invalid"));
		/*
		 * No non-Nil data is left, complete request immediately.
		 */
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: all done returning %ju "
		    "bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed));
		g_io_deliver(bp, 0);
		return (1);
	}

	for (i = start_blk + 1; i < end_blk; i++) {
		/* Trim discontinuous areas if any */
		if (!BLK_IS_CONT(sc, i)) {
			end_blk = i;
			break;
		}
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "start=%u (%ju[%jd]), end=%u (%ju)\n", __func__, gp->name, bp,
	    (u_int)start_blk, (uintmax_t)sc->toc[start_blk].offset,
	    (intmax_t)sc->toc[start_blk].blen,
	    (u_int)end_blk, (uintmax_t)BLK_ENDS(sc, end_blk - 1)));

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return (1);
	}
	bp2->bio_done = g_uzip_read_done;

	/* Align the read on the sector grid... */
	bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk);
	while (1) {
		/* ...and shed trailing clusters until it fits in MAXPHYS. */
		bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1);
		if (bp2->bio_length <= MAXPHYS) {
			break;
		}
		if (end_blk == (start_blk + 1)) {
			/* A single cluster never shrinks further. */
			break;
		}
		end_blk--;
	}

	DPRINTF(GUZ_DBG_IO, ("%s/%s: bp2->bio_length = %jd, "
	    "bp2->bio_offset = %jd\n", __func__, gp->name,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return (1);
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "reading %jd bytes from offset %jd\n", __func__, gp->name, bp,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	g_io_request(bp2, cp);
	return (0);
}
317 
318 static void
319 g_uzip_read_done(struct bio *bp)
320 {
321 	struct bio *bp2;
322 	struct g_geom *gp;
323 	struct g_uzip_softc *sc;
324 
325 	bp2 = bp->bio_parent;
326 	gp = bp2->bio_to->geom;
327 	sc = gp->softc;
328 
329 	mtx_lock(&sc->queue_mtx);
330 	bioq_disksort(&sc->bio_queue, bp);
331 	mtx_unlock(&sc->queue_mtx);
332 	wakeup(sc);
333 }
334 
/*
 * Return non-zero if every one of the "size" bytes at "memory" equals
 * "val", zero otherwise.  An empty range is vacuously all-equal.
 *
 * The size == 0 guard also prevents the previous undefined behavior:
 * an out-of-bounds read of *mm and a (size - 1) underflow that asked
 * memcmp() to compare SIZE_MAX bytes.
 */
static int
g_uzip_memvcmp(const void *memory, unsigned char val, size_t size)
{
	const unsigned char *mm;

	if (size == 0)
		return (1);
	mm = (const unsigned char *)memory;
	/* First byte checked directly; the rest by self-overlap compare. */
	return (*mm == val && memcmp(mm, mm + 1, size - 1) == 0);
}
343 
344 static void
345 g_uzip_do(struct g_uzip_softc *sc, struct bio *bp)
346 {
347 	struct bio *bp2;
348 	struct g_provider *pp;
349 	struct g_consumer *cp;
350 	struct g_geom *gp;
351 	char *data, *data2;
352 	off_t ofs;
353 	size_t blk, blkofs, len, ulen, firstblk;
354 	int err;
355 
356 	bp2 = bp->bio_parent;
357 	gp = bp2->bio_to->geom;
358 
359 	cp = LIST_FIRST(&gp->consumer);
360 	pp = cp->provider;
361 
362 	bp2->bio_error = bp->bio_error;
363 	if (bp2->bio_error != 0)
364 		goto done;
365 
366 	/* Make sure there's forward progress. */
367 	if (bp->bio_completed == 0) {
368 		bp2->bio_error = ECANCELED;
369 		goto done;
370 	}
371 
372 	ofs = bp2->bio_offset + bp2->bio_completed;
373 	firstblk = blk = ofs / sc->blksz;
374 	blkofs = ofs % sc->blksz;
375 	data = bp->bio_data + sc->toc[blk].offset % pp->sectorsize;
376 	data2 = bp2->bio_data + bp2->bio_completed;
377 	while (bp->bio_completed && bp2->bio_resid) {
378 		if (blk > firstblk && !BLK_IS_CONT(sc, blk)) {
379 			DPRINTF_BLK(GUZ_DBG_IO, blk, ("%s/%s: %p: backref'ed "
380 			    "cluster #%u requested, looping around\n",
381 			    __func__, gp->name, bp2, (u_int)blk));
382 			goto done;
383 		}
384 		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
385 		len = sc->toc[blk].blen;
386 		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p/%ju: data2=%p, ulen=%u, "
387 		    "data=%p, len=%u\n", __func__, gp->name, gp,
388 		    bp->bio_completed, data2, (u_int)ulen, data, (u_int)len));
389 		if (len == 0) {
390 			/* All zero block: no cache update */
391 zero_block:
392 			bzero(data2, ulen);
393 		} else if (len <= bp->bio_completed) {
394 			mtx_lock(&sc->last_mtx);
395 			err = sc->dcp->decompress(sc->dcp, gp->name, data,
396 			    len, sc->last_buf);
397 			if (err != 0 && sc->toc[blk].last != 0) {
398 				/*
399 				 * Last block decompression has failed, check
400 				 * if it's just zero padding.
401 				 */
402 				if (g_uzip_memvcmp(data, '\0', len) == 0) {
403 					sc->toc[blk].blen = 0;
404 					sc->last_blk = -1;
405 					mtx_unlock(&sc->last_mtx);
406 					len = 0;
407 					goto zero_block;
408 				}
409 			}
410 			if (err != 0) {
411 				sc->last_blk = -1;
412 				mtx_unlock(&sc->last_mtx);
413 				bp2->bio_error = EILSEQ;
414 				DPRINTF(GUZ_DBG_ERR, ("%s/%s: decompress"
415 				    "(%p, %ju, %ju) failed\n", __func__,
416 				    gp->name, sc->dcp, (uintmax_t)blk,
417 				    (uintmax_t)len));
418 				goto done;
419 			}
420 			sc->last_blk = blk;
421 			memcpy(data2, sc->last_buf + blkofs, ulen);
422 			mtx_unlock(&sc->last_mtx);
423 			err = sc->dcp->rewind(sc->dcp, gp->name);
424 			if (err != 0) {
425 				bp2->bio_error = EILSEQ;
426 				DPRINTF(GUZ_DBG_ERR, ("%s/%s: rewind(%p) "
427 				    "failed\n", __func__, gp->name, sc->dcp));
428 				goto done;
429 			}
430 			data += len;
431 		} else
432 			break;
433 
434 		data2 += ulen;
435 		bp2->bio_completed += ulen;
436 		bp2->bio_resid -= ulen;
437 		bp->bio_completed -= len;
438 		blkofs = 0;
439 		blk++;
440 	}
441 
442 done:
443 	/* Finish processing the request. */
444 	free(bp->bio_data, M_GEOM_UZIP);
445 	g_destroy_bio(bp);
446 	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
447 		g_io_deliver(bp2, bp2->bio_error);
448 	else
449 		g_uzip_request(gp, bp2);
450 }
451 
452 static void
453 g_uzip_start(struct bio *bp)
454 {
455 	struct g_provider *pp;
456 	struct g_geom *gp;
457 	struct g_uzip_softc *sc;
458 
459 	pp = bp->bio_to;
460 	gp = pp->geom;
461 
462 	DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, "
463 	    "buffer=%p\n", __func__, gp->name, bp, bp->bio_cmd,
464 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length, bp->bio_data));
465 
466 	sc = gp->softc;
467 	sc->req_total++;
468 
469 	if (bp->bio_cmd == BIO_GETATTR) {
470 		struct bio *bp2;
471 		struct g_consumer *cp;
472 		struct g_geom *gp;
473 		struct g_provider *pp;
474 
475 		/* pass on MNT:* requests and ignore others */
476 		if (strncmp(bp->bio_attribute, "MNT:", 4) == 0) {
477 			bp2 = g_clone_bio(bp);
478 			if (bp2 == NULL) {
479 				g_io_deliver(bp, ENOMEM);
480 				return;
481 			}
482 			bp2->bio_done = g_std_done;
483 			pp = bp->bio_to;
484 			gp = pp->geom;
485 			cp = LIST_FIRST(&gp->consumer);
486 			g_io_request(bp2, cp);
487 			return;
488 		}
489 	}
490 	if (bp->bio_cmd != BIO_READ) {
491 		g_io_deliver(bp, EOPNOTSUPP);
492 		return;
493 	}
494 
495 	bp->bio_resid = bp->bio_length;
496 	bp->bio_completed = 0;
497 
498 	g_uzip_request(gp, bp);
499 }
500 
501 static void
502 g_uzip_orphan(struct g_consumer *cp)
503 {
504 	struct g_geom *gp;
505 
506 	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->provider->name);
507 	g_topology_assert();
508 
509 	gp = cp->geom;
510 	g_uzip_softc_free(gp->softc, gp);
511 	gp->softc = NULL;
512 	g_wither_geom(gp, ENXIO);
513 }
514 
515 static int
516 g_uzip_access(struct g_provider *pp, int dr, int dw, int de)
517 {
518 	struct g_geom *gp;
519 	struct g_consumer *cp;
520 
521 	gp = pp->geom;
522 	cp = LIST_FIRST(&gp->consumer);
523 	KASSERT (cp != NULL, ("g_uzip_access but no consumer"));
524 
525 	if (cp->acw + dw > 0)
526 		return (EROFS);
527 
528 	return (g_access(cp, dr, dw, de));
529 }
530 
531 static void
532 g_uzip_spoiled(struct g_consumer *cp)
533 {
534 	struct g_geom *gp;
535 
536 	G_VALID_CONSUMER(cp);
537 	gp = cp->geom;
538 	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
539 	g_topology_assert();
540 
541 	g_uzip_softc_free(gp->softc, gp);
542 	gp->softc = NULL;
543 	g_wither_geom(gp, ENXIO);
544 }
545 
/*
 * Validate the raw TOC offsets read from the image and compute each
 * cluster's compressed length (blen).  Clusters whose offset points
 * backwards are "backrefs" sharing data with an earlier cluster; the
 * rest get their length from the distance to the next higher offset
 * (the sentinel toc[nblocks] entry covers the last cluster).
 *
 * Returns 0 on success, -1 when the TOC is malformed.
 */
static int
g_uzip_parse_toc(struct g_uzip_softc *sc, struct g_provider *pp,
    struct g_geom *gp)
{
	uint32_t i, j, backref_to;
	uint64_t max_offset, min_offset;
	struct g_uzip_blk *last_blk;

	/* No cluster may start inside the header + offset table itself. */
	min_offset = sizeof(struct cloop_header) +
	    (sc->nblocks + 1) * sizeof(uint64_t);
	max_offset = sc->toc[0].offset - 1;
	last_blk = &sc->toc[0];
	for (i = 0; i < sc->nblocks; i++) {
		/* First do some bounds checking */
		if ((sc->toc[i].offset < min_offset) ||
		    (sc->toc[i].offset > pp->mediasize)) {
			goto error_offset;
		}
		DPRINTF_BLK(GUZ_DBG_IO, i, ("%s: cluster #%u "
		    "offset=%ju max_offset=%ju\n", gp->name,
		    (u_int)i, (uintmax_t)sc->toc[i].offset,
		    (uintmax_t)max_offset));
		backref_to = BLEN_UNDEF;
		if (sc->toc[i].offset < max_offset) {
			/*
			 * For the backref'ed blocks search already parsed
			 * TOC entries for the matching offset and copy the
			 * size from matched entry.
			 *
			 * NOTE(review): the bound is j <= i, so at j == i the
			 * entry trivially matches itself (its blen is still
			 * BLEN_UNDEF, i.e. not Nil) and the error return
			 * below appears unreachable; the bogus BLEN_UNDEF
			 * length is then caught by the second pass unless
			 * this is the last cluster.  Presumably j < i was
			 * intended — verify against upstream history.
			 */
			for (j = 0; j <= i; j++) {
                                if (sc->toc[j].offset == sc->toc[i].offset &&
				    !BLK_IS_NIL(sc, j)) {
                                        break;
                                }
                                if (j != i) {
					continue;
				}
				DPRINTF(GUZ_DBG_ERR, ("%s: cannot match "
				    "backref'ed offset at cluster #%u\n",
				    gp->name, i));
				return (-1);
			}
			sc->toc[i].blen = sc->toc[j].blen;
			backref_to = j;
		} else {
			last_blk = &sc->toc[i];
			/*
			 * For the "normal blocks" seek forward until we hit
			 * block whose offset is larger than ours and assume
			 * it's going to be the next one.
			 */
			for (j = i + 1; j < sc->nblocks; j++) {
				if (sc->toc[j].offset > max_offset) {
					break;
				}
			}
			/* May land on the sentinel toc[nblocks] entry. */
			sc->toc[i].blen = sc->toc[j].offset -
			    sc->toc[i].offset;
			if (BLK_ENDS(sc, i) > pp->mediasize) {
				DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
				    "extends past media boundary (%ju > %ju)\n",
				    gp->name, (u_int)i,
				    (uintmax_t)BLK_ENDS(sc, i),
				    (intmax_t)pp->mediasize));
				return (-1);
			}
			KASSERT(max_offset <= sc->toc[i].offset, (
			    "%s: max_offset is incorrect: %ju",
			    gp->name, (uintmax_t)max_offset));
			max_offset = BLK_ENDS(sc, i) - 1;
		}
		DPRINTF_BLK(GUZ_DBG_TOC, i, ("%s: cluster #%u, original %u "
		    "bytes, in %u bytes", gp->name, i, sc->blksz,
		    sc->toc[i].blen));
		if (backref_to != BLEN_UNDEF) {
			DPRINTF_BLK(GUZ_DBG_TOC, i, (" (->#%u)",
			    (u_int)backref_to));
		}
		DPRINTF_BLK(GUZ_DBG_TOC, i, ("\n"));
	}
	last_blk->last = 1;
	/* Do a second pass to validate block lengths */
	for (i = 0; i < sc->nblocks; i++) {
		if (sc->toc[i].blen > sc->dcp->max_blen) {
			if (sc->toc[i].last == 0) {
				DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
				    "length (%ju) exceeds "
				    "max_blen (%ju)\n", gp->name, i,
				    (uintmax_t)sc->toc[i].blen,
				    (uintmax_t)sc->dcp->max_blen));
				return (-1);
			}
			/* The last cluster may carry trailing padding: trim. */
			DPRINTF(GUZ_DBG_INFO, ("%s: cluster #%u extra "
			    "padding is detected, trimmed to %ju\n",
			    gp->name, i, (uintmax_t)sc->dcp->max_blen));
			    sc->toc[i].blen = sc->dcp->max_blen;
			sc->toc[i].padded = 1;
		}
	}
	return (0);

error_offset:
	DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u: invalid offset %ju, "
	    "min_offset=%ju mediasize=%jd\n", gp->name, (u_int)i,
	    sc->toc[i].offset, min_offset, pp->mediasize));
	return (-1);
}
653 
654 static struct g_geom *
655 g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
656 {
657 	int error;
658 	uint32_t i, total_offsets, offsets_read, blk;
659 	void *buf;
660 	struct cloop_header *header;
661 	struct g_consumer *cp;
662 	struct g_geom *gp;
663 	struct g_provider *pp2;
664 	struct g_uzip_softc *sc;
665 	enum {
666 		G_UZIP = 1,
667 		G_ULZMA
668 	} type;
669 
670 	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
671 	g_topology_assert();
672 
673 	/* Skip providers that are already open for writing. */
674 	if (pp->acw > 0)
675 		return (NULL);
676 
677 	if ((fnmatch(g_uzip_attach_to, pp->name, 0) != 0) ||
678 	    (fnmatch(g_uzip_noattach_to, pp->name, 0) == 0)) {
679 		DPRINTF(GUZ_DBG_INFO, ("%s(%s,%s), ignoring\n", __func__,
680 		    mp->name, pp->name));
681 		return (NULL);
682 	}
683 
684 	buf = NULL;
685 
686 	/*
687 	 * Create geom instance.
688 	 */
689 	gp = g_new_geomf(mp, GUZ_DEV_NAME("%s"), pp->name);
690 	cp = g_new_consumer(gp);
691 	error = g_attach(cp, pp);
692 	if (error == 0)
693 		error = g_access(cp, 1, 0, 0);
694 	if (error) {
695 		goto e1;
696 	}
697 	g_topology_unlock();
698 
699 	/*
700 	 * Read cloop header, look for CLOOP magic, perform
701 	 * other validity checks.
702 	 */
703 	DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
704 	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
705 	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
706 	if (buf == NULL)
707 		goto e2;
708 	header = (struct cloop_header *) buf;
709 	if (strncmp(header->magic, CLOOP_MAGIC_START,
710 	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
711 		DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
712 		goto e3;
713 	}
714 
715 	switch (header->magic[CLOOP_OFS_COMPR]) {
716 	case CLOOP_COMP_LZMA:
717 	case CLOOP_COMP_LZMA_DDP:
718 		type = G_ULZMA;
719 		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_LZMA) {
720 			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
721 			    gp->name));
722 			goto e3;
723 		}
724 		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
725 		    gp->name));
726 		break;
727 	case CLOOP_COMP_LIBZ:
728 	case CLOOP_COMP_LIBZ_DDP:
729 		type = G_UZIP;
730 		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_ZLIB) {
731 			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
732 			    gp->name));
733 			goto e3;
734 		}
735 		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
736 		    gp->name));
737 		break;
738 	default:
739 		DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
740 		    gp->name));
741                 goto e3;
742         }
743 
744 	/*
745 	 * Initialize softc and read offsets.
746 	 */
747 	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
748 	gp->softc = sc;
749 	sc->blksz = ntohl(header->blksz);
750 	sc->nblocks = ntohl(header->nblocks);
751 	if (sc->blksz % 512 != 0) {
752 		printf("%s: block size (%u) should be multiple of 512.\n",
753 		    gp->name, sc->blksz);
754 		goto e4;
755 	}
756 	if (sc->blksz > MAX_BLKSZ) {
757 		printf("%s: block size (%u) should not be larger than %d.\n",
758 		    gp->name, sc->blksz, MAX_BLKSZ);
759 	}
760 	total_offsets = sc->nblocks + 1;
761 	if (sizeof(struct cloop_header) +
762 	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
763 		printf("%s: media too small for %u blocks\n",
764 		    gp->name, sc->nblocks);
765 		goto e4;
766 	}
767 	sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
768 	    M_GEOM_UZIP, M_WAITOK | M_ZERO);
769 	offsets_read = MIN(total_offsets,
770 	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
771 	for (i = 0; i < offsets_read; i++) {
772 		sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
773 		sc->toc[i].blen = BLEN_UNDEF;
774 	}
775 	DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
776 	       gp->name, offsets_read));
777 	for (blk = 1; offsets_read < total_offsets; blk++) {
778 		uint32_t nread;
779 
780 		free(buf, M_GEOM);
781 		buf = g_read_data(
782 		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
783 		if (buf == NULL)
784 			goto e5;
785 		nread = MIN(total_offsets - offsets_read,
786 		     pp->sectorsize / sizeof(uint64_t));
787 		DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
788 		    gp->name, nread, blk));
789 		for (i = 0; i < nread; i++) {
790 			sc->toc[offsets_read + i].offset =
791 			    be64toh(((uint64_t *) buf)[i]);
792 			sc->toc[offsets_read + i].blen = BLEN_UNDEF;
793 		}
794 		offsets_read += nread;
795 	}
796 	free(buf, M_GEOM);
797 	buf = NULL;
798 	offsets_read -= 1;
799 	DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
800 	    "sectors\n", gp->name, offsets_read, blk));
801 	if (sc->nblocks != offsets_read) {
802 		DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
803 		    "blocks\n", gp->name,
804 		    sc->nblocks < offsets_read ? "more" : "less"));
805 		goto e5;
806 	}
807 
808 	if (type == G_UZIP) {
809 		sc->dcp = g_uzip_zlib_ctor(sc->blksz);
810 	} else {
811 		sc->dcp = g_uzip_lzma_ctor(sc->blksz);
812 	}
813 	if (sc->dcp == NULL) {
814 		goto e5;
815 	}
816 
817 	/*
818 	 * "Fake" last+1 block, to make it easier for the TOC parser to
819 	 * iterate without making the last element a special case.
820 	 */
821 	sc->toc[sc->nblocks].offset = pp->mediasize;
822 	/* Massage TOC (table of contents), make sure it is sound */
823 	if (g_uzip_parse_toc(sc, pp, gp) != 0) {
824 		DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
825 		goto e6;
826 	}
827 	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
828 	mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
829 	bioq_init(&sc->bio_queue);
830 	sc->last_blk = -1;
831 	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
832 	sc->req_total = 0;
833 	sc->req_cached = 0;
834 
835 	sc->uzip_do = &g_uzip_do;
836 
837 	error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
838 	    gp->name);
839 	if (error != 0) {
840 		goto e7;
841 	}
842 
843 	g_topology_lock();
844 	pp2 = g_new_providerf(gp, "%s", gp->name);
845 	pp2->sectorsize = 512;
846 	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
847 	pp2->stripesize = pp->stripesize;
848 	pp2->stripeoffset = pp->stripeoffset;
849 	g_error_provider(pp2, 0);
850 	g_access(cp, -1, 0, 0);
851 
852 	DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %ju), (%ju, %ju), %x\n",
853 	    gp->name, pp2->sectorsize, (uintmax_t)pp2->mediasize,
854 	    (uintmax_t)pp2->stripeoffset, (uintmax_t)pp2->stripesize, pp2->flags));
855 	DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks,
856 	    sc->blksz));
857 	return (gp);
858 
859 e7:
860 	free(sc->last_buf, M_GEOM);
861 	mtx_destroy(&sc->queue_mtx);
862 	mtx_destroy(&sc->last_mtx);
863 e6:
864 	sc->dcp->free(sc->dcp);
865 e5:
866 	free(sc->toc, M_GEOM);
867 e4:
868 	free(gp->softc, M_GEOM_UZIP);
869 e3:
870 	if (buf != NULL) {
871 		free(buf, M_GEOM);
872 	}
873 e2:
874 	g_topology_lock();
875 	g_access(cp, -1, 0, 0);
876 e1:
877 	g_detach(cp);
878 	g_destroy_consumer(cp);
879 	g_destroy_geom(gp);
880 
881 	return (NULL);
882 }
883 
884 static int
885 g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
886 {
887 	struct g_provider *pp;
888 
889 	KASSERT(gp != NULL, ("NULL geom"));
890 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
891 	g_topology_assert();
892 
893 	if (gp->softc == NULL) {
894 		DPRINTF(GUZ_DBG_ERR, ("%s(%s): gp->softc == NULL\n", __func__,
895 		    gp->name));
896 		return (ENXIO);
897 	}
898 
899 	pp = LIST_FIRST(&gp->provider);
900 	KASSERT(pp != NULL, ("NULL provider"));
901 	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
902 		return (EBUSY);
903 
904 	g_uzip_softc_free(gp->softc, gp);
905 	gp->softc = NULL;
906 	g_wither_geom(gp, ENXIO);
907 
908 	return (0);
909 }
910 
/* GEOM class descriptor: wires our methods into the GEOM framework. */
static struct g_class g_uzip_class = {
	.name = UZIP_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_uzip_taste,
	.destroy_geom = g_uzip_destroy_geom,

	.start = g_uzip_start,
	.orphan = g_uzip_orphan,
	.access = g_uzip_access,
	.spoiled = g_uzip_spoiled,
};

DECLARE_GEOM_CLASS(g_uzip_class, g_uzip);
/* The decompressors live in separate kernel modules. */
MODULE_DEPEND(g_uzip, xz, 1, 1, 1);
MODULE_DEPEND(g_uzip, zlib, 1, 1, 1);
MODULE_VERSION(geom_uzip, 0);
927