FreeBSD/Linux Kernel Cross Reference
sys/geom/uzip/g_uzip.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 Max Khon
 * Copyright (c) 2014 Juniper Networks, Inc.
 * Copyright (c) 2006-2016 Maxim Sobolev <sobomax@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_geom.h"
#include "opt_zstdio.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/kthread.h>

#include <geom/geom.h>

#include <geom/uzip/g_uzip.h>
#include <geom/uzip/g_uzip_cloop.h>
#include <geom/uzip/g_uzip_softc.h>
#include <geom/uzip/g_uzip_dapi.h>
#include <geom/uzip/g_uzip_zlib.h>
#include <geom/uzip/g_uzip_lzma.h>
#ifdef ZSTDIO
#include <geom/uzip/g_uzip_zstd.h>
#endif
#include <geom/uzip/g_uzip_wrkthr.h>

MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures");

FEATURE(geom_uzip, "GEOM read-only compressed disks support");

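/*
 * Per-cluster TOC entry.  "offset" is the byte offset of the compressed
 * cluster within the underlying provider; "blen" is its compressed length,
 * computed in g_uzip_parse_toc() (BLEN_UNDEF until then, 0 for all-zero
 * "Nil" clusters).  "last" marks the final real cluster, and "padded" is
 * set when trailing padding had to be trimmed off its length.
 */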
struct g_uzip_blk {
	uint64_t offset;
	uint32_t blen;
	unsigned char last:1;
	unsigned char padded:1;
#define	BLEN_UNDEF	UINT32_MAX
};

#ifndef ABS
#define	ABS(a)	((a) < 0 ? -(a) : (a))
#endif

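/*
 * True when cluster "mcn" lies within "ilen" clusters of "bcn", in the
 * direction given by the sign of "ilen"; used to implement the
 * debug_block sysctl below.
 */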
#define	BLK_IN_RANGE(mcn, bcn, ilen)	\
    (((bcn) != BLEN_UNDEF) && ( \
	((ilen) >= 0 && (mcn >= bcn) && (mcn <= ((intmax_t)(bcn) + (ilen)))) || \
	((ilen) < 0 && (mcn <= bcn) && (mcn >= ((intmax_t)(bcn) + (ilen)))) \
    ))

#ifdef GEOM_UZIP_DEBUG
# define GEOM_UZIP_DBG_DEFAULT	3
#else
# define GEOM_UZIP_DBG_DEFAULT	0
#endif

#define	GUZ_DBG_ERR	1
#define	GUZ_DBG_INFO	2
#define	GUZ_DBG_IO	3
#define	GUZ_DBG_TOC	4

#define	GUZ_DEV_SUFX	".uzip"
#define	GUZ_DEV_NAME(p)	(p GUZ_DEV_SUFX)

static char g_uzip_attach_to[MAXPATHLEN] = {"*"};
static char g_uzip_noattach_to[MAXPATHLEN] = {GUZ_DEV_NAME("*")};
TUNABLE_STR("kern.geom.uzip.attach_to", g_uzip_attach_to,
    sizeof(g_uzip_attach_to));
TUNABLE_STR("kern.geom.uzip.noattach_to", g_uzip_noattach_to,
    sizeof(g_uzip_noattach_to));

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, uzip, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_UZIP stuff");
static u_int g_uzip_debug = GEOM_UZIP_DBG_DEFAULT;
SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug, CTLFLAG_RWTUN, &g_uzip_debug, 0,
    "Debug level (0-4)");
static u_int g_uzip_debug_block = BLEN_UNDEF;
SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug_block, CTLFLAG_RWTUN,
    &g_uzip_debug_block, 0, "Debug operations around specific cluster#");

#define	DPRINTF(lvl, a)	\
	if ((lvl) <= g_uzip_debug) { \
		printf a; \
	}
#define	DPRINTF_BLK(lvl, cn, a)	\
	if ((lvl) <= g_uzip_debug || \
	    BLK_IN_RANGE(cn, g_uzip_debug_block, 8) || \
	    BLK_IN_RANGE(cn, g_uzip_debug_block, -8)) { \
		printf a; \
	}
#define	DPRINTF_BRNG(lvl, bcn, ecn, a) \
	KASSERT(bcn < ecn, ("DPRINTF_BRNG: invalid range (%ju, %ju)", \
	    (uintmax_t)bcn, (uintmax_t)ecn)); \
	if (((lvl) <= g_uzip_debug) || \
	    BLK_IN_RANGE(g_uzip_debug_block, bcn, \
	    (intmax_t)ecn - (intmax_t)bcn)) { \
		printf a; \
	}

#define	UZIP_CLASS_NAME	"UZIP"

/*
 * Maximum allowed valid block size (to prevent foot-shooting)
 */
#define	MAX_BLKSZ	(maxphys)

static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";

static void g_uzip_read_done(struct bio *bp);
static void g_uzip_do(struct g_uzip_softc *, struct bio *bp);

static void
g_uzip_softc_free(struct g_geom *gp)
{
	struct g_uzip_softc *sc = gp->softc;

	DPRINTF(GUZ_DBG_INFO, ("%s: %d requests, %d cached\n",
	    gp->name, sc->req_total, sc->req_cached));

	mtx_lock(&sc->queue_mtx);
	sc->wrkthr_flags |= GUZ_SHUTDOWN;
	wakeup(sc);
	while (!(sc->wrkthr_flags & GUZ_EXITING)) {
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "guzfree",
		    hz / 10);
	}
	mtx_unlock(&sc->queue_mtx);

	sc->dcp->free(sc->dcp);
	free(sc->toc, M_GEOM_UZIP);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
	free(sc->last_buf, M_GEOM_UZIP);
	free(sc, M_GEOM_UZIP);
	gp->softc = NULL;
}

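/*
 * Try to satisfy the request from the single-cluster cache
 * (last_blk/last_buf).  Returns 1 if that completed the whole request
 * and it has been delivered, 0 if more work remains for the caller.
 */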
static int
g_uzip_cached(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	off_t ofs;
	size_t blk, blkofs, usz;

	sc = gp->softc;
	ofs = bp->bio_offset + bp->bio_completed;
	blk = ofs / sc->blksz;
	mtx_lock(&sc->last_mtx);
	if (blk == sc->last_blk) {
		blkofs = ofs % sc->blksz;
		usz = sc->blksz - blkofs;
		if (bp->bio_resid < usz)
			usz = bp->bio_resid;
		memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs,
		    usz);
		sc->req_cached++;
		mtx_unlock(&sc->last_mtx);

		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: offset=%jd: got %jd bytes "
		    "from cache\n", __func__, gp->name, bp, (intmax_t)ofs,
		    (intmax_t)usz));

		bp->bio_completed += usz;
		bp->bio_resid -= usz;

		if (bp->bio_resid == 0) {
			g_io_deliver(bp, 0);
			return (1);
		}
	} else
		mtx_unlock(&sc->last_mtx);

	return (0);
}

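/*
 * TOC helpers: BLK_ENDS() is the offset just past cluster "bi"'s
 * compressed data; BLK_IS_CONT() tells whether cluster "bi" starts exactly
 * where the previous one ends; BLK_IS_NIL() identifies zero-length
 * (all-zero) clusters.  TOFF_2_BOFF() and TLEN_2_BLEN() round a TOC
 * offset/length down/up to the provider's sector boundaries so the
 * physical read is properly aligned.
 */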
#define	BLK_ENDS(sc, bi)	((sc)->toc[(bi)].offset + \
    (sc)->toc[(bi)].blen)

#define	BLK_IS_CONT(sc, bi)	(BLK_ENDS((sc), (bi) - 1) == \
    (sc)->toc[(bi)].offset)
#define	BLK_IS_NIL(sc, bi)	((sc)->toc[(bi)].blen == 0)

#define	TOFF_2_BOFF(sc, pp, bi)	((sc)->toc[(bi)].offset - \
	(sc)->toc[(bi)].offset % (pp)->sectorsize)
#define	TLEN_2_BLEN(sc, pp, bp, ei)	roundup(BLK_ENDS((sc), (ei)) - \
	(bp)->bio_offset, (pp)->sectorsize)

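/*
 * Start (or continue) servicing a read: fill any leading Nil clusters
 * with zeros, then issue a single contiguous read of the compressed data
 * backing the remaining clusters.  Returns 1 if the request was completed
 * (or failed) right here, 0 if a read was sent down and
 * g_uzip_read_done() will continue the work.
 */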
static int
g_uzip_request(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t ofs, start_blk_ofs;
	size_t i, start_blk, end_blk, zsize;

	if (g_uzip_cached(gp, bp) != 0)
		return (1);

	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	ofs = bp->bio_offset + bp->bio_completed;
	start_blk = ofs / sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	end_blk = howmany(ofs + bp->bio_resid, sc->blksz);
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	for (; BLK_IS_NIL(sc, start_blk) && start_blk < end_blk; start_blk++) {
		/* Fill in any leading Nil blocks */
		start_blk_ofs = ofs % sc->blksz;
		zsize = MIN(sc->blksz - start_blk_ofs, bp->bio_resid);
		DPRINTF_BLK(GUZ_DBG_IO, start_blk, ("%s/%s: %p/%ju: "
		    "filling %ju zero bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed, (uintmax_t)zsize));
		bzero(bp->bio_data + bp->bio_completed, zsize);
		bp->bio_completed += zsize;
		bp->bio_resid -= zsize;
		ofs += zsize;
	}

	if (start_blk == end_blk) {
		KASSERT(bp->bio_resid == 0, ("bp->bio_resid is invalid"));
		/*
		 * No non-Nil data is left, complete request immediately.
		 */
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: all done returning %ju "
		    "bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed));
		g_io_deliver(bp, 0);
		return (1);
	}

	for (i = start_blk + 1; i < end_blk; i++) {
		/* Trim discontinuous areas if any */
		if (!BLK_IS_CONT(sc, i)) {
			end_blk = i;
			break;
		}
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "start=%u (%ju[%jd]), end=%u (%ju)\n", __func__, gp->name, bp,
	    (u_int)start_blk, (uintmax_t)sc->toc[start_blk].offset,
	    (intmax_t)sc->toc[start_blk].blen,
	    (u_int)end_blk, (uintmax_t)BLK_ENDS(sc, end_blk - 1)));

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return (1);
	}
	bp2->bio_done = g_uzip_read_done;

	bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk);
	while (1) {
		bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1);
		if (bp2->bio_length <= maxphys) {
			break;
		}
		if (end_blk == (start_blk + 1)) {
			break;
		}
		end_blk--;
	}

	DPRINTF(GUZ_DBG_IO, ("%s/%s: bp2->bio_length = %jd, "
	    "bp2->bio_offset = %jd\n", __func__, gp->name,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return (1);
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "reading %jd bytes from offset %jd\n", __func__, gp->name, bp,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	g_io_request(bp2, cp);
	return (0);
}

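/*
 * Done-callback for the cloned read: queue the bio for the per-instance
 * worker thread, which decompresses it in g_uzip_do().
 */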
static void
g_uzip_read_done(struct bio *bp)
{
	struct bio *bp2;
	struct g_geom *gp;
	struct g_uzip_softc *sc;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;
	sc = gp->softc;

	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

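/*
 * Return non-zero if every byte in "memory" equals "val".  Note that this
 * is a boolean "matches" predicate, not a memcmp()-style difference.
 */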
static int
g_uzip_memvcmp(const void *memory, unsigned char val, size_t size)
{
	const u_char *mm;

	mm = (const u_char *)memory;
	return (*mm == val) && memcmp(mm, mm + 1, size - 1) == 0;
}

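/*
 * Decompress the data returned by the cloned read into the parent bio,
 * cluster by cluster, updating the single-cluster cache along the way.
 * Runs in the worker thread context.
 */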
static void
g_uzip_do(struct g_uzip_softc *sc, struct bio *bp)
{
	struct bio *bp2;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	char *data, *data2;
	off_t ofs;
	size_t blk, blkofs, len, ulen, firstblk;
	int err;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/* Make sure there's forward progress. */
	if (bp->bio_completed == 0) {
		bp2->bio_error = ECANCELED;
		goto done;
	}

	ofs = bp2->bio_offset + bp2->bio_completed;
	firstblk = blk = ofs / sc->blksz;
	blkofs = ofs % sc->blksz;
	data = bp->bio_data + sc->toc[blk].offset % pp->sectorsize;
	data2 = bp2->bio_data + bp2->bio_completed;
	while (bp->bio_completed && bp2->bio_resid) {
		if (blk > firstblk && !BLK_IS_CONT(sc, blk)) {
			DPRINTF_BLK(GUZ_DBG_IO, blk, ("%s/%s: %p: backref'ed "
			    "cluster #%u requested, looping around\n",
			    __func__, gp->name, bp2, (u_int)blk));
			goto done;
		}
		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
		len = sc->toc[blk].blen;
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p/%ju: data2=%p, ulen=%u, "
		    "data=%p, len=%u\n", __func__, gp->name, gp,
		    bp->bio_completed, data2, (u_int)ulen, data, (u_int)len));
		if (len == 0) {
			/* All zero block: no cache update */
zero_block:
			bzero(data2, ulen);
		} else if (len <= bp->bio_completed) {
			mtx_lock(&sc->last_mtx);
			err = sc->dcp->decompress(sc->dcp, gp->name, data,
			    len, sc->last_buf);
			if (err != 0 && sc->toc[blk].last != 0) {
				/*
				 * Last block decompression has failed, check
				 * if it's just zero padding.
				 */
				if (g_uzip_memvcmp(data, '\0', len) != 0) {
					sc->toc[blk].blen = 0;
					sc->last_blk = -1;
					mtx_unlock(&sc->last_mtx);
					len = 0;
					goto zero_block;
				}
			}
			if (err != 0) {
				sc->last_blk = -1;
				mtx_unlock(&sc->last_mtx);
				bp2->bio_error = EILSEQ;
				DPRINTF(GUZ_DBG_ERR, ("%s/%s: decompress"
				    "(%p, %ju, %ju) failed\n", __func__,
				    gp->name, sc->dcp, (uintmax_t)blk,
				    (uintmax_t)len));
				goto done;
			}
			sc->last_blk = blk;
			memcpy(data2, sc->last_buf + blkofs, ulen);
			mtx_unlock(&sc->last_mtx);
			err = sc->dcp->rewind(sc->dcp, gp->name);
			if (err != 0) {
				bp2->bio_error = EILSEQ;
				DPRINTF(GUZ_DBG_ERR, ("%s/%s: rewind(%p) "
				    "failed\n", __func__, gp->name, sc->dcp));
				goto done;
			}
			data += len;
		} else
			break;

		data2 += ulen;
		bp2->bio_completed += ulen;
		bp2->bio_resid -= ulen;
		bp->bio_completed -= len;
		blkofs = 0;
		blk++;
	}

done:
	/* Finish processing the request. */
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
		g_io_deliver(bp2, bp2->bio_error);
	else
		g_uzip_request(gp, bp2);
}

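/*
 * I/O dispatch: pass MNT:* BIO_GETATTR requests through to the backing
 * provider, reject everything but reads, and kick off g_uzip_request()
 * for the rest.
 */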
static void
g_uzip_start(struct bio *bp)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_uzip_softc *sc;

	pp = bp->bio_to;
	gp = pp->geom;

	DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, "
	    "buffer=%p\n", __func__, gp->name, bp, bp->bio_cmd,
	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length, bp->bio_data));

	sc = gp->softc;
	sc->req_total++;

	if (bp->bio_cmd == BIO_GETATTR) {
		struct bio *bp2;
		struct g_consumer *cp;
		struct g_geom *gp;
		struct g_provider *pp;

		/* pass on MNT:* requests and ignore others */
		if (strncmp(bp->bio_attribute, "MNT:", 4) == 0) {
			bp2 = g_clone_bio(bp);
			if (bp2 == NULL) {
				g_io_deliver(bp, ENOMEM);
				return;
			}
			bp2->bio_done = g_std_done;
			pp = bp->bio_to;
			gp = pp->geom;
			cp = LIST_FIRST(&gp->consumer);
			g_io_request(bp2, cp);
			return;
		}
	}
	if (bp->bio_cmd != BIO_READ) {
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	bp->bio_resid = bp->bio_length;
	bp->bio_completed = 0;

	g_uzip_request(gp, bp);
}

static void
g_uzip_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
	g_wither_geom(gp, ENXIO);

	/*
	 * We can safely free the softc now if there are no accesses,
	 * otherwise g_uzip_access() will do that after the last close.
	 */
	if ((cp->acr + cp->acw + cp->ace) == 0)
		g_uzip_softc_free(gp);
}

static void
g_uzip_spoiled(struct g_consumer *cp)
{

	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->geom->name);
	cp->flags |= G_CF_ORPHAN;
	g_uzip_orphan(cp);
}

static int
g_uzip_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	KASSERT(cp != NULL, ("g_uzip_access but no consumer"));

	if (cp->acw + dw > 0)
		return (EROFS);

	error = g_access(cp, dr, dw, de);

	/*
	 * Free the softc if all providers have been closed and this geom
	 * is being removed.
	 */
	if (error == 0 && (gp->flags & G_GEOM_WITHER) != 0 &&
	    (cp->acr + cp->acw + cp->ace) == 0)
		g_uzip_softc_free(gp);

	return (error);
}

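/*
 * Compute the compressed length of every cluster from the raw offset
 * table: an offset below the running maximum references an earlier,
 * identical cluster (de-duplicated images), otherwise the length is the
 * gap to the next larger offset.  A second pass trims clusters that
 * exceed the decompressor's max_blen, which is only legal for the padded
 * last one.
 */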
static int
g_uzip_parse_toc(struct g_uzip_softc *sc, struct g_provider *pp,
    struct g_geom *gp)
{
	uint32_t i, j, backref_to;
	uint64_t max_offset, min_offset;
	struct g_uzip_blk *last_blk;

	min_offset = sizeof(struct cloop_header) +
	    (sc->nblocks + 1) * sizeof(uint64_t);
	max_offset = sc->toc[0].offset - 1;
	last_blk = &sc->toc[0];
	for (i = 0; i < sc->nblocks; i++) {
		/* First do some bounds checking */
		if ((sc->toc[i].offset < min_offset) ||
		    (sc->toc[i].offset > pp->mediasize)) {
			goto error_offset;
		}
		DPRINTF_BLK(GUZ_DBG_IO, i, ("%s: cluster #%u "
		    "offset=%ju max_offset=%ju\n", gp->name,
		    (u_int)i, (uintmax_t)sc->toc[i].offset,
		    (uintmax_t)max_offset));
		backref_to = BLEN_UNDEF;
		if (sc->toc[i].offset < max_offset) {
			/*
			 * For the backref'ed blocks search already parsed
			 * TOC entries for the matching offset and copy the
			 * size from matched entry.
			 */
			for (j = 0; j <= i; j++) {
				if (sc->toc[j].offset == sc->toc[i].offset &&
				    !BLK_IS_NIL(sc, j)) {
					break;
				}
				if (j != i) {
					continue;
				}
				DPRINTF(GUZ_DBG_ERR, ("%s: cannot match "
				    "backref'ed offset at cluster #%u\n",
				    gp->name, i));
				return (-1);
			}
			sc->toc[i].blen = sc->toc[j].blen;
			backref_to = j;
		} else {
			last_blk = &sc->toc[i];
			/*
			 * For the "normal blocks" seek forward until we hit
			 * block whose offset is larger than ours and assume
			 * it's going to be the next one.
			 */
			for (j = i + 1; j < sc->nblocks + 1; j++) {
				if (sc->toc[j].offset > max_offset) {
					break;
				}
			}
			sc->toc[i].blen = sc->toc[j].offset -
			    sc->toc[i].offset;
			if (BLK_ENDS(sc, i) > pp->mediasize) {
				DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
				    "extends past media boundary (%ju > %ju)\n",
				    gp->name, (u_int)i,
				    (uintmax_t)BLK_ENDS(sc, i),
				    (intmax_t)pp->mediasize));
				return (-1);
			}
			KASSERT(max_offset <= sc->toc[i].offset, (
			    "%s: max_offset is incorrect: %ju",
			    gp->name, (uintmax_t)max_offset));
			max_offset = BLK_ENDS(sc, i) - 1;
		}
		DPRINTF_BLK(GUZ_DBG_TOC, i, ("%s: cluster #%u, original %u "
		    "bytes, in %u bytes", gp->name, i, sc->blksz,
		    sc->toc[i].blen));
		if (backref_to != BLEN_UNDEF) {
			DPRINTF_BLK(GUZ_DBG_TOC, i, (" (->#%u)",
			    (u_int)backref_to));
		}
		DPRINTF_BLK(GUZ_DBG_TOC, i, ("\n"));
	}
	last_blk->last = 1;
	/* Do a second pass to validate block lengths */
	for (i = 0; i < sc->nblocks; i++) {
		if (sc->toc[i].blen > sc->dcp->max_blen) {
			if (sc->toc[i].last == 0) {
				DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
				    "length (%ju) exceeds "
				    "max_blen (%ju)\n", gp->name, i,
				    (uintmax_t)sc->toc[i].blen,
				    (uintmax_t)sc->dcp->max_blen));
				return (-1);
			}
			DPRINTF(GUZ_DBG_INFO, ("%s: cluster #%u extra "
			    "padding is detected, trimmed to %ju\n",
			    gp->name, i, (uintmax_t)sc->dcp->max_blen));
			sc->toc[i].blen = sc->dcp->max_blen;
			sc->toc[i].padded = 1;
		}
	}
	return (0);

error_offset:
	DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u: invalid offset %ju, "
	    "min_offset=%ju mediasize=%jd\n", gp->name, (u_int)i,
	    sc->toc[i].offset, min_offset, pp->mediasize));
	return (-1);
}

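/*
 * Taste routine: validate the cloop header, pick a decompressor based on
 * the format byte, read the offset table, and if everything checks out
 * publish the new <name>.uzip provider backed by a worker thread.
 */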
static struct g_geom *
g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	int error;
	uint32_t i, total_offsets, offsets_read, blk;
	void *buf;
	struct cloop_header *header;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp2;
	struct g_uzip_softc *sc;
	struct g_geom_alias *gap;
	enum {
		G_UZIP = 1,
		G_ULZMA,
		G_ZSTD,
	} type;
	char cloop_version;

	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	if ((fnmatch(g_uzip_attach_to, pp->name, 0) != 0) ||
	    (fnmatch(g_uzip_noattach_to, pp->name, 0) == 0)) {
		DPRINTF(GUZ_DBG_INFO, ("%s(%s,%s), ignoring\n", __func__,
		    mp->name, pp->name));
		return (NULL);
	}

	buf = NULL;

	/*
	 * Create geom instance.
	 */
	gp = g_new_geomf(mp, GUZ_DEV_NAME("%s"), pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto e0;
	error = g_access(cp, 1, 0, 0);
	if (error)
		goto e1;
	g_topology_unlock();

	/*
	 * Read cloop header, look for CLOOP magic, perform
	 * other validity checks.
	 */
	DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
	if (buf == NULL)
		goto e2;
	header = (struct cloop_header *) buf;
	if (strncmp(header->magic, CLOOP_MAGIC_START,
	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
		goto e3;
	}

	cloop_version = header->magic[CLOOP_OFS_VERSN];
	switch (header->magic[CLOOP_OFS_COMPR]) {
	case CLOOP_COMP_LZMA:
	case CLOOP_COMP_LZMA_DDP:
		type = G_ULZMA;
		if (cloop_version < CLOOP_MINVER_LZMA) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
		    gp->name));
		break;
	case CLOOP_COMP_LIBZ:
	case CLOOP_COMP_LIBZ_DDP:
		type = G_UZIP;
		if (cloop_version < CLOOP_MINVER_ZLIB) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
		    gp->name));
		break;
	case CLOOP_COMP_ZSTD:
	case CLOOP_COMP_ZSTD_DDP:
		if (cloop_version < CLOOP_MINVER_ZSTD) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
#ifdef ZSTDIO
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZSTD image found.\n",
		    gp->name));
		type = G_ZSTD;
#else
		DPRINTF(GUZ_DBG_ERR, ("%s: GEOM_UZIP_ZSTD image found, but "
		    "this kernel was configured with Zstd disabled.\n",
		    gp->name));
		goto e3;
#endif
		break;
	default:
		DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
		    gp->name));
		goto e3;
	}

	/*
	 * Initialize softc and read offsets.
	 */
	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
	gp->softc = sc;
	sc->blksz = ntohl(header->blksz);
	sc->nblocks = ntohl(header->nblocks);
	if (sc->blksz % 512 != 0) {
		printf("%s: block size (%u) should be multiple of 512.\n",
		    gp->name, sc->blksz);
		goto e4;
	}
	if (sc->blksz > MAX_BLKSZ) {
		printf("%s: block size (%u) should not be larger than %lu.\n",
		    gp->name, sc->blksz, MAX_BLKSZ);
		goto e4;
	}
	total_offsets = sc->nblocks + 1;
	if (sizeof(struct cloop_header) +
	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
		printf("%s: media too small for %u blocks\n",
		    gp->name, sc->nblocks);
		goto e4;
	}
	sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
	    M_GEOM_UZIP, M_WAITOK | M_ZERO);
	offsets_read = MIN(total_offsets,
	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
	for (i = 0; i < offsets_read; i++) {
		sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
		sc->toc[i].blen = BLEN_UNDEF;
	}
	DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
	    gp->name, offsets_read));

	/*
	 * The following invalidates the "header" pointer into the first
	 * block's "buf."
	 */
	header = NULL;

	for (blk = 1; offsets_read < total_offsets; blk++) {
		uint32_t nread;

		free(buf, M_GEOM);
		buf = g_read_data(
		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
		if (buf == NULL)
			goto e5;
		nread = MIN(total_offsets - offsets_read,
		    pp->sectorsize / sizeof(uint64_t));
		DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
		    gp->name, nread, blk));
		for (i = 0; i < nread; i++) {
			sc->toc[offsets_read + i].offset =
			    be64toh(((uint64_t *) buf)[i]);
			sc->toc[offsets_read + i].blen = BLEN_UNDEF;
		}
		offsets_read += nread;
	}
	free(buf, M_GEOM);
	buf = NULL;
	offsets_read -= 1;
	DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
	    "sectors\n", gp->name, offsets_read, blk));
	if (sc->nblocks != offsets_read) {
		DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
		    "blocks\n", gp->name,
		    sc->nblocks < offsets_read ? "more" : "less"));
		goto e5;
	}

	switch (type) {
	case G_UZIP:
		sc->dcp = g_uzip_zlib_ctor(sc->blksz);
		break;
	case G_ULZMA:
		sc->dcp = g_uzip_lzma_ctor(sc->blksz);
		break;
#ifdef ZSTDIO
	case G_ZSTD:
		sc->dcp = g_uzip_zstd_ctor(sc->blksz);
		break;
#endif
	default:
		goto e5;
	}

	/*
	 * The last+1 block was not always initialized by earlier versions of
	 * mkuzip(8). However, *if* it is initialized, the difference between
	 * its offset and the prior block's offset represents the length of the
	 * final real compressed block, and this is significant to the
	 * decompressor.
	 */
	if (cloop_version >= CLOOP_MINVER_RELIABLE_LASTBLKSZ &&
	    sc->toc[sc->nblocks].offset != 0) {
		if (sc->toc[sc->nblocks].offset > pp->mediasize) {
			DPRINTF(GUZ_DBG_ERR,
			    ("%s: bogus n+1 offset %ju > mediasize %ju\n",
			    gp->name, (uintmax_t)sc->toc[sc->nblocks].offset,
			    (uintmax_t)pp->mediasize));
			goto e6;
		}
	} else {
		sc->toc[sc->nblocks].offset = pp->mediasize;
	}
	/* Massage TOC (table of contents), make sure it is sound */
	if (g_uzip_parse_toc(sc, pp, gp) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
		goto e6;
	}
	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
	mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
	bioq_init(&sc->bio_queue);
	sc->last_blk = -1;
	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
	sc->req_total = 0;
	sc->req_cached = 0;

	sc->uzip_do = &g_uzip_do;

	error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
	    gp->name);
	if (error != 0) {
		goto e7;
	}

	g_topology_lock();
	pp2 = g_new_providerf(gp, "%s", gp->name);
	pp2->sectorsize = 512;
	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
	pp2->stripesize = pp->stripesize;
	pp2->stripeoffset = pp->stripeoffset;
	LIST_FOREACH(gap, &pp->aliases, ga_next)
		g_provider_add_alias(pp2, GUZ_DEV_NAME("%s"), gap->ga_alias);
	g_error_provider(pp2, 0);
	g_access(cp, -1, 0, 0);

	DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %ju), (%ju, %ju), %x\n",
	    gp->name, pp2->sectorsize, (uintmax_t)pp2->mediasize,
	    (uintmax_t)pp2->stripeoffset, (uintmax_t)pp2->stripesize, pp2->flags));
	DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks,
	    sc->blksz));
	return (gp);

e7:
	free(sc->last_buf, M_GEOM_UZIP);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
e6:
	sc->dcp->free(sc->dcp);
e5:
	free(sc->toc, M_GEOM_UZIP);
e4:
	free(gp->softc, M_GEOM_UZIP);
e3:
	if (buf != NULL) {
		free(buf, M_GEOM);
	}
e2:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
e1:
	g_detach(cp);
e0:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);

	return (NULL);
}

static int
g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{
	struct g_provider *pp;

	KASSERT(gp != NULL, ("NULL geom"));
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
	g_topology_assert();

	if (gp->softc == NULL) {
		DPRINTF(GUZ_DBG_ERR, ("%s(%s): gp->softc == NULL\n", __func__,
		    gp->name));
		return (ENXIO);
	}

	pp = LIST_FIRST(&gp->provider);
	KASSERT(pp != NULL, ("NULL provider"));
	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
		return (EBUSY);

	g_wither_geom(gp, ENXIO);
	g_uzip_softc_free(gp);
	return (0);
}

static struct g_class g_uzip_class = {
	.name = UZIP_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_uzip_taste,
	.destroy_geom = g_uzip_destroy_geom,

	.start = g_uzip_start,
	.orphan = g_uzip_orphan,
	.access = g_uzip_access,
	.spoiled = g_uzip_spoiled,
};

DECLARE_GEOM_CLASS(g_uzip_class, g_uzip);
MODULE_DEPEND(g_uzip, xz, 1, 1, 1);
MODULE_DEPEND(g_uzip, zlib, 1, 1, 1);
MODULE_VERSION(geom_uzip, 0);