/* vi: set sw=4 ts=4: */
/*
 * Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
 *
 * Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
 * which also acknowledges contributions by Mike Burrows, David Wheeler,
 * Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
 * Robert Sedgewick, and Jon L. Bentley.
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
/*
	Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).

	More efficient reading of Huffman codes, a streamlined read_bunzip()
	function, and various other tweaks. In (limited) tests, approximately
	20% faster than bzcat on x86 and about 10% faster on arm.

	Note that about 2/3 of the time is spent in read_bunzip() reversing
	the Burrows-Wheeler transformation. Much of that time is delay
	resulting from cache misses.

	(2010 update by vda: profiled "bzcat <84mbyte.bz2 >/dev/null"
	on x86-64 CPU with L2 > 1M: get_next_block is hotter than read_bunzip:
	%time seconds   calls function
	71.01   12.69     444 get_next_block
	28.65    5.12   93065 read_bunzip
	00.22    0.04 7736490 get_bits
	00.11    0.02      47 dealloc_bunzip
	00.00    0.00   93018 full_write
	...)


	I would ask that anyone benefiting from this work, especially those
	using it in commercial products, consider making a donation to my local
	non-profit hospice organization (www.hospiceacadiana.com) in the name of
	the woman I loved, Toni W. Hagan, who passed away Feb. 12, 2003.

	Manuel
*/
#include "libbb.h"
#include "bb_archive.h"

#if 0
# define dbg(...) bb_error_msg(__VA_ARGS__)
#else
# define dbg(...) ((void)0)
#endif

/* Constants for Huffman coding */
#define MAX_GROUPS          6
#define GROUP_SIZE          50      /* 64 would have been more efficient */
#define MAX_HUFCODE_BITS    20      /* Longest Huffman code allowed */
#define MAX_SYMBOLS         258     /* 256 literals + RUNA + RUNB */
#define SYMBOL_RUNA         0
#define SYMBOL_RUNB         1

/* Status return values */
#define RETVAL_OK                       0
#define RETVAL_LAST_BLOCK               (dbg("%d", __LINE__), -1)
#define RETVAL_NOT_BZIP_DATA            (dbg("%d", __LINE__), -2)
#define RETVAL_UNEXPECTED_INPUT_EOF     (dbg("%d", __LINE__), -3)
#define RETVAL_SHORT_WRITE              (dbg("%d", __LINE__), -4)
#define RETVAL_DATA_ERROR               (dbg("%d", __LINE__), -5)
#define RETVAL_OUT_OF_MEMORY            (dbg("%d", __LINE__), -6)
#define RETVAL_OBSOLETE_INPUT           (dbg("%d", __LINE__), -7)
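/* Added note: each error macro above expands through the comma operator, so a
 * plain "return RETVAL_DATA_ERROR;" both logs the source line (when dbg() is
 * enabled) and still evaluates to the negative error code. */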

/* Other housekeeping constants */
#define IOBUF_SIZE 4096

/* This is what we know about each Huffman coding group */
struct group_data {
	/* We have an extra slot at the end of limit[] for a sentinel value. */
	int limit[MAX_HUFCODE_BITS+1], base[MAX_HUFCODE_BITS], permute[MAX_SYMBOLS];
	int minLen, maxLen;
};

/* Structure holding all the housekeeping data, including IO buffers and
 * memory that persists between calls to bunzip.
 * Found the most used member:
 *  cat this_file.c | sed -e 's/"/ /g' -e "s/'/ /g" | xargs -n1 \
 *  | grep 'bd->' | sed 's/^.*bd->/bd->/' | sort | $PAGER
 * and moved it (inbufBitCount) to offset 0.
 */
struct bunzip_data {
	/* I/O tracking data (file handles, buffers, positions, etc.) */
	unsigned inbufBitCount, inbufBits;
	int in_fd, out_fd, inbufCount, inbufPos /*, outbufPos*/;
	uint8_t *inbuf /*,*outbuf*/;

	/* State for interrupting output loop */
	int writeCopies, writePos, writeRunCountdown, writeCount;
	int writeCurrent; /* actually a uint8_t */

	/* The CRC values stored in the block header and calculated from the data */
	uint32_t headerCRC, totalCRC, writeCRC;

	/* Intermediate buffer and its size (in bytes) */
	uint32_t *dbuf;
	unsigned dbufSize;

	/* For I/O error handling */
	jmp_buf *jmpbuf;

	/* Big things go last (register-relative addressing can be larger for big offsets) */
	uint32_t crc32Table[256];
	uint8_t selectors[32768]; /* nSelectors=15 bits */
	struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
};
typedef struct bunzip_data bunzip_data;


/* Return the next nnn bits of input. All reads from the compressed input
   are done through this function. All reads are big endian. */
static unsigned get_bits(bunzip_data *bd, int bits_wanted)
{
	unsigned bits = 0;
	/* Cache bd->inbufBitCount in a CPU register (hopefully): */
	int bit_count = bd->inbufBitCount;

	/* If we need to get more data from the byte buffer, do so. (Loop getting
	   one byte at a time to enforce endianness and avoid unaligned access.) */
	while (bit_count < bits_wanted) {

		/* If we need to read more data from file into byte buffer, do so */
		if (bd->inbufPos == bd->inbufCount) {
			/* if "no input fd" case: in_fd == -1, read fails, we jump */
			bd->inbufCount = read(bd->in_fd, bd->inbuf, IOBUF_SIZE);
			if (bd->inbufCount <= 0)
				longjmp(*bd->jmpbuf, RETVAL_UNEXPECTED_INPUT_EOF);
			bd->inbufPos = 0;
		}

		/* Avoid 32-bit overflow (dump bit buffer to top of output) */
		if (bit_count >= 24) {
			bits = bd->inbufBits & ((1U << bit_count) - 1);
			bits_wanted -= bit_count;
			bits <<= bits_wanted;
			bit_count = 0;
		}

		/* Grab next 8 bits of input from buffer. */
		bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++];
		bit_count += 8;
	}

	/* Calculate result */
	bit_count -= bits_wanted;
	bd->inbufBitCount = bit_count;
	bits |= (bd->inbufBits >> bit_count) & ((1 << bits_wanted) - 1);

	return bits;
}
//#define get_bits(bd, n) (dbg("%d:get_bits()", __LINE__), get_bits(bd, n))
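// Illustrative note (added, not from the reference bzip2 sources): bits are
// consumed MSB-first within each byte. For example, if the next input bytes
// are 0x31 0x41 0x59, three calls get_bits(bd, 8) return 0x31, 0x41, 0x59
// in turn, while a single get_bits(bd, 24) returns 0x314159.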

/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */
static int get_next_block(bunzip_data *bd)
{
	int groupCount, selector,
		i, j, symCount, symTotal, nSelectors, byteCount[256];
	uint8_t uc, symToByte[256], mtfSymbol[256], *selectors;
	uint32_t *dbuf;
	unsigned origPtr, t;
	unsigned dbufCount, runPos;
	unsigned runCnt = runCnt; /* for compiler */

	dbuf = bd->dbuf;
	selectors = bd->selectors;

	/* In bbox, we are ok with aborting through setjmp which is set up in start_bunzip */
#if 0
	/* Reset longjmp I/O error handling */
	i = setjmp(bd->jmpbuf);
	if (i) return i;
#endif

	/* Read in header signature and CRC, then validate signature.
	   (last block signature means CRC is for whole file, return now) */
	i = get_bits(bd, 24);
	j = get_bits(bd, 24);
	bd->headerCRC = get_bits(bd, 32);
	if ((i == 0x177245) && (j == 0x385090))
		return RETVAL_LAST_BLOCK;
	if ((i != 0x314159) || (j != 0x265359))
		return RETVAL_NOT_BZIP_DATA;
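	// Added note: the 48-bit markers are the BCD digits of pi (0x314159265359,
	// start of a compressed block) and of sqrt(pi) (0x177245385090, the
	// end-of-stream marker, whose "CRC" field holds the whole-file CRC).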

	/* We can add support for blockRandomised if anybody complains. There was
	   some code for this in busybox 1.0.0-pre3, but nobody ever noticed that
	   it didn't actually work. */
	if (get_bits(bd, 1))
		return RETVAL_OBSOLETE_INPUT;
	origPtr = get_bits(bd, 24);
	if (origPtr > bd->dbufSize)
		return RETVAL_DATA_ERROR;

	/* mapping table: if some byte values are never used (encoding things
	   like ascii text), the compression code removes the gaps to have fewer
	   symbols to deal with, and writes a sparse bitfield indicating which
	   values were present. We make a translation table to convert the symbols
	   back to the corresponding bytes. */
	symTotal = 0;
	i = 0;
	t = get_bits(bd, 16);
	do {
		if (t & (1 << 15)) {
			unsigned inner_map = get_bits(bd, 16);
			do {
				if (inner_map & (1 << 15))
					symToByte[symTotal++] = i;
				inner_map <<= 1;
				i++;
			} while (i & 15);
			i -= 16;
		}
		t <<= 1;
		i += 16;
	} while (i < 256);
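	// Illustration (added): the bitfield is two-level and MSB-first. The outer
	// 16-bit word has one bit per 16-value range of byte values; for each set
	// bit a 16-bit inner word follows. E.g. if only 'a' (0x61) and 'b' (0x62)
	// occur, only the inner word for range 0x60..0x6f is present, with the bits
	// for offsets 1 and 2 within that range set, giving symTotal = 2 and
	// symToByte[] = { 0x61, 0x62 }.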

	/* How many different Huffman coding groups does this block use? */
	groupCount = get_bits(bd, 3);
	if (groupCount < 2 || groupCount > MAX_GROUPS)
		return RETVAL_DATA_ERROR;

	/* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding
	   group. Read in the group selector list, which is stored as MTF encoded
	   bit runs. (MTF=Move To Front, as each value is used it's moved to the
	   start of the list.) */
	for (i = 0; i < groupCount; i++)
		mtfSymbol[i] = i;
	nSelectors = get_bits(bd, 15);
	if (!nSelectors)
		return RETVAL_DATA_ERROR;
	for (i = 0; i < nSelectors; i++) {
		uint8_t tmp_byte;
		/* Get next value */
		int n = 0;
		while (get_bits(bd, 1)) {
			n++;
			if (n >= groupCount)
				return RETVAL_DATA_ERROR;
		}
		/* Decode MTF to get the next selector */
		tmp_byte = mtfSymbol[n];
		while (--n >= 0)
			mtfSymbol[n + 1] = mtfSymbol[n];
//We catch it later, in the second loop where we use selectors[i].
//Maybe this is a better place, though?
//		if (tmp_byte >= groupCount) {
//			dbg("%d: selectors[%d]:%d groupCount:%d",
//				__LINE__, i, tmp_byte, groupCount);
//			return RETVAL_DATA_ERROR;
//		}
		mtfSymbol[0] = selectors[i] = tmp_byte;
	}
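	// Illustration (added): each selector is a unary count of 1-bits terminated
	// by a 0-bit, then run through MTF decoding. With groupCount = 3 and the
	// list currently {0,1,2}: input "0" picks n=0 -> group 0 (list unchanged),
	// while input "10" picks n=1 -> group 1 and the list becomes {1,0,2}.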

	/* Read the Huffman coding tables for each group, which code for symTotal
	   literal symbols, plus two run symbols (RUNA, RUNB) */
	symCount = symTotal + 2;
	for (j = 0; j < groupCount; j++) {
		uint8_t length[MAX_SYMBOLS];
		/* 8 bits is ALMOST enough for temp[], see below */
		unsigned temp[MAX_HUFCODE_BITS+1];
		struct group_data *hufGroup;
		int *base, *limit;
		int minLen, maxLen, pp, len_m1;

		/* Read Huffman code lengths for each symbol. They're stored in
		   a way similar to mtf; record a starting value for the first symbol,
		   and an offset from the previous value for every symbol after that.
		   (Subtracting 1 before the loop and then adding it back at the end is
		   an optimization that makes the test inside the loop simpler: symbol
		   length 0 becomes negative, so an unsigned inequality catches it.) */
		len_m1 = get_bits(bd, 5) - 1;
		for (i = 0; i < symCount; i++) {
			for (;;) {
				int two_bits;
				if ((unsigned)len_m1 > (MAX_HUFCODE_BITS-1))
					return RETVAL_DATA_ERROR;

				/* If first bit is 0, stop. Else second bit indicates whether
				   to increment or decrement the value. Optimization: grab 2
				   bits and unget the second if the first was 0. */
				two_bits = get_bits(bd, 2);
				if (two_bits < 2) {
					bd->inbufBitCount++;
					break;
				}

				/* Add one if second bit 1, else subtract 1. Avoids if/else */
				len_m1 += (((two_bits+1) & 2) - 1);
			}

			/* Correct for the initial -1, to get the final symbol length */
			length[i] = len_m1 + 1;
		}

		/* Find largest and smallest lengths in this group */
		minLen = maxLen = length[0];
		for (i = 1; i < symCount; i++) {
			if (length[i] > maxLen)
				maxLen = length[i];
			else if (length[i] < minLen)
				minLen = length[i];
		}

		/* Calculate permute[], base[], and limit[] tables from length[].
		 *
		 * permute[] is the lookup table for converting Huffman coded symbols
		 * into decoded symbols. base[] is the amount to subtract from the
		 * value of a Huffman symbol of a given length when using permute[].
		 *
		 * limit[] indicates the largest numerical value a symbol with a given
		 * number of bits can have. This is how the Huffman codes can vary in
		 * length: each code with a value>limit[length] needs another bit.
		 */
		hufGroup = bd->groups + j;
		hufGroup->minLen = minLen;
		hufGroup->maxLen = maxLen;

		/* Note that minLen can't be smaller than 1, so we adjust the base
		   and limit array pointers so we're not always wasting the first
		   entry. We do this again when using them (during symbol decoding). */
		base = hufGroup->base - 1;
		limit = hufGroup->limit - 1;

		/* Calculate permute[]. Concurrently, initialize temp[] and limit[]. */
		pp = 0;
		for (i = minLen; i <= maxLen; i++) {
			int k;
			temp[i] = limit[i] = 0;
			for (k = 0; k < symCount; k++)
				if (length[k] == i)
					hufGroup->permute[pp++] = k;
		}

		/* Count symbols coded for at each bit length */
		/* NB: in pathological cases, temp[8] can end up being 256.
		 * That's why uint8_t is too small for temp[]. */
		for (i = 0; i < symCount; i++)
			temp[length[i]]++;

		/* Calculate limit[] (the largest symbol-coding value at each bit
		 * length, which is (previous limit<<1)+symbols at this level), and
		 * base[] (number of symbols to ignore at each bit length, which is
		 * limit minus the cumulative count of symbols coded for already). */
		pp = t = 0;
		for (i = minLen; i < maxLen;) {
			unsigned temp_i = temp[i];

			pp += temp_i;

			/* We read the largest possible symbol size and then unget bits
			   after determining how many we need, and those extra bits could
			   be set to anything. (They're noise from future symbols.) At
			   each level we're really only interested in the first few bits,
			   so here we set all the trailing to-be-ignored bits to 1 so they
			   don't affect the value>limit[length] comparison. */
			limit[i] = (pp << (maxLen - i)) - 1;
			pp <<= 1;
			t += temp_i;
			base[++i] = pp - t;
		}
		limit[maxLen] = pp + temp[maxLen] - 1;
		limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */
		base[minLen] = 0;
	}
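	// Worked example (added; values assumed purely for illustration): suppose a
	// group has 4 symbols with code lengths {1,2,3,3} (minLen=1, maxLen=3), i.e.
	// canonical codes 0, 10, 110, 111. Then permute[] = {0,1,2,3} and the loops
	// above produce limit[1]=3, limit[2]=5, limit[3]=7, base[1]=0, base[2]=1,
	// base[3]=4. Decoding always reads maxLen=3 bits: bits "011" (code 0 plus
	// two junk bits) give value 3 <= limit[1], so length 1 and
	// permute[(3>>2) - base[1]] = permute[0]; bits "110" give value 6, which
	// exceeds limit[1] and limit[2] but not limit[3], so length 3 and
	// permute[6 - base[3]] = permute[2].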

	/* We've finished reading and digesting the block header. Now read this
	   block's Huffman coded symbols from the file and undo the Huffman coding
	   and run length encoding, saving the result into dbuf[dbufCount++] = uc */

	/* Initialize symbol occurrence counters and symbol Move To Front table */
	/*memset(byteCount, 0, sizeof(byteCount)); - smaller, but slower */
	for (i = 0; i < 256; i++) {
		byteCount[i] = 0;
		mtfSymbol[i] = (uint8_t)i;
	}

	/* Loop through compressed symbols. */

	runPos = dbufCount = selector = 0;
	for (;;) {
		struct group_data *hufGroup;
		int *base, *limit;
		int nextSym;
		uint8_t ngrp;

		/* Fetch next Huffman coding group from list. */
		symCount = GROUP_SIZE - 1;
		if (selector >= nSelectors)
			return RETVAL_DATA_ERROR;
		ngrp = selectors[selector++];
		if (ngrp >= groupCount) {
			dbg("%d selectors[%d]:%d groupCount:%d",
				__LINE__, selector-1, ngrp, groupCount);
			return RETVAL_DATA_ERROR;
		}
		hufGroup = bd->groups + ngrp;
		base = hufGroup->base - 1;
		limit = hufGroup->limit - 1;

 continue_this_group:
		/* Read next Huffman-coded symbol. */

		/* Note: It is far cheaper to read maxLen bits and back up than it is
		   to read minLen bits and then add one bit at a time, testing
		   as we go. Because there is a trailing last block (with file CRC),
		   there is no danger of the overread causing an unexpected EOF for a
		   valid compressed file.
		*/
		if (1) {
			/* As a further optimization, we do the read inline
			   (falling back to a call to get_bits if the buffer runs dry).
			*/
			int new_cnt;
			while ((new_cnt = bd->inbufBitCount - hufGroup->maxLen) < 0) {
				/* bd->inbufBitCount < hufGroup->maxLen */
				if (bd->inbufPos == bd->inbufCount) {
					nextSym = get_bits(bd, hufGroup->maxLen);
					goto got_huff_bits;
				}
				bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++];
				bd->inbufBitCount += 8;
			};
			bd->inbufBitCount = new_cnt; /* "bd->inbufBitCount -= hufGroup->maxLen;" */
			nextSym = (bd->inbufBits >> new_cnt) & ((1 << hufGroup->maxLen) - 1);
 got_huff_bits: ;
		} else { /* unoptimized equivalent */
			nextSym = get_bits(bd, hufGroup->maxLen);
		}
		/* Figure how many bits are in next symbol and unget extras */
		i = hufGroup->minLen;
		while (nextSym > limit[i])
			++i;
		j = hufGroup->maxLen - i;
		if (j < 0)
			return RETVAL_DATA_ERROR;
		bd->inbufBitCount += j;

		/* Huffman decode value to get nextSym (with bounds checking) */
		nextSym = (nextSym >> j) - base[i];
		if ((unsigned)nextSym >= MAX_SYMBOLS)
			return RETVAL_DATA_ERROR;
		nextSym = hufGroup->permute[nextSym];

		/* We have now decoded the symbol, which indicates either a new literal
		   byte, or a repeated run of the most recent literal byte. First,
		   check if nextSym indicates a repeated run, and if so loop collecting
		   how many times to repeat the last literal. */
		if ((unsigned)nextSym <= SYMBOL_RUNB) { /* RUNA or RUNB */

			/* If this is the start of a new run, zero out counter */
			if (runPos == 0) {
				runPos = 1;
				runCnt = 0;
			}

			/* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
			   each bit position, add 1 or 2 instead. For example,
			   1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
			   You can make any bit pattern that way using 1 less symbol than
			   the basic or 0/1 method (except all bits 0, which would use no
			   symbols, but a run of length 0 doesn't mean anything in this
			   context). Thus space is saved. */
			runCnt += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */
//The 32-bit overflow of runCnt wasn't yet seen, but probably can happen.
//This would be the fix (catches too large count way before it can overflow):
//			if (runCnt > bd->dbufSize) {
//				dbg("runCnt:%u > dbufSize:%u RETVAL_DATA_ERROR",
//					runCnt, bd->dbufSize);
//				return RETVAL_DATA_ERROR;
//			}
			if (runPos < bd->dbufSize) runPos <<= 1;
			goto end_of_huffman_loop;
		}
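		// Illustration (added): reading the run least-significant digit first,
		// with RUNA contributing 1*runPos and RUNB contributing 2*runPos, gives
		// RUNA=1, RUNB=2, RUNA RUNA=3, RUNB RUNA=4, RUNA RUNB=5, RUNB RUNB=6,
		// and so on (bijective base-2 counting).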

		/* When we hit the first non-run symbol after a run, we now know
		   how many times to repeat the last literal, so append that many
		   copies to our buffer of decoded symbols (dbuf) now. (The last
		   literal used is the one at the head of the mtfSymbol array.) */
		if (runPos != 0) {
			uint8_t tmp_byte;
			if (dbufCount + runCnt > bd->dbufSize) {
				dbg("dbufCount:%u+runCnt:%u %u > dbufSize:%u RETVAL_DATA_ERROR",
					dbufCount, runCnt, dbufCount + runCnt, bd->dbufSize);
				return RETVAL_DATA_ERROR;
			}
			tmp_byte = symToByte[mtfSymbol[0]];
			byteCount[tmp_byte] += runCnt;
			while ((int)--runCnt >= 0)
				dbuf[dbufCount++] = (uint32_t)tmp_byte;
			runPos = 0;
		}

		/* Is this the terminating symbol? */
		if (nextSym > symTotal) break;

		/* At this point, nextSym indicates a new literal character. Subtract
		   one to get the position in the MTF array at which this literal is
		   currently to be found. (Note that the result can't be -1 or 0,
		   because 0 and 1 are RUNA and RUNB. But another instance of the
		   first symbol in the mtf array, position 0, would have been handled
		   as part of a run above. Therefore 1 unused mtf position minus
		   2 non-literal nextSym values equals -1.) */
		if (dbufCount >= bd->dbufSize) return RETVAL_DATA_ERROR;
		i = nextSym - 1;
		uc = mtfSymbol[i];

		/* Adjust the MTF array. Since we typically expect to move only a
		 * small number of symbols, and are bound by 256 in any case, using
		 * memmove here would typically be bigger and slower due to function
		 * call overhead and other assorted setup costs. */
		do {
			mtfSymbol[i] = mtfSymbol[i-1];
		} while (--i);
		mtfSymbol[0] = uc;
		uc = symToByte[uc];

		/* We have our literal byte. Save it into dbuf. */
		byteCount[uc]++;
		dbuf[dbufCount++] = (uint32_t)uc;

		/* Skip group initialization if we're not done with this group. Done
		 * this way to avoid compiler warning. */
 end_of_huffman_loop:
		if (--symCount >= 0) goto continue_this_group;
	}

	/* At this point, we've read all the Huffman-coded symbols (and repeated
	   runs) for this block from the input stream, and decoded them into the
	   intermediate buffer. There are dbufCount many decoded bytes in dbuf[].
	   Now undo the Burrows-Wheeler transform on dbuf.
	   See http://dogma.net/markn/articles/bwt/bwt.htm
	 */

	/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
	j = 0;
	for (i = 0; i < 256; i++) {
		int tmp_count = j + byteCount[i];
		byteCount[i] = j;
		j = tmp_count;
	}

	/* Figure out what order dbuf would be in if we sorted it. */
	for (i = 0; i < dbufCount; i++) {
		uint8_t tmp_byte = (uint8_t)dbuf[i];
		int tmp_count = byteCount[tmp_byte];
		dbuf[tmp_count] |= (i << 8);
		byteCount[tmp_byte] = tmp_count + 1;
	}
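	// Added note: after this pass each dbuf[] entry does double duty: bits 0..7
	// still hold the BWT byte stored at that index, while bits 8..31 (filled in
	// at the position that byte occupies in sorted order) hold the link that
	// read_bunzip() below follows, via "pos = dbuf[pos]; current = (uint8_t)pos;
	// pos >>= 8;", to visit positions in original-data order.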

	/* Decode first byte by hand to initialize "previous" byte. Note that it
	   doesn't get output, and if the first three characters are identical
	   it doesn't qualify as a run (hence writeRunCountdown=5). */
	if (dbufCount) {
		uint32_t tmp;
		if ((int)origPtr >= dbufCount) return RETVAL_DATA_ERROR;
		tmp = dbuf[origPtr];
		bd->writeCurrent = (uint8_t)tmp;
		bd->writePos = (tmp >> 8);
		bd->writeRunCountdown = 5;
	}
	bd->writeCount = dbufCount;

	return RETVAL_OK;
}

/* Undo Burrows-Wheeler transform on intermediate buffer to produce output.
   If start_bunzip was initialized with out_fd=-1, then up to len bytes of
   data are written to outbuf. Return value is number of bytes written or
   error (all errors are negative numbers). If out_fd!=-1, outbuf and len
   are ignored, data is written to out_fd and return is RETVAL_OK or error.

   NB: read_bunzip returns < 0 on error, or the number of *unfilled* bytes
   in outbuf. IOW: on EOF it returns len ("no bytes were filled"), not 0.
   (Why? This allows us to get rid of one local variable.)
*/
static int read_bunzip(bunzip_data *bd, char *outbuf, int len)
{
	const uint32_t *dbuf;
	int pos, current, previous;
	uint32_t CRC;

	/* If we already have error/end indicator, return it */
	if (bd->writeCount < 0)
		return bd->writeCount;

	dbuf = bd->dbuf;

	/* Register-cached state (hopefully): */
	pos = bd->writePos;
	current = bd->writeCurrent;
	CRC = bd->writeCRC; /* small loss on x86-32 (not enough regs), win on x86-64 */

	/* We will always have pending decoded data to write into the output
	   buffer unless this is the very first call (in which case we haven't
	   Huffman-decoded a block into the intermediate buffer yet). */
	if (bd->writeCopies) {

 dec_writeCopies:
		/* Inside the loop, writeCopies means extra copies (beyond 1) */
		--bd->writeCopies;

		/* Loop outputting bytes */
		for (;;) {

			/* If the output buffer is full, save cached state and return */
			if (--len < 0) {
				/* Unlikely branch.
				 * Use of "goto" instead of keeping code here
				 * helps compiler to realize this. */
				goto outbuf_full;
			}

			/* Write next byte into output buffer, updating CRC */
			*outbuf++ = current;
			CRC = (CRC << 8) ^ bd->crc32Table[(CRC >> 24) ^ current];

			/* Loop now if we're outputting multiple copies of this byte */
			if (bd->writeCopies) {
				/* Unlikely branch */
				/*--bd->writeCopies;*/
				/*continue;*/
				/* Same, but (ab)using other existing --writeCopies operation
				 * (and this if() compiles into just test+branch pair): */
				goto dec_writeCopies;
			}
 decode_next_byte:
			if (--bd->writeCount < 0)
				break; /* input block is fully consumed, need next one */

			/* Follow sequence vector to undo Burrows-Wheeler transform */
			previous = current;
			pos = dbuf[pos];
			current = (uint8_t)pos;
			pos >>= 8;

			/* After 3 consecutive copies of the same byte, the 4th
			 * is a repeat count. We count down from 4 instead
			 * of counting up because testing for non-zero is faster */
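			// Illustration (added): bzip2's initial RLE stage encodes runs of
			// 4..255 identical bytes as that byte four times plus a count of
			// the extras, so the decoded sequence 'A','A','A','A',3 expands
			// here to seven 'A's (4 literal copies plus writeCopies = 3).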
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 641 | if (--bd->writeRunCountdown != 0) { |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 642 | if (current != previous) |
| 643 | bd->writeRunCountdown = 4; |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 644 | } else { |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 645 | /* Unlikely branch */ |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 646 | /* We have a repeated run, this byte indicates the count */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 647 | bd->writeCopies = current; |
| 648 | current = previous; |
| 649 | bd->writeRunCountdown = 5; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 650 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 651 | /* Sometimes there are just 3 bytes (run length 0) */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 652 | if (!bd->writeCopies) goto decode_next_byte; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 653 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 654 | /* Subtract the 1 copy we'd output anyway to get extras */ |
| 655 | --bd->writeCopies; |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 656 | } |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 657 | } /* for(;;) */ |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 658 | |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 659 | /* Decompression of this input block completed successfully */ |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 660 | bd->writeCRC = CRC = ~CRC; |
| 661 | bd->totalCRC = ((bd->totalCRC << 1) | (bd->totalCRC >> 31)) ^ CRC; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 662 | |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 663 | /* If this block had a CRC error, force file level CRC error */ |
| 664 | if (CRC != bd->headerCRC) { |
Denis Vlasenko | 52a4388 | 2007-10-10 20:53:41 +0000 | [diff] [blame] | 665 | bd->totalCRC = bd->headerCRC + 1; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 666 | return RETVAL_LAST_BLOCK; |
| 667 | } |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 668 | } |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 669 | |
Eric Andersen | aff114c | 2004-04-14 17:51:38 +0000 | [diff] [blame] | 670 | /* Refill the intermediate buffer by Huffman-decoding the next block of input */
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 671 | { |
| 672 | int r = get_next_block(bd); |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 673 | if (r) { /* error/end */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 674 | bd->writeCount = r; |
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 675 | return (r != RETVAL_LAST_BLOCK) ? r : len; |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 676 | } |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 677 | } |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 678 | |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 679 | CRC = ~0; |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 680 | pos = bd->writePos; |
| 681 | current = bd->writeCurrent; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 682 | goto decode_next_byte; |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 683 | |
| 684 | outbuf_full: |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 685 | /* Output buffer is full, save cached state and return */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 686 | bd->writePos = pos; |
| 687 | bd->writeCurrent = current; |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 688 | bd->writeCRC = CRC; |
| 689 | |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 690 | bd->writeCopies++; |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 691 | |
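	/* Returning 0 means the output buffer was filled completely; when
	 * the stream ends instead, the code above returns the unused space
	 * left in the buffer, so the caller can tell how many bytes were
	 * actually produced (see unpack_bz2_stream below). */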
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 692 | return 0; |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 693 | } |
| 694 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 695 | /* Allocate the structure, read the file header. If in_fd==-1, inbuf must
| 696 | contain a complete bzip2 stream (len bytes long). If in_fd!=-1, len bytes of
| 697 | inbuf are copied in as already-read data and further input comes from in_fd. */
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 698 | |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 699 | /* Because bunzip2 is used for help text unpacking, and because bb_show_usage() |
| 700 | should work for NOFORK applets too, we must be extremely careful to not leak |
| 701 | any allocations! */ |
Ron Yorston | c339c7f | 2018-11-02 14:14:31 +0100 | [diff] [blame] | 702 | static int FAST_FUNC start_bunzip( |
Denys Vlasenko | 38ccd6a | 2018-04-08 20:02:01 +0200 | [diff] [blame] | 703 | void *jmpbuf, |
| 704 | bunzip_data **bdp, |
| 705 | int in_fd, |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 706 | const void *inbuf, int len) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 707 | { |
| 708 | bunzip_data *bd; |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 709 | unsigned i; |
| 710 | enum { |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 711 | BZh0 = ('B' << 24) + ('Z' << 16) + ('h' << 8) + '0', |
| 712 | h0 = ('h' << 8) + '0', |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 713 | }; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 714 | |
| 715 | /* Figure out how much data to allocate */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 716 | i = sizeof(bunzip_data); |
Denys Vlasenko | 38ccd6a | 2018-04-08 20:02:01 +0200 | [diff] [blame] | 717 | if (in_fd != -1) |
| 718 | i += IOBUF_SIZE; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 719 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 720 | /* Allocate bunzip_data. Most fields initialize to zero. */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 721 | bd = *bdp = xzalloc(i); |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 722 | |
Denys Vlasenko | 38ccd6a | 2018-04-08 20:02:01 +0200 | [diff] [blame] | 723 | bd->jmpbuf = jmpbuf; |
| 724 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 725 | /* Setup input buffer */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 726 | bd->in_fd = in_fd; |
| 727 | if (-1 == in_fd) { |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 728 | /* in this case, bd->inbuf is read-only */ |
| 729 | bd->inbuf = (void*)inbuf; /* cast away const-ness */ |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 730 | } else { |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 731 | bd->inbuf = (uint8_t*)(bd + 1); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 732 | memcpy(bd->inbuf, inbuf, len); |
| 733 | } |
| 734 | bd->inbufCount = len; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 735 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 736 | /* Init the CRC32 table (big endian) */ |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 737 | crc32_filltable(bd->crc32Table, 1); |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 738 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 739 | /* Ensure that file starts with "BZh['1'-'9']." */ |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 740 | /* Update: now caller verifies 1st two bytes, makes .gz/.bz2 |
| 741 | * integration easier */ |
| 742 | /* was: */ |
| 743 | /* i = get_bits(bd, 32); */ |
| 744 | /* if ((unsigned)(i - BZh0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; */ |
| 745 | i = get_bits(bd, 16); |
| 746 | if ((unsigned)(i - h0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; |
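	/* Worked example: i holds the 3rd and 4th header bytes, e.g. 0x6839
	 * for "BZh9". i - h0 is then the block-size digit (9 here), and the
	 * unsigned compare (i - h0 - 1) >= 9 accepts only 'h1'..'h9' in one
	 * test: anything below 'h1' wraps around to a huge unsigned value. */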
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 747 | |
Denis Vlasenko | 86d88c0 | 2008-06-28 18:10:09 +0000 | [diff] [blame] | 748 | /* Fourth byte (ASCII '1'-'9') indicates the block size in units of 100k of
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 749 | uncompressed data. Allocate the intermediate buffer for one block. */
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 750 | /* bd->dbufSize = 100000 * (i - BZh0); */ |
| 751 | bd->dbufSize = 100000 * (i - h0); |
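	/* For "BZh9" this is 900000 entries; with 4-byte entries the dbuf
	 * allocated below is about 3.4 MiB, while "BZh1" needs ~390 KiB. */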
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 752 | |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 753 | /* Cannot use xmalloc - may leak bd in NOFORK case! */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 754 | bd->dbuf = malloc_or_warn(bd->dbufSize * sizeof(bd->dbuf[0])); |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 755 | if (!bd->dbuf) { |
| 756 | free(bd); |
| 757 | xfunc_die(); |
| 758 | } |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 759 | return RETVAL_OK; |
| 760 | } |
| 761 | |
Ron Yorston | c339c7f | 2018-11-02 14:14:31 +0100 | [diff] [blame] | 762 | static void FAST_FUNC dealloc_bunzip(bunzip_data *bd) |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 763 | { |
Denis Vlasenko | 4b924f3 | 2007-05-30 00:29:55 +0000 | [diff] [blame] | 764 | free(bd->dbuf); |
| 765 | free(bd); |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 766 | } |
| 767 | |
| 768 | |
| 769 | /* Decompress src_fd to dst_fd. Stops at end of bzip data, not end of file. */ |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 770 | IF_DESKTOP(long long) int FAST_FUNC |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 771 | unpack_bz2_stream(transformer_state_t *xstate) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 772 | { |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 773 | IF_DESKTOP(long long total_written = 0;) |
Denys Vlasenko | 4d4d1a0 | 2010-11-01 02:19:47 +0100 | [diff] [blame] | 774 | bunzip_data *bd; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 775 | char *outbuf; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 776 | int i; |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 777 | unsigned len; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 778 | |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 779 | if (check_signature16(xstate, BZIP2_MAGIC)) |
Denys Vlasenko | 8a6a2f9 | 2012-03-06 16:27:48 +0100 | [diff] [blame] | 780 | return -1; |
| 781 | |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 782 | outbuf = xmalloc(IOBUF_SIZE); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 783 | len = 0; |
| 784 | while (1) { /* "Process one BZ... stream" loop */ |
Denys Vlasenko | 38ccd6a | 2018-04-08 20:02:01 +0200 | [diff] [blame] | 785 | jmp_buf jmpbuf; |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 786 | |
Denys Vlasenko | 38ccd6a | 2018-04-08 20:02:01 +0200 | [diff] [blame] | 787 | /* Setup for I/O error handling via longjmp */ |
| 788 | i = setjmp(jmpbuf); |
| 789 | if (i == 0) |
| 790 | i = start_bunzip(&jmpbuf, &bd, xstate->src_fd, outbuf + 2, len); |
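		/* If input runs out unexpectedly, the bit-fetching code
		 * longjmp()s back to the setjmp() above with a nonzero RETVAL
		 * error, so a truncated stream ends up in the same error path
		 * as a failed start_bunzip() */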
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 791 | |
| 792 | if (i == 0) { |
| 793 | while (1) { /* "Produce some output bytes" loop */ |
| 794 | i = read_bunzip(bd, outbuf, IOBUF_SIZE); |
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 795 | if (i < 0) /* error? */ |
| 796 | break; |
| 797 | i = IOBUF_SIZE - i; /* number of bytes produced */ |
| 798 | if (i == 0) /* EOF? */ |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 799 | break; |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 800 | if (i != transformer_write(xstate, outbuf, i)) { |
Denys Vlasenko | 8531c43 | 2010-11-01 01:38:54 +0100 | [diff] [blame] | 801 | i = RETVAL_SHORT_WRITE; |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 802 | goto release_mem; |
| 803 | } |
| 804 | IF_DESKTOP(total_written += i;) |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 805 | } |
| 806 | } |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 807 | |
Denys Vlasenko | c531b9a | 2011-10-31 01:05:16 +0100 | [diff] [blame] | 808 | if (i != RETVAL_LAST_BLOCK |
| 809 | /* Observed case when i == RETVAL_OK: |
| 810 | * "bzcat z.bz2", where "z.bz2" is a bzipped zero-length file |
| 811 | * (to be exact, z.bz2 is exactly these 14 bytes: |
Denys Vlasenko | 97c2a6d | 2018-11-02 14:20:54 +0100 | [diff] [blame] | 812 | * 42 5a 68 39 17 72 45 38 50 90 00 00 00 00). |
Denys Vlasenko | c531b9a | 2011-10-31 01:05:16 +0100 | [diff] [blame] | 813 | */ |
| 814 | && i != RETVAL_OK |
| 815 | ) { |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 816 | bb_error_msg("bunzip error %d", i); |
| 817 | break; |
| 818 | } |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 819 | if (bd->headerCRC != bd->totalCRC) { |
James Byrne | 6937487 | 2019-07-02 11:35:03 +0200 | [diff] [blame] | 820 | bb_simple_error_msg("CRC error"); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 821 | break; |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 822 | } |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 823 | |
| 824 | /* Successfully unpacked one BZ stream */ |
| 825 | i = RETVAL_OK; |
| 826 | |
| 827 | /* Do we have "BZ..." after the last processed byte?
| 828 | * pbzip2 (parallelized bzip2) produces such files. |
| 829 | */ |
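		/* Example layout of such a file - each stream is itself a
		 * complete bzip2 file:
		 *     "BZh9" <blocks> <stream end + CRC> "BZh9" <blocks> ...
		 * Any bytes already buffered in bd->inbuf past the end of the
		 * first stream are carried over below so that the next "BZ"
		 * magic is not lost. */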
| 830 | len = bd->inbufCount - bd->inbufPos; |
| 831 | memcpy(outbuf, &bd->inbuf[bd->inbufPos], len); |
| 832 | if (len < 2) { |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 833 | if (safe_read(xstate->src_fd, outbuf + len, 2 - len) != 2 - len) |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 834 | break; |
| 835 | len = 2; |
| 836 | } |
| 837 | if (*(uint16_t*)outbuf != BZIP2_MAGIC) /* "BZ"? */ |
| 838 | break; |
| 839 | dealloc_bunzip(bd); |
| 840 | len -= 2; |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 841 | } |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 842 | |
| 843 | release_mem: |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 844 | dealloc_bunzip(bd); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 845 | free(outbuf); |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 846 | |
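	/* Negative RETVAL_* on error, otherwise the number of bytes written
	 * (which compiles down to 0 when the DESKTOP total is disabled) */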
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 847 | return i ? i : IF_DESKTOP(total_written) + 0; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 848 | } |
| 849 | |
Ron Yorston | c339c7f | 2018-11-02 14:14:31 +0100 | [diff] [blame] | 850 | char* FAST_FUNC |
| 851 | unpack_bz2_data(const char *packed, int packed_len, int unpacked_len) |
| 852 | { |
| 853 | char *outbuf = NULL; |
| 854 | bunzip_data *bd; |
| 855 | int i; |
| 856 | jmp_buf jmpbuf; |
| 857 | |
| 858 | /* Setup for I/O error handling via longjmp */ |
| 859 | i = setjmp(jmpbuf); |
| 860 | if (i == 0) { |
| 861 | i = start_bunzip(&jmpbuf, |
| 862 | &bd, |
| 863 | /* src_fd: */ -1, |
| 864 | /* inbuf: */ packed, |
| 865 | /* len: */ packed_len |
| 866 | ); |
| 867 | } |
| 868 | /* read_bunzip() below can longjmp() back to the setjmp() above with
| 869 | * i != 0 on data errors, so this i == 0 check is not redundant */
| 870 | if (i == 0) { |
| 871 | /* Cannot use xmalloc: will leak bd in NOFORK case! */ |
| 872 | outbuf = malloc_or_warn(unpacked_len); |
| 873 | if (outbuf) |
| 874 | read_bunzip(bd, outbuf, unpacked_len); |
| 875 | } |
| 876 | dealloc_bunzip(bd); |
| 877 | return outbuf; |
| 878 | } |
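/* Usage sketch for unpack_bz2_data() (illustrative only - the names
 * "blob", "blob_len" and "text_len" are hypothetical, not busybox ones):
 *
 *   // blob points at a .bz2 image with its leading "BZ" magic already
 *   // stripped, since start_bunzip() only re-reads the "h<level>" part;
 *   // text_len is the known uncompressed size.
 *   char *text = unpack_bz2_data(blob, blob_len, text_len);
 *   if (text) {
 *       full_write(STDOUT_FILENO, text, text_len);
 *       free(text);
 *   }
 */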
| 879 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 880 | #ifdef TESTING |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 881 | |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 882 | static char *const bunzip_errors[] = { |
| 883 | NULL, "Bad file checksum", "Not bzip data", |
| 884 | "Unexpected input EOF", "Unexpected output EOF", "Data error", |
| 885 | "Out of memory", "Obsolete (pre 0.9.5) bzip format not supported" |
| 886 | }; |
Glenn L McGrath | 237ae42 | 2002-11-03 14:05:15 +0000 | [diff] [blame] | 887 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 888 | /* Dumb little test thing, decompress stdin to stdout */ |
Bernhard Reutner-Fischer | febe3c4 | 2007-04-04 20:52:03 +0000 | [diff] [blame] | 889 | int main(int argc, char **argv) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 890 | { |
	char c;
	int i;
	transformer_state_t xstate;

	/* Assumes a zero-filled transformer_state_t with only the fds set
	 * is enough for plain fd-to-fd decompression (see unpack_bz2_stream) */
	memset(&xstate, 0, sizeof(xstate));
	xstate.src_fd = STDIN_FILENO;
	xstate.dst_fd = STDOUT_FILENO;
	i = unpack_bz2_stream(&xstate);
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 894 | if (i < 0) |
Denis Vlasenko | f5d8c90 | 2008-06-26 14:32:57 +0000 | [diff] [blame] | 895 | fprintf(stderr, "%s\n", bunzip_errors[-i]); |
Bernhard Reutner-Fischer | 5e25ddb | 2008-05-19 09:48:17 +0000 | [diff] [blame] | 896 | else if (read(STDIN_FILENO, &c, 1)) |
Denis Vlasenko | f5d8c90 | 2008-06-26 14:32:57 +0000 | [diff] [blame] | 897 | fprintf(stderr, "Trailing garbage ignored\n"); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 898 | return -i; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 899 | } |
| 900 | #endif |