/* vi: set sw=4 ts=4: */
/* Small bzip2 decompression implementation, by Rob Landley (rob@landley.net).
 *
 * Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
 * which also acknowledges contributions by Mike Burrows, David Wheeler,
 * Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
 * Robert Sedgewick, and Jon L. Bentley.
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
/*
	Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).

	More efficient reading of Huffman codes, a streamlined read_bunzip()
	function, and various other tweaks. In (limited) tests, approximately
	20% faster than bzcat on x86 and about 10% faster on arm.

	Note that about 2/3 of the time is spent in read_bunzip() reversing
	the Burrows-Wheeler transformation. Much of that time is delay
	resulting from cache misses.

	(2010 update by vda: profiled "bzcat <84mbyte.bz2 >/dev/null"
	on x86-64 CPU with L2 > 1M: get_next_block is hotter than read_bunzip:
	%time seconds   calls function
	71.01   12.69     444 get_next_block
	28.65    5.12   93065 read_bunzip
	00.22    0.04 7736490 get_bits
	00.11    0.02      47 dealloc_bunzip
	00.00    0.00   93018 full_write
	...)


	I would ask that anyone benefiting from this work, especially those
	using it in commercial products, consider making a donation to my local
	non-profit hospice organization (www.hospiceacadiana.com) in the name of
	the woman I loved, Toni W. Hagan, who passed away Feb. 12, 2003.

	Manuel
 */
#include "libbb.h"
#include "bb_archive.h"

#if 0
# define dbg(...) bb_error_msg(__VA_ARGS__)
#else
# define dbg(...) ((void)0)
#endif

/* Constants for Huffman coding */
#define MAX_GROUPS          6
#define GROUP_SIZE          50      /* 64 would have been more efficient */
#define MAX_HUFCODE_BITS    20      /* Longest Huffman code allowed */
#define MAX_SYMBOLS         258     /* 256 literals + RUNA + RUNB */
#define SYMBOL_RUNA         0
#define SYMBOL_RUNB         1

/* Status return values */
#define RETVAL_OK                       0
#define RETVAL_LAST_BLOCK               (dbg("%d", __LINE__), -1)
#define RETVAL_NOT_BZIP_DATA            (dbg("%d", __LINE__), -2)
#define RETVAL_UNEXPECTED_INPUT_EOF     (dbg("%d", __LINE__), -3)
#define RETVAL_SHORT_WRITE              (dbg("%d", __LINE__), -4)
#define RETVAL_DATA_ERROR               (dbg("%d", __LINE__), -5)
#define RETVAL_OUT_OF_MEMORY            (dbg("%d", __LINE__), -6)
#define RETVAL_OBSOLETE_INPUT           (dbg("%d", __LINE__), -7)

/* Other housekeeping constants */
#define IOBUF_SIZE          4096

/* This is what we know about each Huffman coding group */
struct group_data {
	/* We have an extra slot at the end of limit[] for a sentinel value. */
	int limit[MAX_HUFCODE_BITS+1], base[MAX_HUFCODE_BITS], permute[MAX_SYMBOLS];
	int minLen, maxLen;
};

/* Structure holding all the housekeeping data, including IO buffers and
 * memory that persists between calls to bunzip.
 * Found the most used member:
 *  cat this_file.c | sed -e 's/"/ /g' -e "s/'/ /g" | xargs -n1 \
 *  | grep 'bd->' | sed 's/^.*bd->/bd->/' | sort | $PAGER
 * and moved it (inbufBitCount) to offset 0.
 */
struct bunzip_data {
	/* I/O tracking data (file handles, buffers, positions, etc.) */
	unsigned inbufBitCount, inbufBits;
	int in_fd, out_fd, inbufCount, inbufPos /*, outbufPos*/;
	uint8_t *inbuf /*,*outbuf*/;

	/* State for interrupting output loop */
	int writeCopies, writePos, writeRunCountdown, writeCount;
	int writeCurrent; /* actually a uint8_t */

	/* The CRC values stored in the block header and calculated from the data */
	uint32_t headerCRC, totalCRC, writeCRC;

	/* Intermediate buffer and its size (in bytes) */
	uint32_t *dbuf;
	unsigned dbufSize;

	/* For I/O error handling */
	jmp_buf jmpbuf;

	/* Big things go last (register-relative addressing can be larger for big offsets) */
	uint32_t crc32Table[256];
	uint8_t selectors[32768]; /* nSelectors=15 bits */
	struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
};
/* typedef struct bunzip_data bunzip_data; -- done in .h file */


/* Return the next nnn bits of input. All reads from the compressed input
   are done through this function. All reads are big endian */
static unsigned get_bits(bunzip_data *bd, int bits_wanted)
{
	unsigned bits = 0;
	/* Cache bd->inbufBitCount in a CPU register (hopefully): */
	int bit_count = bd->inbufBitCount;

	/* If we need to get more data from the byte buffer, do so. (Loop getting
	   one byte at a time to enforce endianness and avoid unaligned access.) */
	while (bit_count < bits_wanted) {

		/* If we need to read more data from file into byte buffer, do so */
		if (bd->inbufPos == bd->inbufCount) {
			/* if "no input fd" case: in_fd == -1, read fails, we jump */
			bd->inbufCount = read(bd->in_fd, bd->inbuf, IOBUF_SIZE);
			if (bd->inbufCount <= 0)
				longjmp(bd->jmpbuf, RETVAL_UNEXPECTED_INPUT_EOF);
			bd->inbufPos = 0;
		}

		/* Avoid 32-bit overflow (dump bit buffer to top of output) */
		if (bit_count >= 24) {
			bits = bd->inbufBits & ((1U << bit_count) - 1);
			bits_wanted -= bit_count;
			bits <<= bits_wanted;
			bit_count = 0;
		}

		/* Grab next 8 bits of input from buffer. */
		bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++];
		bit_count += 8;
	}

	/* Calculate result */
	bit_count -= bits_wanted;
	bd->inbufBitCount = bit_count;
	bits |= (bd->inbufBits >> bit_count) & ((1 << bits_wanted) - 1);

	return bits;
}

/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */
static int get_next_block(bunzip_data *bd)
{
	struct group_data *hufGroup;
	int dbufCount, dbufSize, groupCount, *base, *limit, selector,
		i, j, runPos, symCount, symTotal, nSelectors, byteCount[256];
	int runCnt = runCnt; /* for compiler */
	uint8_t uc, symToByte[256], mtfSymbol[256], *selectors;
	uint32_t *dbuf;
	unsigned origPtr, t;

	dbuf = bd->dbuf;
	dbufSize = bd->dbufSize;
	selectors = bd->selectors;

	/* In bbox, we are ok with aborting through setjmp which is set up in start_bunzip */
#if 0
	/* Reset longjmp I/O error handling */
	i = setjmp(bd->jmpbuf);
	if (i) return i;
#endif

	/* Read in header signature and CRC, then validate signature.
	   (last block signature means CRC is for whole file, return now) */
	i = get_bits(bd, 24);
	j = get_bits(bd, 24);
	bd->headerCRC = get_bits(bd, 32);
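	/* The block header magic is 0x314159265359 (the BCD digits of pi), read
	 * here as two 24-bit halves; the end-of-stream marker is 0x177245385090
	 * (the BCD digits of sqrt(pi)) and is followed by the whole-file CRC. */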
	if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK;
	if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA;

	/* We can add support for blockRandomised if anybody complains. There was
	   some code for this in busybox 1.0.0-pre3, but nobody ever noticed that
	   it didn't actually work. */
	if (get_bits(bd, 1)) return RETVAL_OBSOLETE_INPUT;
	origPtr = get_bits(bd, 24);
	if ((int)origPtr > dbufSize) return RETVAL_DATA_ERROR;

	/* mapping table: if some byte values are never used (encoding things
	   like ascii text), the compression code removes the gaps to have fewer
	   symbols to deal with, and writes a sparse bitfield indicating which
	   values were present. We make a translation table to convert the symbols
	   back to the corresponding bytes. */
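	/* Worked example (illustrative): a block containing only the ASCII digits
	   '0'-'9' (0x30..0x39) sets a single bit in the outer 16-bit map (for the
	   0x30-0x3F range) and ten bits in that range's inner map, so symTotal
	   ends up as 10. */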
	symTotal = 0;
	i = 0;
	t = get_bits(bd, 16);
	do {
		if (t & (1 << 15)) {
			unsigned inner_map = get_bits(bd, 16);
			do {
				if (inner_map & (1 << 15))
					symToByte[symTotal++] = i;
				inner_map <<= 1;
				i++;
			} while (i & 15);
			i -= 16;
		}
		t <<= 1;
		i += 16;
	} while (i < 256);

	/* How many different Huffman coding groups does this block use? */
	groupCount = get_bits(bd, 3);
	if (groupCount < 2 || groupCount > MAX_GROUPS)
		return RETVAL_DATA_ERROR;

	/* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding
	   group. Read in the group selector list, which is stored as MTF encoded
	   bit runs. (MTF=Move To Front, as each value is used it's moved to the
	   start of the list.) */
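	/* Each selector is coded in unary: n leading 1-bits terminated by a 0
	   mean MTF index n, so "0" picks the group currently at the front of the
	   list, "10" the second one, "110" the third, and so on. */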
	for (i = 0; i < groupCount; i++)
		mtfSymbol[i] = i;
	nSelectors = get_bits(bd, 15);
	if (!nSelectors)
		return RETVAL_DATA_ERROR;
	for (i = 0; i < nSelectors; i++) {
		uint8_t tmp_byte;
		/* Get next value */
		int n = 0;
		while (get_bits(bd, 1)) {
			if (n >= groupCount) return RETVAL_DATA_ERROR;
			n++;
		}
		/* Decode MTF to get the next selector */
		tmp_byte = mtfSymbol[n];
		while (--n >= 0)
			mtfSymbol[n + 1] = mtfSymbol[n];
		mtfSymbol[0] = selectors[i] = tmp_byte;
	}

	/* Read the Huffman coding tables for each group, which code for symTotal
	   literal symbols, plus two run symbols (RUNA, RUNB) */
	symCount = symTotal + 2;
	for (j = 0; j < groupCount; j++) {
		uint8_t length[MAX_SYMBOLS];
		/* 8 bits is ALMOST enough for temp[], see below */
		unsigned temp[MAX_HUFCODE_BITS+1];
		int minLen, maxLen, pp, len_m1;

		/* Read Huffman code lengths for each symbol. They're stored in
		   a way similar to mtf; record a starting value for the first symbol,
		   and an offset from the previous value for every symbol after that.
		   (Subtracting 1 before the loop and then adding it back at the end is
		   an optimization that makes the test inside the loop simpler: symbol
		   length 0 becomes negative, so an unsigned inequality catches it.) */
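		/* Illustrative example: a 5-bit starting value of 8 followed by the
		   bit strings "0", "100" and "110" yields symbol lengths 8, 9 and 8
		   (stop; +1 then stop; -1 then stop). */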
		len_m1 = get_bits(bd, 5) - 1;
		for (i = 0; i < symCount; i++) {
			for (;;) {
				int two_bits;
				if ((unsigned)len_m1 > (MAX_HUFCODE_BITS-1))
					return RETVAL_DATA_ERROR;

				/* If first bit is 0, stop. Else second bit indicates whether
				   to increment or decrement the value. Optimization: grab 2
				   bits and unget the second if the first was 0. */
				two_bits = get_bits(bd, 2);
				if (two_bits < 2) {
					bd->inbufBitCount++;
					break;
				}

				/* Add one if second bit is 0, else subtract 1. Avoids if/else */
				len_m1 += (((two_bits+1) & 2) - 1);
			}

			/* Correct for the initial -1, to get the final symbol length */
			length[i] = len_m1 + 1;
		}

		/* Find largest and smallest lengths in this group */
		minLen = maxLen = length[0];
		for (i = 1; i < symCount; i++) {
			if (length[i] > maxLen) maxLen = length[i];
			else if (length[i] < minLen) minLen = length[i];
		}

		/* Calculate permute[], base[], and limit[] tables from length[].
		 *
		 * permute[] is the lookup table for converting Huffman coded symbols
		 * into decoded symbols. base[] is the amount to subtract from the
		 * value of a Huffman symbol of a given length when using permute[].
		 *
		 * limit[] indicates the largest numerical value a symbol with a given
		 * number of bits can have. This is how the Huffman codes can vary in
		 * length: each code with a value>limit[length] needs another bit.
		 */
		hufGroup = bd->groups + j;
		hufGroup->minLen = minLen;
		hufGroup->maxLen = maxLen;

		/* Note that minLen can't be smaller than 1, so we adjust the base
		   and limit array pointers so we're not always wasting the first
		   entry. We do this again when using them (during symbol decoding). */
		base = hufGroup->base - 1;
		limit = hufGroup->limit - 1;

		/* Calculate permute[]. Concurrently, initialize temp[] and limit[]. */
		pp = 0;
		for (i = minLen; i <= maxLen; i++) {
			int k;
			temp[i] = limit[i] = 0;
			for (k = 0; k < symCount; k++)
				if (length[k] == i)
					hufGroup->permute[pp++] = k;
		}

		/* Count symbols coded for at each bit length */
		/* NB: in pathological cases, temp[8] can end up being 256.
		 * That's why uint8_t is too small for temp[]. */
		for (i = 0; i < symCount; i++) temp[length[i]]++;

		/* Calculate limit[] (the largest symbol-coding value at each bit
		 * length, which is (previous limit<<1)+symbols at this level), and
		 * base[] (number of symbols to ignore at each bit length, which is
		 * limit minus the cumulative count of symbols coded for already). */
		pp = t = 0;
		for (i = minLen; i < maxLen;) {
			unsigned temp_i = temp[i];

			pp += temp_i;

			/* We read the largest possible symbol size and then unget bits
			   after determining how many we need, and those extra bits could
			   be set to anything. (They're noise from future symbols.) At
			   each level we're really only interested in the first few bits,
			   so here we set all the trailing to-be-ignored bits to 1 so they
			   don't affect the value>limit[length] comparison. */
			limit[i] = (pp << (maxLen - i)) - 1;
			pp <<= 1;
			t += temp_i;
			base[++i] = pp - t;
		}
		limit[maxLen] = pp + temp[maxLen] - 1;
		limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */
		base[minLen] = 0;
	}

	/* We've finished reading and digesting the block header. Now read this
	   block's Huffman coded symbols from the file and undo the Huffman coding
	   and run length encoding, saving the result into dbuf[dbufCount++] = uc */

	/* Initialize symbol occurrence counters and symbol Move To Front table */
	/*memset(byteCount, 0, sizeof(byteCount)); - smaller, but slower */
	for (i = 0; i < 256; i++) {
		byteCount[i] = 0;
		mtfSymbol[i] = (uint8_t)i;
	}

	/* Loop through compressed symbols. */

	runPos = dbufCount = selector = 0;
	for (;;) {
		int nextSym;

		/* Fetch next Huffman coding group from list. */
		symCount = GROUP_SIZE - 1;
		if (selector >= nSelectors) return RETVAL_DATA_ERROR;
		hufGroup = bd->groups + selectors[selector++];
		base = hufGroup->base - 1;
		limit = hufGroup->limit - 1;

 continue_this_group:
		/* Read next Huffman-coded symbol. */

		/* Note: It is far cheaper to read maxLen bits and back up than it is
		   to read minLen bits and then add one additional bit at a time,
		   testing as we go. Because there is a trailing last block (with file
		   CRC), there is no danger of the overread causing an unexpected EOF
		   for a valid compressed file.
		 */
		if (1) {
			/* As a further optimization, we do the read inline
			   (falling back to a call to get_bits if the buffer runs dry).
			 */
			int new_cnt;
			while ((new_cnt = bd->inbufBitCount - hufGroup->maxLen) < 0) {
				/* bd->inbufBitCount < hufGroup->maxLen */
				if (bd->inbufPos == bd->inbufCount) {
					nextSym = get_bits(bd, hufGroup->maxLen);
					goto got_huff_bits;
				}
				bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++];
				bd->inbufBitCount += 8;
			};
			bd->inbufBitCount = new_cnt; /* "bd->inbufBitCount -= hufGroup->maxLen;" */
			nextSym = (bd->inbufBits >> new_cnt) & ((1 << hufGroup->maxLen) - 1);
 got_huff_bits: ;
		} else { /* unoptimized equivalent */
			nextSym = get_bits(bd, hufGroup->maxLen);
		}
		/* Figure how many bits are in next symbol and unget extras */
		i = hufGroup->minLen;
		while (nextSym > limit[i]) ++i;
		j = hufGroup->maxLen - i;
		if (j < 0)
			return RETVAL_DATA_ERROR;
		bd->inbufBitCount += j;

		/* Huffman decode value to get nextSym (with bounds checking) */
		nextSym = (nextSym >> j) - base[i];
		if ((unsigned)nextSym >= MAX_SYMBOLS)
			return RETVAL_DATA_ERROR;
		nextSym = hufGroup->permute[nextSym];

		/* We have now decoded the symbol, which indicates either a new literal
		   byte, or a repeated run of the most recent literal byte. First,
		   check if nextSym indicates a repeated run, and if so loop collecting
		   how many times to repeat the last literal. */
		if ((unsigned)nextSym <= SYMBOL_RUNB) { /* RUNA or RUNB */

			/* If this is the start of a new run, zero out counter */
			if (runPos == 0) {
				runPos = 1;
				runCnt = 0;
			}

			/* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
			   each bit position, add 1 or 2 instead. For example,
			   1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
			   You can make any bit pattern that way using 1 less symbol than
			   the basic or 0/1 method (except all bits 0, which would use no
			   symbols, but a run of length 0 doesn't mean anything in this
			   context). Thus space is saved. */
			runCnt += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */
			if (runPos < dbufSize) runPos <<= 1;
			goto end_of_huffman_loop;
		}

		/* When we hit the first non-run symbol after a run, we now know
		   how many times to repeat the last literal, so append that many
		   copies to our buffer of decoded symbols (dbuf) now. (The last
		   literal used is the one at the head of the mtfSymbol array.) */
		if (runPos != 0) {
			uint8_t tmp_byte;
			if (dbufCount + runCnt > dbufSize) {
				dbg("dbufCount:%d+runCnt:%d %d > dbufSize:%d RETVAL_DATA_ERROR",
					dbufCount, runCnt, dbufCount + runCnt, dbufSize);
				return RETVAL_DATA_ERROR;
			}
			tmp_byte = symToByte[mtfSymbol[0]];
			byteCount[tmp_byte] += runCnt;
			while (--runCnt >= 0) dbuf[dbufCount++] = (uint32_t)tmp_byte;
			runPos = 0;
		}

		/* Is this the terminating symbol? */
		if (nextSym > symTotal) break;

		/* At this point, nextSym indicates a new literal character. Subtract
		   one to get the position in the MTF array at which this literal is
		   currently to be found. (Note that the result can't be -1 or 0,
		   because 0 and 1 are RUNA and RUNB. But another instance of the
		   first symbol in the mtf array, position 0, would have been handled
		   as part of a run above. Therefore 1 unused mtf position minus
		   2 non-literal nextSym values equals -1.) */
		if (dbufCount >= dbufSize) return RETVAL_DATA_ERROR;
		i = nextSym - 1;
		uc = mtfSymbol[i];

		/* Adjust the MTF array. Since we typically expect to move only a
		 * small number of symbols, and are bound by 256 in any case, using
		 * memmove here would typically be bigger and slower due to function
		 * call overhead and other assorted setup costs. */
		do {
			mtfSymbol[i] = mtfSymbol[i-1];
		} while (--i);
		mtfSymbol[0] = uc;
		uc = symToByte[uc];

		/* We have our literal byte. Save it into dbuf. */
		byteCount[uc]++;
		dbuf[dbufCount++] = (uint32_t)uc;

		/* Skip group initialization if we're not done with this group. Done
		 * this way to avoid compiler warning. */
 end_of_huffman_loop:
		if (--symCount >= 0) goto continue_this_group;
	}

	/* At this point, we've read all the Huffman-coded symbols (and repeated
	   runs) for this block from the input stream, and decoded them into the
	   intermediate buffer. There are dbufCount many decoded bytes in dbuf[].
	   Now undo the Burrows-Wheeler transform on dbuf.
	   See http://dogma.net/markn/articles/bwt/bwt.htm
	 */

	/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
	j = 0;
	for (i = 0; i < 256; i++) {
		int tmp_count = j + byteCount[i];
		byteCount[i] = j;
		j = tmp_count;
	}

	/* Figure out what order dbuf would be in if we sorted it. */
	for (i = 0; i < dbufCount; i++) {
		uint8_t tmp_byte = (uint8_t)dbuf[i];
		int tmp_count = byteCount[tmp_byte];
		dbuf[tmp_count] |= (i << 8);
		byteCount[tmp_byte] = tmp_count + 1;
	}
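	/* Each dbuf[] entry now holds a literal byte in bits 0-7 and, in bits
	   8-31, the index of the next entry to visit; read_bunzip() follows this
	   chain, starting from dbuf[origPtr], to regenerate the original data. */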

	/* Decode first byte by hand to initialize "previous" byte. Note that it
	   doesn't get output, and if the first three characters are identical
	   it doesn't qualify as a run (hence writeRunCountdown=5). */
	if (dbufCount) {
		uint32_t tmp;
		if ((int)origPtr >= dbufCount) return RETVAL_DATA_ERROR;
		tmp = dbuf[origPtr];
		bd->writeCurrent = (uint8_t)tmp;
		bd->writePos = (tmp >> 8);
		bd->writeRunCountdown = 5;
	}
	bd->writeCount = dbufCount;

	return RETVAL_OK;
}

/* Undo the Burrows-Wheeler transform on the intermediate buffer to produce
   output. Up to len bytes of decompressed data are written to outbuf
   (start_bunzip() takes no output descriptor; writing the result out is the
   caller's job).

   NB: read_bunzip returns < 0 on error, or the number of *unfilled* bytes
   in outbuf. IOW: on EOF returns len ("all bytes are not filled"), not 0.
   (Why? This allows to get rid of one local variable)
*/
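/* A minimal caller sketch (illustrative only; not the exact loop used by
   busybox callers, and "out_fd" here is just an assumed output descriptor):

	char obuf[IOBUF_SIZE];
	int unfilled;
	do {
		unfilled = read_bunzip(bd, obuf, IOBUF_SIZE);
		if (unfilled < 0)
			bb_error_msg_and_die("bunzip error %d", unfilled);
		full_write(out_fd, obuf, IOBUF_SIZE - unfilled);
	} while (unfilled == 0); // 0 unfilled: obuf was completely filled, more to come
 */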
int FAST_FUNC read_bunzip(bunzip_data *bd, char *outbuf, int len)
{
	const uint32_t *dbuf;
	int pos, current, previous;
	uint32_t CRC;

	/* If we already have error/end indicator, return it */
	if (bd->writeCount < 0)
		return bd->writeCount;

	dbuf = bd->dbuf;

	/* Register-cached state (hopefully): */
	pos = bd->writePos;
	current = bd->writeCurrent;
	CRC = bd->writeCRC; /* small loss on x86-32 (not enough regs), win on x86-64 */

	/* We will always have pending decoded data to write into the output
	   buffer unless this is the very first call (in which case we haven't
	   Huffman-decoded a block into the intermediate buffer yet). */
	if (bd->writeCopies) {

 dec_writeCopies:
		/* Inside the loop, writeCopies means extra copies (beyond 1) */
		--bd->writeCopies;

		/* Loop outputting bytes */
		for (;;) {

			/* If the output buffer is full, save cached state and return */
			if (--len < 0) {
				/* Unlikely branch.
				 * Use of "goto" instead of keeping code here
				 * helps compiler to realize this. */
				goto outbuf_full;
			}

			/* Write next byte into output buffer, updating CRC */
			*outbuf++ = current;
			CRC = (CRC << 8) ^ bd->crc32Table[(CRC >> 24) ^ current];

			/* Loop now if we're outputting multiple copies of this byte */
			if (bd->writeCopies) {
				/* Unlikely branch */
				/*--bd->writeCopies;*/
				/*continue;*/
				/* Same, but (ab)using other existing --writeCopies operation
				 * (and this if() compiles into just test+branch pair): */
				goto dec_writeCopies;
			}
 decode_next_byte:
			if (--bd->writeCount < 0)
				break; /* input block is fully consumed, need next one */

			/* Follow sequence vector to undo Burrows-Wheeler transform */
			previous = current;
			pos = dbuf[pos];
			current = (uint8_t)pos;
			pos >>= 8;

			/* After 3 consecutive copies of the same byte, the 4th
			 * is a repeat count. We count down from 4 instead
			 * of counting up because testing for non-zero is faster */
			if (--bd->writeRunCountdown != 0) {
				if (current != previous)
					bd->writeRunCountdown = 4;
			} else {
				/* Unlikely branch */
				/* We have a repeated run, this byte indicates the count */
				bd->writeCopies = current;
				current = previous;
				bd->writeRunCountdown = 5;

				/* Sometimes there are just 3 bytes (run length 0) */
				if (!bd->writeCopies) goto decode_next_byte;

				/* Subtract the 1 copy we'd output anyway to get extras */
				--bd->writeCopies;
			}
		} /* for(;;) */

		/* Decompression of this input block completed successfully */
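		/* Per the bzip2 format, the stream CRC folds in each block CRC as
		 * totalCRC = rotate_left_32(totalCRC, 1) ^ blockCRC: */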
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 623 | bd->writeCRC = CRC = ~CRC; |
| 624 | bd->totalCRC = ((bd->totalCRC << 1) | (bd->totalCRC >> 31)) ^ CRC; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 625 | |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 626 | /* If this block had a CRC error, force file level CRC error */ |
| 627 | if (CRC != bd->headerCRC) { |
Denis Vlasenko | 52a4388 | 2007-10-10 20:53:41 +0000 | [diff] [blame] | 628 | bd->totalCRC = bd->headerCRC + 1; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 629 | return RETVAL_LAST_BLOCK; |
| 630 | } |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 631 | } |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 632 | |
Eric Andersen | aff114c | 2004-04-14 17:51:38 +0000 | [diff] [blame] | 633 | /* Refill the intermediate buffer by Huffman-decoding next block of input */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 634 | { |
| 635 | int r = get_next_block(bd); |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 636 | if (r) { /* error/end */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 637 | bd->writeCount = r; |
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 638 | return (r != RETVAL_LAST_BLOCK) ? r : len; |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 639 | } |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 640 | } |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 641 | |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 642 | CRC = ~0; |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 643 | pos = bd->writePos; |
| 644 | current = bd->writeCurrent; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 645 | goto decode_next_byte; |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 646 | |
| 647 | outbuf_full: |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 648 | /* Output buffer is full, save cached state and return */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 649 | bd->writePos = pos; |
| 650 | bd->writeCurrent = current; |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 651 | bd->writeCRC = CRC; |
| 652 | |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 653 | bd->writeCopies++; |
Denys Vlasenko | bf3bec5 | 2010-10-29 18:16:29 +0200 | [diff] [blame] | 654 | |
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 655 | return 0; |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 656 | } |
| 657 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 658 | /* Allocate the structure, read file header. If in_fd==-1, inbuf must contain |
| 659 | a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are |
| 660 | ignored, and data is read from file handle into temporary buffer. */ |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 661 | |
/* Because bunzip2 is used for help text unpacking, and because bb_show_usage()
   should work for NOFORK applets too, we must be extremely careful not to leak
   any allocations! */
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 665 | int FAST_FUNC start_bunzip(bunzip_data **bdp, int in_fd, |
| 666 | const void *inbuf, int len) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 667 | { |
| 668 | bunzip_data *bd; |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 669 | unsigned i; |
| 670 | enum { |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 671 | BZh0 = ('B' << 24) + ('Z' << 16) + ('h' << 8) + '0', |
| 672 | h0 = ('h' << 8) + '0', |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 673 | }; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 674 | |
| 675 | /* Figure out how much data to allocate */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 676 | i = sizeof(bunzip_data); |
| 677 | if (in_fd != -1) i += IOBUF_SIZE; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 678 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 679 | /* Allocate bunzip_data. Most fields initialize to zero. */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 680 | bd = *bdp = xzalloc(i); |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 681 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 682 | /* Setup input buffer */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 683 | bd->in_fd = in_fd; |
| 684 | if (-1 == in_fd) { |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 685 | /* in this case, bd->inbuf is read-only */ |
| 686 | bd->inbuf = (void*)inbuf; /* cast away const-ness */ |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 687 | } else { |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 688 | bd->inbuf = (uint8_t*)(bd + 1); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 689 | memcpy(bd->inbuf, inbuf, len); |
| 690 | } |
| 691 | bd->inbufCount = len; |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 692 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 693 | /* Init the CRC32 table (big endian) */ |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 694 | crc32_filltable(bd->crc32Table, 1); |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 695 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 696 | /* Setup for I/O error handling via longjmp */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 697 | i = setjmp(bd->jmpbuf); |
| 698 | if (i) return i; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 699 | |
| 700 | /* Ensure that file starts with "BZh['1'-'9']." */ |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 701 | /* Update: now caller verifies 1st two bytes, makes .gz/.bz2 |
| 702 | * integration easier */ |
| 703 | /* was: */ |
| 704 | /* i = get_bits(bd, 32); */ |
| 705 | /* if ((unsigned)(i - BZh0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; */ |
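	/* Single unsigned compare accepts exactly 'h' followed by ASCII '1'-'9':
	 * (i - h0) falls in 1..9 only for those 16-bit values */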
| 706 | i = get_bits(bd, 16); |
| 707 | if ((unsigned)(i - h0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 708 | |
/* Fourth byte of the original header (ASCII '1'-'9') indicates block size in
   units of 100k of uncompressed data. Allocate intermediate buffer for block. */
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 711 | /* bd->dbufSize = 100000 * (i - BZh0); */ |
| 712 | bd->dbufSize = 100000 * (i - h0); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 713 | |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 714 | /* Cannot use xmalloc - may leak bd in NOFORK case! */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 715 | bd->dbuf = malloc_or_warn(bd->dbufSize * sizeof(bd->dbuf[0])); |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 716 | if (!bd->dbuf) { |
| 717 | free(bd); |
| 718 | xfunc_die(); |
| 719 | } |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 720 | return RETVAL_OK; |
| 721 | } |
| 722 | |
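/* Illustration only, not part of the library: a minimal sketch of the
 * in-memory (in_fd == -1) mode described above.  The helper name and the
 * "src"/"dst" buffers are hypothetical.  It assumes the caller has already
 * stripped the 2-byte "BZ" magic (as the header check above expects), that
 * src holds the rest of one .bz2 stream, and that dst is large enough for
 * all of the uncompressed data.  The stream-level CRC is not re-checked. */
#if 0
static int bunzip_mem_sketch(const void *src, int src_len, char *dst, int dst_len)
{
	bunzip_data *bd;
	int n;

	n = start_bunzip(&bd, /*in_fd:*/ -1, src, src_len);
	if (n == RETVAL_OK) {
		/* read_bunzip() returns how many bytes of dst it did NOT fill,
		 * or a negative RETVAL_* code on error */
		n = read_bunzip(bd, dst, dst_len);
		if (n >= 0)
			n = dst_len - n; /* number of bytes actually produced */
	}
	dealloc_bunzip(bd);
	return n;
}
#endif
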
Denis Vlasenko | defc1ea | 2008-06-27 02:52:20 +0000 | [diff] [blame] | 723 | void FAST_FUNC dealloc_bunzip(bunzip_data *bd) |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 724 | { |
Denis Vlasenko | 4b924f3 | 2007-05-30 00:29:55 +0000 | [diff] [blame] | 725 | free(bd->dbuf); |
| 726 | free(bd); |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 727 | } |
| 728 | |
| 729 | |
| 730 | /* Decompress src_fd to dst_fd. Stops at end of bzip data, not end of file. */ |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 731 | IF_DESKTOP(long long) int FAST_FUNC |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 732 | unpack_bz2_stream(transformer_state_t *xstate) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 733 | { |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 734 | IF_DESKTOP(long long total_written = 0;) |
Denys Vlasenko | 4d4d1a0 | 2010-11-01 02:19:47 +0100 | [diff] [blame] | 735 | bunzip_data *bd; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 736 | char *outbuf; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 737 | int i; |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 738 | unsigned len; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 739 | |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 740 | if (check_signature16(xstate, BZIP2_MAGIC)) |
Denys Vlasenko | 8a6a2f9 | 2012-03-06 16:27:48 +0100 | [diff] [blame] | 741 | return -1; |
| 742 | |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 743 | outbuf = xmalloc(IOBUF_SIZE); |
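	/* len = bytes of the next stream already buffered in outbuf past its
	 * 2-byte "BZ" magic; nothing is buffered yet for the first stream */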
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 744 | len = 0; |
| 745 | while (1) { /* "Process one BZ... stream" loop */ |
| 746 | |
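		/* outbuf + 2 skips the two bytes reserved for the "BZ" magic;
		 * on repeat iterations the leftover bytes after it seed this
		 * stream's input buffer */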
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 747 | i = start_bunzip(&bd, xstate->src_fd, outbuf + 2, len); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 748 | |
| 749 | if (i == 0) { |
| 750 | while (1) { /* "Produce some output bytes" loop */ |
| 751 | i = read_bunzip(bd, outbuf, IOBUF_SIZE); |
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 752 | if (i < 0) /* error? */ |
| 753 | break; |
| 754 | i = IOBUF_SIZE - i; /* number of bytes produced */ |
| 755 | if (i == 0) /* EOF? */ |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 756 | break; |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 757 | if (i != transformer_write(xstate, outbuf, i)) { |
Denys Vlasenko | 8531c43 | 2010-11-01 01:38:54 +0100 | [diff] [blame] | 758 | i = RETVAL_SHORT_WRITE; |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 759 | goto release_mem; |
| 760 | } |
| 761 | IF_DESKTOP(total_written += i;) |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 762 | } |
| 763 | } |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 764 | |
Denys Vlasenko | c531b9a | 2011-10-31 01:05:16 +0100 | [diff] [blame] | 765 | if (i != RETVAL_LAST_BLOCK |
| 766 | /* Observed case when i == RETVAL_OK: |
| 767 | * "bzcat z.bz2", where "z.bz2" is a bzipped zero-length file |
| 768 | * (to be exact, z.bz2 is exactly these 14 bytes: |
| 769 | * 42 5a 68 39 17 72 45 38 50 90 00 00 00 00). |
| 770 | */ |
| 771 | && i != RETVAL_OK |
| 772 | ) { |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 773 | bb_error_msg("bunzip error %d", i); |
| 774 | break; |
| 775 | } |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 776 | if (bd->headerCRC != bd->totalCRC) { |
Denis Vlasenko | 66bbfbd | 2007-09-28 23:45:56 +0000 | [diff] [blame] | 777 | bb_error_msg("CRC error"); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 778 | break; |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 779 | } |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 780 | |
| 781 | /* Successfully unpacked one BZ stream */ |
| 782 | i = RETVAL_OK; |
| 783 | |
| 784 | /* Do we have "BZ..." after last processed byte? |
| 785 | * pbzip2 (parallelized bzip2) produces such files. |
| 786 | */ |
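		/* Carry unconsumed input over to the front of outbuf; it may
		 * already start with the next stream's "BZ" header */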
| 787 | len = bd->inbufCount - bd->inbufPos; |
| 788 | memcpy(outbuf, &bd->inbuf[bd->inbufPos], len); |
| 789 | if (len < 2) { |
Denys Vlasenko | b4c11c1 | 2014-12-07 00:44:00 +0100 | [diff] [blame] | 790 | if (safe_read(xstate->src_fd, outbuf + len, 2 - len) != 2 - len) |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 791 | break; |
| 792 | len = 2; |
| 793 | } |
| 794 | if (*(uint16_t*)outbuf != BZIP2_MAGIC) /* "BZ"? */ |
| 795 | break; |
| 796 | dealloc_bunzip(bd); |
| 797 | len -= 2; |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 798 | } |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 799 | |
| 800 | release_mem: |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 801 | dealloc_bunzip(bd); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 802 | free(outbuf); |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 803 | |
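	/* Negative RETVAL_* error code, or bytes written on success
	 * (byte counting is compiled in only for DESKTOP builds, else 0) */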
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 804 | return i ? i : IF_DESKTOP(total_written) + 0; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 805 | } |
| 806 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 807 | #ifdef TESTING |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 808 | |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 809 | static char *const bunzip_errors[] = { |
| 810 | NULL, "Bad file checksum", "Not bzip data", |
| 811 | "Unexpected input EOF", "Unexpected output EOF", "Data error", |
| 812 | "Out of memory", "Obsolete (pre 0.9.5) bzip format not supported" |
| 813 | }; |
Glenn L McGrath | 237ae42 | 2002-11-03 14:05:15 +0000 | [diff] [blame] | 814 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 815 | /* Dumb little test thing, decompress stdin to stdout */ |
Bernhard Reutner-Fischer | febe3c4 | 2007-04-04 20:52:03 +0000 | [diff] [blame] | 816 | int main(int argc, char **argv) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 817 | { |
	char c;
	int i;
	transformer_state_t xstate;

	/* unpack_bz2_stream() takes a transformer_state_t nowadays. Minimal,
	 * best-effort setup: assumes zero-init plus src_fd/dst_fd is enough
	 * for check_signature16() and transformer_write() to use stdin/stdout. */
	memset(&xstate, 0, sizeof(xstate));
	xstate.src_fd = STDIN_FILENO;
	xstate.dst_fd = STDOUT_FILENO;
	i = unpack_bz2_stream(&xstate);
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 821 | if (i < 0) |
Denis Vlasenko | f5d8c90 | 2008-06-26 14:32:57 +0000 | [diff] [blame] | 822 | fprintf(stderr, "%s\n", bunzip_errors[-i]); |
Bernhard Reutner-Fischer | 5e25ddb | 2008-05-19 09:48:17 +0000 | [diff] [blame] | 823 | else if (read(STDIN_FILENO, &c, 1)) |
Denis Vlasenko | f5d8c90 | 2008-06-26 14:32:57 +0000 | [diff] [blame] | 824 | fprintf(stderr, "Trailing garbage ignored\n"); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 825 | return -i; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 826 | } |
| 827 | #endif |