/* vi: set sw=4 ts=4: */
/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).

   Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
   which also acknowledges contributions by Mike Burrows, David Wheeler,
   Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
   Robert Sedgewick, and Jon L. Bentley.

   Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/

/*
	Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).

	More efficient reading of Huffman codes, a streamlined read_bunzip()
	function, and various other tweaks. In (limited) tests, approximately
	20% faster than bzcat on x86 and about 10% faster on arm.

	Note that about 2/3 of the time is spent in read_bunzip() reversing
	the Burrows-Wheeler transformation. Much of that time is delay
	resulting from cache misses.

	(2010 update by vda: profiled "bzcat <84mbyte.bz2 >/dev/null"
	on x86-64 CPU with L2 > 1M: get_next_block is hotter than read_bunzip:
	%time seconds   calls function
	71.01   12.69     444 get_next_block
	28.65    5.12   93065 read_bunzip
	00.22    0.04 7736490 get_bits
	00.11    0.02      47 dealloc_bunzip
	00.00    0.00   93018 full_write
	...)


	I would ask that anyone benefiting from this work, especially those
	using it in commercial products, consider making a donation to my local
	non-profit hospice organization (www.hospiceacadiana.com) in the name of
	the woman I loved, Toni W. Hagan, who passed away Feb. 12, 2003.

	Manuel
*/

#include "libbb.h"
#include "unarchive.h"

/* Constants for Huffman coding */
#define MAX_GROUPS          6
#define GROUP_SIZE          50      /* 64 would have been more efficient */
#define MAX_HUFCODE_BITS    20      /* Longest Huffman code allowed */
#define MAX_SYMBOLS         258     /* 256 literals + RUNA + RUNB */
#define SYMBOL_RUNA         0
#define SYMBOL_RUNB         1

/* Status return values */
#define RETVAL_OK                       0
#define RETVAL_LAST_BLOCK               (-1)
#define RETVAL_NOT_BZIP_DATA            (-2)
#define RETVAL_UNEXPECTED_INPUT_EOF     (-3)
//#define RETVAL_SHORT_WRITE            (-4)
#define RETVAL_DATA_ERROR               (-5)
#define RETVAL_OUT_OF_MEMORY            (-6)
#define RETVAL_OBSOLETE_INPUT           (-7)

/* Other housekeeping constants */
#define IOBUF_SIZE          4096

/* This is what we know about each Huffman coding group */
struct group_data {
	/* We have an extra slot at the end of limit[] for a sentinel value. */
	int limit[MAX_HUFCODE_BITS+1], base[MAX_HUFCODE_BITS], permute[MAX_SYMBOLS];
	int minLen, maxLen;
};

/* Structure holding all the housekeeping data, including IO buffers and
 * memory that persists between calls to bunzip
 * Found the most used member:
 *  cat this_file.c | sed -e 's/"/ /g' -e "s/'/ /g" | xargs -n1 \
 *  | grep 'bd->' | sed 's/^.*bd->/bd->/' | sort | $PAGER
 * and moved it (inbufBitCount) to offset 0.
 */
struct bunzip_data {
	/* I/O tracking data (file handles, buffers, positions, etc.) */
	unsigned inbufBitCount, inbufBits;
	int in_fd, out_fd, inbufCount, inbufPos /*, outbufPos*/;
	uint8_t *inbuf /*,*outbuf*/;

	/* State for interrupting output loop */
	int writeCopies, writePos, writeRunCountdown, writeCount;
	int writeCurrent; /* actually a uint8_t */

	/* The CRC values stored in the block header and calculated from the data */
	uint32_t headerCRC, totalCRC, writeCRC;

	/* Intermediate buffer and its size (in bytes) */
	uint32_t *dbuf;
	unsigned dbufSize;

	/* For I/O error handling */
	jmp_buf jmpbuf;

	/* Big things go last (register-relative addressing can be larger for big offsets) */
	uint32_t crc32Table[256];
	uint8_t selectors[32768]; /* nSelectors=15 bits */
	struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
};
/* typedef struct bunzip_data bunzip_data; -- done in .h file */


/* Return the next nnn bits of input. All reads from the compressed input
   are done through this function. All reads are big endian. */
static unsigned get_bits(bunzip_data *bd, int bits_wanted)
{
	unsigned bits = 0;
	/* Cache bd->inbufBitCount in a CPU register (hopefully): */
	int bit_count = bd->inbufBitCount;

	/* If we need to get more data from the byte buffer, do so. (Loop getting
	   one byte at a time to enforce endianness and avoid unaligned access.) */
	while (bit_count < bits_wanted) {

		/* If we need to read more data from file into byte buffer, do so */
		if (bd->inbufPos == bd->inbufCount) {
			/* if "no input fd" case: in_fd == -1, read fails, we jump */
			bd->inbufCount = read(bd->in_fd, bd->inbuf, IOBUF_SIZE);
			if (bd->inbufCount <= 0)
				longjmp(bd->jmpbuf, RETVAL_UNEXPECTED_INPUT_EOF);
			bd->inbufPos = 0;
		}

		/* Avoid 32-bit overflow (dump bit buffer to top of output) */
		if (bit_count >= 24) {
			bits = bd->inbufBits & ((1 << bit_count) - 1);
			bits_wanted -= bit_count;
			bits <<= bits_wanted;
			bit_count = 0;
		}

		/* Grab next 8 bits of input from buffer. */
		bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++];
		bit_count += 8;
	}

	/* Calculate result */
	bit_count -= bits_wanted;
	bd->inbufBitCount = bit_count;
	bits |= (bd->inbufBits >> bit_count) & ((1 << bits_wanted) - 1);

	return bits;
}
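
/* (Added illustration, not from the original sources: with
 * inbufBitCount == 8 and the low byte of inbufBits == 0x5B (binary
 * 0101 1011), get_bits(bd, 3) returns 0x2 (binary 010, the three most
 * significant of those 8 bits) and leaves inbufBitCount == 5 -- the
 * stream is consumed MSB-first, matching bzip2's big-endian packing.) */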

/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */
static int get_next_block(bunzip_data *bd)
{
	struct group_data *hufGroup;
	int dbufCount, nextSym, dbufSize, groupCount, *base, *limit, selector,
		i, j, k, t, runPos, symCount, symTotal, nSelectors, byteCount[256];
	uint8_t uc, symToByte[256], mtfSymbol[256], *selectors;
	uint32_t *dbuf;
	unsigned origPtr;

	dbuf = bd->dbuf;
	dbufSize = bd->dbufSize;
	selectors = bd->selectors;

	/* Reset longjmp I/O error handling */
	i = setjmp(bd->jmpbuf);
	if (i) return i;

	/* Read in header signature and CRC, then validate signature.
	   (last block signature means CRC is for whole file, return now) */
	i = get_bits(bd, 24);
	j = get_bits(bd, 24);
	bd->headerCRC = get_bits(bd, 32);
	if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK;
	if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA;
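	/* (Added note: the two 48-bit signatures are the BCD digits of pi
	 * (3.14159265359, block header) and sqrt(pi) (1.77245385090, end of
	 * stream marker), compared here 24 bits at a time.) */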

	/* We can add support for blockRandomised if anybody complains. There was
	   some code for this in busybox 1.0.0-pre3, but nobody ever noticed that
	   it didn't actually work. */
	if (get_bits(bd, 1)) return RETVAL_OBSOLETE_INPUT;
	origPtr = get_bits(bd, 24);
	if ((int)origPtr > dbufSize) return RETVAL_DATA_ERROR;

	/* mapping table: if some byte values are never used (encoding things
	   like ascii text), the compression code removes the gaps to have fewer
	   symbols to deal with, and writes a sparse bitfield indicating which
	   values were present. We make a translation table to convert the symbols
	   back to the corresponding bytes. */
	t = get_bits(bd, 16);
	symTotal = 0;
	for (i = 0; i < 16; i++) {
		if (t & (1 << (15-i))) {
			k = get_bits(bd, 16);
			for (j = 0; j < 16; j++)
				if (k & (1 << (15-j)))
					symToByte[symTotal++] = (16*i) + j;
		}
	}
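	/* (Added example: if a block uses only the bytes 'a'..'c', the outer
	 * bitmap marks only range i = 6 (values 96..111), the per-range bitmap
	 * marks j = 1, 2, 3, and we end up with symTotal = 3 and
	 * symToByte[] = { 97, 98, 99 }.) */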

	/* How many different Huffman coding groups does this block use? */
	groupCount = get_bits(bd, 3);
	if (groupCount < 2 || groupCount > MAX_GROUPS)
		return RETVAL_DATA_ERROR;

	/* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding
	   group. Read in the group selector list, which is stored as MTF encoded
	   bit runs. (MTF=Move To Front, as each value is used it's moved to the
	   start of the list.) */
	nSelectors = get_bits(bd, 15);
	if (!nSelectors) return RETVAL_DATA_ERROR;
	for (i = 0; i < groupCount; i++) mtfSymbol[i] = i;
	for (i = 0; i < nSelectors; i++) {

		/* Get next value */
		for (j = 0; get_bits(bd, 1); j++)
			if (j >= groupCount) return RETVAL_DATA_ERROR;

		/* Decode MTF to get the next selector */
		uc = mtfSymbol[j];
		for (; j; j--)
			mtfSymbol[j] = mtfSymbol[j-1];
		mtfSymbol[0] = selectors[i] = uc;
	}
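	/* (Added example of the MTF step above: with groupCount = 3 the list
	 * starts as {0,1,2}. A run of two 1-bits (j = 2) selects group 2 and
	 * the list becomes {2,0,1}; an immediately following 0-bit (j = 0)
	 * selects group 2 again without moving anything.) */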

	/* Read the Huffman coding tables for each group, which code for symTotal
	   literal symbols, plus two run symbols (RUNA, RUNB) */
	symCount = symTotal + 2;
	for (j = 0; j < groupCount; j++) {
		uint8_t length[MAX_SYMBOLS];
		/* 8 bits is ALMOST enough for temp[], see below */
		unsigned temp[MAX_HUFCODE_BITS+1];
		int minLen, maxLen, pp;

		/* Read Huffman code lengths for each symbol. They're stored in
		   a way similar to mtf; record a starting value for the first symbol,
		   and an offset from the previous value for every symbol after that.
		   (Subtracting 1 before the loop and then adding it back at the end is
		   an optimization that makes the test inside the loop simpler: symbol
		   length 0 becomes negative, so an unsigned inequality catches it.) */
		t = get_bits(bd, 5) - 1;
		for (i = 0; i < symCount; i++) {
			for (;;) {
				if ((unsigned)t > (MAX_HUFCODE_BITS-1))
					return RETVAL_DATA_ERROR;

				/* If first bit is 0, stop. Else second bit indicates whether
				   to increment or decrement the value. Optimization: grab 2
				   bits and unget the second if the first was 0. */
				k = get_bits(bd, 2);
				if (k < 2) {
					bd->inbufBitCount++;
					break;
				}

				/* Add one if second bit 1, else subtract 1. Avoids if/else */
				t += (((k+1) & 2) - 1);
			}

			/* Correct for the initial -1, to get the final symbol length */
			length[i] = t + 1;
		}

		/* Find largest and smallest lengths in this group */
		minLen = maxLen = length[0];
		for (i = 1; i < symCount; i++) {
			if (length[i] > maxLen) maxLen = length[i];
			else if (length[i] < minLen) minLen = length[i];
		}

		/* Calculate permute[], base[], and limit[] tables from length[].
		 *
		 * permute[] is the lookup table for converting Huffman coded symbols
		 * into decoded symbols. base[] is the amount to subtract from the
		 * value of a Huffman symbol of a given length when using permute[].
		 *
		 * limit[] indicates the largest numerical value a symbol with a given
		 * number of bits can have. This is how the Huffman codes can vary in
		 * length: each code with a value>limit[length] needs another bit.
		 */
		hufGroup = bd->groups + j;
		hufGroup->minLen = minLen;
		hufGroup->maxLen = maxLen;

		/* Note that minLen can't be smaller than 1, so we adjust the base
		   and limit array pointers so we're not always wasting the first
		   entry. We do this again when using them (during symbol decoding). */
		base = hufGroup->base - 1;
		limit = hufGroup->limit - 1;

		/* Calculate permute[]. Concurrently, initialize temp[] and limit[]. */
		pp = 0;
		for (i = minLen; i <= maxLen; i++) {
			temp[i] = limit[i] = 0;
			for (t = 0; t < symCount; t++)
				if (length[t] == i)
					hufGroup->permute[pp++] = t;
		}

		/* Count symbols coded for at each bit length */
		/* NB: in pathological cases, temp[8] can end up being 256.
		 * That's why uint8_t is too small for temp[]. */
		for (i = 0; i < symCount; i++) temp[length[i]]++;

		/* Calculate limit[] (the largest symbol-coding value at each bit
		 * length, which is (previous limit<<1)+symbols at this level), and
		 * base[] (number of symbols to ignore at each bit length, which is
		 * limit minus the cumulative count of symbols coded for already). */
		pp = t = 0;
		for (i = minLen; i < maxLen; i++) {
			pp += temp[i];

			/* We read the largest possible symbol size and then unget bits
			   after determining how many we need, and those extra bits could
			   be set to anything. (They're noise from future symbols.) At
			   each level we're really only interested in the first few bits,
			   so here we set all the trailing to-be-ignored bits to 1 so they
			   don't affect the value>limit[length] comparison. */
			limit[i] = (pp << (maxLen - i)) - 1;
			pp <<= 1;
			t += temp[i];
			base[i+1] = pp - t;
		}
		limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */
		limit[maxLen] = pp + temp[maxLen] - 1;
		base[minLen] = 0;
	}
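	/* (Added worked example, not from the original sources: for a group
	 * with code lengths {1,2,3,3} for symbols A,B,C,D the canonical codes
	 * are A=0, B=10, C=110, D=111, and the loops above produce
	 *   permute[]   = {A,B,C,D}
	 *   limit[1..3] = {3, 5, 7}   (compared against a maxLen=3 bit read)
	 *   base[1..3]  = {0, 1, 4}
	 * Decoding "110": j = 6 exceeds limit[1] and limit[2], so the code is
	 * 3 bits long and permute[6 - base[3]] = permute[2] = C.) */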

	/* We've finished reading and digesting the block header. Now read this
	   block's Huffman coded symbols from the file and undo the Huffman coding
	   and run length encoding, saving the result into dbuf[dbufCount++] = uc */

	/* Initialize symbol occurrence counters and symbol Move To Front table */
	memset(byteCount, 0, sizeof(byteCount)); /* smaller, maybe slower? */
	for (i = 0; i < 256; i++) {
		//byteCount[i] = 0;
		mtfSymbol[i] = (uint8_t)i;
	}

	/* Loop through compressed symbols. */

	runPos = dbufCount = selector = 0;
	for (;;) {

		/* Fetch next Huffman coding group from list. */
		symCount = GROUP_SIZE - 1;
		if (selector >= nSelectors) return RETVAL_DATA_ERROR;
		hufGroup = bd->groups + selectors[selector++];
		base = hufGroup->base - 1;
		limit = hufGroup->limit - 1;
 continue_this_group:

		/* Read next Huffman-coded symbol. */

		/* Note: It is far cheaper to read maxLen bits and back up than it is
		   to read minLen bits and then an additional bit at a time, testing
		   as we go. Because there is a trailing last block (with file CRC),
		   there is no danger of the overread causing an unexpected EOF for a
		   valid compressed file. As a further optimization, we do the read
		   inline (falling back to a call to get_bits if the buffer runs
		   dry). The following (up to got_huff_bits:) is equivalent to
		   j = get_bits(bd, hufGroup->maxLen);
		 */
		while ((int)(bd->inbufBitCount) < hufGroup->maxLen) {
			if (bd->inbufPos == bd->inbufCount) {
				j = get_bits(bd, hufGroup->maxLen);
				goto got_huff_bits;
			}
			bd->inbufBits = (bd->inbufBits << 8) | bd->inbuf[bd->inbufPos++];
			bd->inbufBitCount += 8;
		}
		bd->inbufBitCount -= hufGroup->maxLen;
		j = (bd->inbufBits >> bd->inbufBitCount) & ((1 << hufGroup->maxLen) - 1);

 got_huff_bits:

		/* Figure out how many bits are in next symbol and unget extras */
		i = hufGroup->minLen;
		while (j > limit[i]) ++i;
		bd->inbufBitCount += (hufGroup->maxLen - i);

		/* Huffman decode value to get nextSym (with bounds checking) */
		if (i > hufGroup->maxLen)
			return RETVAL_DATA_ERROR;
		j = (j >> (hufGroup->maxLen - i)) - base[i];
		if ((unsigned)j >= MAX_SYMBOLS)
			return RETVAL_DATA_ERROR;
		nextSym = hufGroup->permute[j];

		/* We have now decoded the symbol, which indicates either a new literal
		   byte, or a repeated run of the most recent literal byte. First,
		   check if nextSym indicates a repeated run, and if so loop collecting
		   how many times to repeat the last literal. */
		if ((unsigned)nextSym <= SYMBOL_RUNB) { /* RUNA or RUNB */

			/* If this is the start of a new run, zero out counter */
			if (!runPos) {
				runPos = 1;
				t = 0;
			}

			/* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
			   each bit position, add 1 or 2 instead. For example,
			   1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
			   You can make any bit pattern that way using 1 less symbol than
			   the basic or 0/1 method (except all bits 0, which would use no
			   symbols, but a run of length 0 doesn't mean anything in this
			   context). Thus space is saved. */
			t += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */
			if (runPos < dbufSize) runPos <<= 1;
			goto end_of_huffman_loop;
		}

		/* When we hit the first non-run symbol after a run, we now know
		   how many times to repeat the last literal, so append that many
		   copies to our buffer of decoded symbols (dbuf) now. (The last
		   literal used is the one at the head of the mtfSymbol array.) */
		if (runPos) {
			runPos = 0;
			if (dbufCount + t >= dbufSize) return RETVAL_DATA_ERROR;

			uc = symToByte[mtfSymbol[0]];
			byteCount[uc] += t;
			while (t--) dbuf[dbufCount++] = uc;
		}

		/* Is this the terminating symbol? */
		if (nextSym > symTotal) break;

		/* At this point, nextSym indicates a new literal character. Subtract
		   one to get the position in the MTF array at which this literal is
		   currently to be found. (Note that the result can't be -1 or 0,
		   because 0 and 1 are RUNA and RUNB. But another instance of the
		   first symbol in the mtf array, position 0, would have been handled
		   as part of a run above. Therefore 1 unused mtf position minus
		   2 non-literal nextSym values equals -1.) */
		if (dbufCount >= dbufSize) return RETVAL_DATA_ERROR;
		i = nextSym - 1;
		uc = mtfSymbol[i];

		/* Adjust the MTF array. Since we typically expect to move only a
		 * small number of symbols, and are bound by 256 in any case, using
		 * memmove here would typically be bigger and slower due to function
		 * call overhead and other assorted setup costs. */
		do {
			mtfSymbol[i] = mtfSymbol[i-1];
		} while (--i);
		mtfSymbol[0] = uc;
		uc = symToByte[uc];

		/* We have our literal byte. Save it into dbuf. */
		byteCount[uc]++;
		dbuf[dbufCount++] = (unsigned)uc;

		/* Skip group initialization if we're not done with this group. Done
		 * this way to avoid compiler warning. */
 end_of_huffman_loop:
		if (symCount--) goto continue_this_group;
	}

	/* At this point, we've read all the Huffman-coded symbols (and repeated
	   runs) for this block from the input stream, and decoded them into the
	   intermediate buffer. There are dbufCount many decoded bytes in dbuf[].
	   Now undo the Burrows-Wheeler transform on dbuf.
	   See http://dogma.net/markn/articles/bwt/bwt.htm
	 */

	/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
	j = 0;
	for (i = 0; i < 256; i++) {
		k = j + byteCount[i];
		byteCount[i] = j;
		j = k;
	}

	/* Figure out what order dbuf would be in if we sorted it. */
	for (i = 0; i < dbufCount; i++) {
		uc = (uint8_t)dbuf[i];
		dbuf[byteCount[uc]] |= (i << 8);
		byteCount[uc]++;
	}
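	/* (Added note: the first loop turned byteCount[b] into the index where
	 * byte b's run starts in the sorted view of dbuf -- e.g. for a block
	 * holding only three 'a's and two 'b's, byteCount['a'] = 0 and
	 * byteCount['b'] = 3. The second loop then makes each dbuf entry hold
	 * the byte to emit in its low 8 bits and the index of the next entry
	 * to visit in the upper bits: the linked list that read_bunzip()
	 * follows to undo the Burrows-Wheeler transform.) */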

	/* Decode first byte by hand to initialize "previous" byte. Note that it
	   doesn't get output, and if the first three characters are identical
	   it doesn't qualify as a run (hence writeRunCountdown=5). */
	if (dbufCount) {
		uint32_t tmp;
		if ((int)origPtr >= dbufCount) return RETVAL_DATA_ERROR;
		tmp = dbuf[origPtr];
		bd->writeCurrent = (uint8_t)tmp;
		bd->writePos = (tmp >> 8);
		bd->writeRunCountdown = 5;
	}
	bd->writeCount = dbufCount;

	return RETVAL_OK;
}

/* Undo the Burrows-Wheeler transform on the intermediate buffer to produce
   output. Up to len bytes of decoded data are written to outbuf; all error
   returns are negative numbers.

   NB: read_bunzip returns < 0 on error, or the number of *unfilled* bytes
   in outbuf. IOW: on EOF it returns len ("none of the bytes were filled"),
   not 0. (Why? Because this allows us to get rid of one local variable.)
*/
int FAST_FUNC read_bunzip(bunzip_data *bd, char *outbuf, int len)
{
	const uint32_t *dbuf;
	int pos, current, previous;
	uint32_t CRC;

	/* If we already have error/end indicator, return it */
	if (bd->writeCount < 0)
		return bd->writeCount;

	dbuf = bd->dbuf;

	/* Register-cached state (hopefully): */
	pos = bd->writePos;
	current = bd->writeCurrent;
	CRC = bd->writeCRC; /* small loss on x86-32 (not enough regs), win on x86-64 */

	/* We will always have pending decoded data to write into the output
	   buffer unless this is the very first call (in which case we haven't
	   Huffman-decoded a block into the intermediate buffer yet). */
	if (bd->writeCopies) {

 dec_writeCopies:
		/* Inside the loop, writeCopies means extra copies (beyond 1) */
		--bd->writeCopies;

		/* Loop outputting bytes */
		for (;;) {

			/* If the output buffer is full, save cached state and return */
			if (--len < 0) {
				/* Unlikely branch.
				 * Use of "goto" instead of keeping code here
				 * helps compiler to realize this. */
				goto outbuf_full;
			}

			/* Write next byte into output buffer, updating CRC */
			*outbuf++ = current;
			CRC = (CRC << 8) ^ bd->crc32Table[(CRC >> 24) ^ current];

			/* Loop now if we're outputting multiple copies of this byte */
			if (bd->writeCopies) {
				/* Unlikely branch */
				/*--bd->writeCopies;*/
				/*continue;*/
				/* Same, but (ab)using other existing --writeCopies operation
				 * (and this if() compiles into just test+branch pair): */
				goto dec_writeCopies;
			}
 decode_next_byte:
			if (--bd->writeCount < 0)
				break; /* input block is fully consumed, need next one */

			/* Follow sequence vector to undo Burrows-Wheeler transform */
			previous = current;
			pos = dbuf[pos];
			current = (uint8_t)pos;
			pos >>= 8;

			/* After 3 consecutive copies of the same byte, the 4th
			 * is a repeat count. We count down from 4 instead
			 * of counting up because testing for non-zero is faster */
			if (--bd->writeRunCountdown != 0) {
				if (current != previous)
					bd->writeRunCountdown = 4;
			} else {
				/* Unlikely branch */
				/* We have a repeated run, this byte indicates the count */
				bd->writeCopies = current;
				current = previous;
				bd->writeRunCountdown = 5;

				/* Sometimes there are just 3 bytes (run length 0) */
				if (!bd->writeCopies) goto decode_next_byte;

				/* Subtract the 1 copy we'd output anyway to get extras */
				--bd->writeCopies;
			}
		} /* for(;;) */

		/* Decompression of this input block completed successfully */
		bd->writeCRC = CRC = ~CRC;
		bd->totalCRC = ((bd->totalCRC << 1) | (bd->totalCRC >> 31)) ^ CRC;

		/* If this block had a CRC error, force file level CRC error */
		if (CRC != bd->headerCRC) {
			bd->totalCRC = bd->headerCRC + 1;
			return RETVAL_LAST_BLOCK;
		}
	}

	/* Refill the intermediate buffer by Huffman-decoding next block of input */
	{
		int r = get_next_block(bd);
		if (r) { /* error/end */
			bd->writeCount = r;
			return (r != RETVAL_LAST_BLOCK) ? r : len;
		}
	}

	CRC = ~0;
	pos = bd->writePos;
	current = bd->writeCurrent;
	goto decode_next_byte;

 outbuf_full:
	/* Output buffer is full, save cached state and return */
	bd->writePos = pos;
	bd->writeCurrent = current;
	bd->writeCRC = CRC;

	bd->writeCopies++;

	return 0;
}

/* Allocate the structure, read file header. If in_fd == -1, inbuf must contain
   a complete bunzip file (len bytes long). If in_fd != -1, the len bytes of
   already-read data in inbuf (if any) are used first, and the rest is read
   from the file handle into a temporary buffer. */

/* Because bunzip2 is used for help text unpacking, and because bb_show_usage()
   should work for NOFORK applets too, we must be extremely careful to not leak
   any allocations! */
int FAST_FUNC start_bunzip(bunzip_data **bdp, int in_fd,
		const void *inbuf, int len)
{
	bunzip_data *bd;
	unsigned i;
	enum {
		BZh0 = ('B' << 24) + ('Z' << 16) + ('h' << 8) + '0',
		h0 = ('h' << 8) + '0',
	};

	/* Figure out how much data to allocate */
	i = sizeof(bunzip_data);
	if (in_fd != -1) i += IOBUF_SIZE;

	/* Allocate bunzip_data. Most fields initialize to zero. */
	bd = *bdp = xzalloc(i);

	/* Setup input buffer */
	bd->in_fd = in_fd;
	if (-1 == in_fd) {
		/* in this case, bd->inbuf is read-only */
		bd->inbuf = (void*)inbuf; /* cast away const-ness */
	} else {
		bd->inbuf = (uint8_t*)(bd + 1);
		memcpy(bd->inbuf, inbuf, len);
	}
	bd->inbufCount = len;
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 658 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 659 | /* Init the CRC32 table (big endian) */ |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 660 | crc32_filltable(bd->crc32Table, 1); |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 661 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 662 | /* Setup for I/O error handling via longjmp */ |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 663 | i = setjmp(bd->jmpbuf); |
| 664 | if (i) return i; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 665 | |
| 666 | /* Ensure that the file starts with "BZh" followed by a digit '1'-'9'. */
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 667 | /* Update: the caller now verifies the first two bytes ("BZ"), which
| 668 | * makes .gz/.bz2 integration easier */
| 669 | /* was: */ |
| 670 | /* i = get_bits(bd, 32); */ |
| 671 | /* if ((unsigned)(i - BZh0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; */ |
| 672 | i = get_bits(bd, 16); |
| 673 | if ((unsigned)(i - h0 - 1) >= 9) return RETVAL_NOT_BZIP_DATA; |
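/* The unsigned compare is a one-instruction range check: a valid header has
 * 'h' followed by an ASCII digit '1'-'9', so i - h0 is 1..9 and
 * (unsigned)(i - h0 - 1) is 0..8. Anything else, including "BZh0" (where
 * i - h0 - 1 wraps to a huge unsigned value), fails the single >= 9 test.
 * Example: "BZh9" gives i = ('h' << 8) + '9', so i - h0 - 1 == 8: accepted. */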
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 674 | |
Denis Vlasenko | 86d88c0 | 2008-06-28 18:10:09 +0000 | [diff] [blame] | 675 | /* Fourth byte (ASCII '1'-'9') indicates block size in units of 100k bytes of
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 676 | uncompressed data. Allocate the intermediate buffer for a block. */
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 677 | /* bd->dbufSize = 100000 * (i - BZh0); */ |
| 678 | bd->dbufSize = 100000 * (i - h0); |
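/* Example: a "BZh9" stream gets dbufSize = 900000 entries. Assuming the
 * 32-bit dbuf elements this decoder uses (declared earlier in the file),
 * that is roughly 3.6 MB of intermediate buffer. */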
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 679 | |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 680 | /* Cannot use xmalloc - may leak bd in NOFORK case! */ |
Denys Vlasenko | 36ef0a6 | 2010-10-29 16:05:05 +0200 | [diff] [blame] | 681 | bd->dbuf = malloc_or_warn(bd->dbufSize * sizeof(bd->dbuf[0])); |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 682 | if (!bd->dbuf) { |
| 683 | free(bd); |
| 684 | xfunc_die(); |
| 685 | } |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 686 | return RETVAL_OK; |
| 687 | } |
| 688 | |
Denis Vlasenko | defc1ea | 2008-06-27 02:52:20 +0000 | [diff] [blame] | 689 | void FAST_FUNC dealloc_bunzip(bunzip_data *bd) |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 690 | { |
Denis Vlasenko | 4b924f3 | 2007-05-30 00:29:55 +0000 | [diff] [blame] | 691 | free(bd->dbuf); |
| 692 | free(bd); |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 693 | } |
| 694 | |
| 695 | |
| 696 | /* Decompress src_fd to dst_fd. Stops at end of bzip data, not end of file. */ |
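/* Return convention: on success, 0 (or the total number of bytes written to
   dst_fd when IF_DESKTOP() enables the long long return type); on a decode or
   CRC failure, a negative RETVAL_* / bunzip error code. */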
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 697 | IF_DESKTOP(long long) int FAST_FUNC |
Denis Vlasenko | c14d39e | 2007-06-08 13:05:39 +0000 | [diff] [blame] | 698 | unpack_bz2_stream(int src_fd, int dst_fd) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 699 | { |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 700 | IF_DESKTOP(long long total_written = 0;) |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 701 | bunzip_data *bd; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 702 | char *outbuf; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 703 | int i; |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 704 | unsigned len; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 705 | |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 706 | outbuf = xmalloc(IOBUF_SIZE); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 707 | len = 0; |
| 708 | while (1) { /* "Process one BZ... stream" loop */ |
| 709 | |
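/* On the first pass len is 0. On later passes, outbuf[0..1] held the "BZ"
 * magic of the next concatenated stream (checked at the bottom of this loop)
 * and the len bytes at outbuf + 2 are whatever was read past it, so they are
 * handed to start_bunzip() as pre-read input. */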
| 710 | i = start_bunzip(&bd, src_fd, outbuf + 2, len); |
| 711 | |
| 712 | if (i == 0) { |
| 713 | while (1) { /* "Produce some output bytes" loop */ |
| 714 | i = read_bunzip(bd, outbuf, IOBUF_SIZE); |
Denys Vlasenko | 1014a9a | 2010-10-29 19:01:58 +0200 | [diff] [blame] | 715 | if (i < 0) /* error? */ |
| 716 | break; |
| 717 | i = IOBUF_SIZE - i; /* number of bytes produced */ |
| 718 | if (i == 0) /* EOF? */ |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 719 | break; |
| 720 | if (i != full_write(dst_fd, outbuf, i)) { |
| 721 | bb_error_msg("short write"); |
| 722 | goto release_mem; |
| 723 | } |
| 724 | IF_DESKTOP(total_written += i;) |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 725 | } |
| 726 | } |
Rob Landley | f856eab | 2006-02-17 03:43:49 +0000 | [diff] [blame] | 727 | |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 728 | if (i != RETVAL_LAST_BLOCK) { |
| 729 | bb_error_msg("bunzip error %d", i); |
| 730 | break; |
| 731 | } |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 732 | if (bd->headerCRC != bd->totalCRC) { |
Denis Vlasenko | 66bbfbd | 2007-09-28 23:45:56 +0000 | [diff] [blame] | 733 | bb_error_msg("CRC error"); |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 734 | break; |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 735 | } |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 736 | |
| 737 | /* Successfully unpacked one BZ stream */ |
| 738 | i = RETVAL_OK; |
| 739 | |
| 740 | /* Do we have "BZ..." after last processed byte? |
| 741 | * pbzip2 (parallelized bzip2) produces such files. |
| 742 | */ |
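/* Example: pbzip2 output is simply several complete streams glued together,
 * "BZh9<blocks...>BZh9<blocks...>...", so after finishing one stream we peek
 * at the next two bytes and go around the loop again if they are "BZ". */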
| 743 | len = bd->inbufCount - bd->inbufPos; |
| 744 | memcpy(outbuf, &bd->inbuf[bd->inbufPos], len); |
| 745 | if (len < 2) { |
| 746 | if (safe_read(src_fd, outbuf + len, 2 - len) != 2 - len) |
| 747 | break; |
| 748 | len = 2; |
| 749 | } |
| 750 | if (*(uint16_t*)outbuf != BZIP2_MAGIC) /* "BZ"? */ |
| 751 | break; |
| 752 | dealloc_bunzip(bd); |
| 753 | len -= 2; |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 754 | } |
Denys Vlasenko | caddfc8 | 2010-10-28 23:08:53 +0200 | [diff] [blame] | 755 | |
| 756 | release_mem: |
Denis Vlasenko | c6758a0 | 2007-04-10 21:40:19 +0000 | [diff] [blame] | 757 | dealloc_bunzip(bd); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 758 | free(outbuf); |
Glenn L McGrath | 1c83440 | 2003-10-28 23:32:12 +0000 | [diff] [blame] | 759 | |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 760 | return i ? i : IF_DESKTOP(total_written) + 0; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 761 | } |
| 762 | |
Denis Vlasenko | 5e34ff2 | 2009-04-21 11:09:40 +0000 | [diff] [blame] | 763 | IF_DESKTOP(long long) int FAST_FUNC |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 764 | unpack_bz2_stream_prime(int src_fd, int dst_fd) |
| 765 | { |
Denys Vlasenko | 620e863 | 2010-06-30 19:43:44 +0200 | [diff] [blame] | 766 | uint16_t magic2; |
| 767 | xread(src_fd, &magic2, 2); |
| 768 | if (magic2 != BZIP2_MAGIC) { |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 769 | bb_error_msg_and_die("invalid magic"); |
| 770 | } |
| 771 | return unpack_bz2_stream(src_fd, dst_fd); |
| 772 | } |
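/* Note the split of the header check: unpack_bz2_stream_prime() consumes only
   the 2-byte "BZ" signature; start_bunzip(), reached via unpack_bz2_stream(),
   then verifies the following "h<level>" bytes. */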
| 773 | |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 774 | #ifdef TESTING |
Glenn L McGrath | 60bce49 | 2002-11-03 07:28:38 +0000 | [diff] [blame] | 775 | |
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 776 | static char *const bunzip_errors[] = { |
| 777 | NULL, "Bad file checksum", "Not bzip data", |
| 778 | "Unexpected input EOF", "Unexpected output EOF", "Data error", |
| 779 | "Out of memory", "Obsolete (pre 0.9.5) bzip format not supported" |
| 780 | }; |
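/* start_bunzip()/read_bunzip() report failures as negative RETVAL_* codes, so
   negating the result indexes this message table (see bunzip_errors[-i] below). */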
Glenn L McGrath | 237ae42 | 2002-11-03 14:05:15 +0000 | [diff] [blame] | 781 | |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 782 | /* Dumb little test thing, decompress stdin to stdout */ |
Bernhard Reutner-Fischer | febe3c4 | 2007-04-04 20:52:03 +0000 | [diff] [blame] | 783 | int main(int argc, char **argv) |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 784 | { |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 785 | int i; |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 786 | char c; |
| 787 | |
Denis Vlasenko | e9ad84d | 2008-08-05 13:10:34 +0000 | [diff] [blame] | 788 | i = unpack_bz2_stream_prime(0, 1);
Denis Vlasenko | b38cf3f | 2007-04-10 17:16:33 +0000 | [diff] [blame] | 789 | if (i < 0) |
Denis Vlasenko | f5d8c90 | 2008-06-26 14:32:57 +0000 | [diff] [blame] | 790 | fprintf(stderr, "%s\n", bunzip_errors[-i]); |
Bernhard Reutner-Fischer | 5e25ddb | 2008-05-19 09:48:17 +0000 | [diff] [blame] | 791 | else if (read(STDIN_FILENO, &c, 1)) |
Denis Vlasenko | f5d8c90 | 2008-06-26 14:32:57 +0000 | [diff] [blame] | 792 | fprintf(stderr, "Trailing garbage ignored\n"); |
Eric Andersen | 5fa4db2 | 2003-10-23 06:52:01 +0000 | [diff] [blame] | 793 | return -i; |
Eric Andersen | 0d6d88a | 2003-10-18 01:58:35 +0000 | [diff] [blame] | 794 | } |
| 795 | #endif |