path: root/src/3rdparty/zlib/src/deflate.c
Diffstat (limited to 'src/3rdparty/zlib/src/deflate.c')
-rw-r--r--  src/3rdparty/zlib/src/deflate.c | 880
1 file changed, 428 insertions, 452 deletions
diff --git a/src/3rdparty/zlib/src/deflate.c b/src/3rdparty/zlib/src/deflate.c
index 1ec761448d..012ea8148e 100644
--- a/src/3rdparty/zlib/src/deflate.c
+++ b/src/3rdparty/zlib/src/deflate.c
@@ -1,5 +1,5 @@
/* deflate.c -- compress data using the deflation algorithm
- * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+ * Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@@ -52,7 +52,7 @@
#include "deflate.h"
const char deflate_copyright[] =
- " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler ";
+ " deflate 1.3.1 Copyright 1995-2024 Jean-loup Gailly and Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@@ -60,9 +60,6 @@ const char deflate_copyright[] =
copyright string in the executable of your product.
*/
-/* ===========================================================================
- * Function prototypes.
- */
typedef enum {
need_more, /* block not completed, need more input or more output */
block_done, /* block flush performed */
@@ -70,35 +67,16 @@ typedef enum {
finish_done /* finish done, accept no more input or output */
} block_state;
-typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+typedef block_state (*compress_func)(deflate_state *s, int flush);
/* Compression function. Returns the block state after the call. */
-local int deflateStateCheck OF((z_streamp strm));
-local void slide_hash OF((deflate_state *s));
-local void fill_window OF((deflate_state *s));
-local block_state deflate_stored OF((deflate_state *s, int flush));
-local block_state deflate_fast OF((deflate_state *s, int flush));
+local block_state deflate_stored(deflate_state *s, int flush);
+local block_state deflate_fast(deflate_state *s, int flush);
#ifndef FASTEST
-local block_state deflate_slow OF((deflate_state *s, int flush));
-#endif
-local block_state deflate_rle OF((deflate_state *s, int flush));
-local block_state deflate_huff OF((deflate_state *s, int flush));
-local void lm_init OF((deflate_state *s));
-local void putShortMSB OF((deflate_state *s, uInt b));
-local void flush_pending OF((z_streamp strm));
-local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
-#ifdef ASMV
-# pragma message("Assembler code may have bugs -- use at your own risk")
- void match_init OF((void)); /* asm code initialization */
- uInt longest_match OF((deflate_state *s, IPos cur_match));
-#else
-local uInt longest_match OF((deflate_state *s, IPos cur_match));
-#endif
-
-#ifdef ZLIB_DEBUG
-local void check_match OF((deflate_state *s, IPos start, IPos match,
- int length));
+local block_state deflate_slow(deflate_state *s, int flush);
#endif
+local block_state deflate_rle(deflate_state *s, int flush);
+local block_state deflate_huff(deflate_state *s, int flush);
/* ===========================================================================
* Local data
@@ -160,7 +138,7 @@ local const config configuration_table[10] = {
* characters, so that a running hash key can be computed from the previous
* key instead of complete recalculation each time.
*/
-#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+#define UPDATE_HASH(s,h,c) (h = (((h) << s->hash_shift) ^ (c)) & s->hash_mask)
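Illustrative sketch (not part of the patch): a standalone version of the running hash the comment above describes, assuming the default memLevel 8 parameters, where hash_bits == 15 and hash_shift == (15 + 3 - 1) / 3 == 5, so three updates shift the oldest byte completely out of the mask.

#define EX_HASH_BITS  15                        /* memLevel 8: hash_bits == 15 */
#define EX_HASH_SHIFT 5                         /* (15 + 3 - 1) / 3 */
#define EX_HASH_MASK  ((1u << EX_HASH_BITS) - 1)

static unsigned ex_update_hash(unsigned h, unsigned char c) {
    return ((h << EX_HASH_SHIFT) ^ c) & EX_HASH_MASK;
}

/* Hash of a 3-byte window computed incrementally: after three updates the
 * first byte has been shifted past the 15-bit mask, so advancing the window
 * by one byte needs only a single ex_update_hash() call, not a full
 * recalculation. */
static unsigned ex_hash3(const unsigned char *p) {
    unsigned h = 0;
    h = ex_update_hash(h, p[0]);
    h = ex_update_hash(h, p[1]);
    h = ex_update_hash(h, p[2]);
    return h;
}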
/* ===========================================================================
@@ -190,17 +168,23 @@ local const config configuration_table[10] = {
* prev[] will be initialized on the fly.
*/
#define CLEAR_HASH(s) \
- s->head[s->hash_size-1] = NIL; \
- zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+ do { \
+ s->head[s->hash_size - 1] = NIL; \
+ zmemzero((Bytef *)s->head, \
+ (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
+ } while (0)
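Minimal sketch (not part of the patch, helper names hypothetical) of why the new do { ... } while (0) wrapper matters: it makes the two-statement macro body behave as a single statement under an un-braced if/else.

static void ex_reset_tail(int *t) { *t = 0; }
static void ex_zero_rest(int *t)  { t[1] = 0; }

#define EX_CLEAR_UNSAFE(t) ex_reset_tail(t); ex_zero_rest(t)
#define EX_CLEAR_SAFE(t)   do { ex_reset_tail(t); ex_zero_rest(t); } while (0)

static void ex_use(int cond, int *t) {
    if (cond)
        EX_CLEAR_SAFE(t);   /* both helper calls are guarded by the if */
    else
        ex_zero_rest(t);    /* with EX_CLEAR_UNSAFE above, the second call
                               would escape the if and the trailing ';'
                               would make this else fail to compile */
}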
/* ===========================================================================
* Slide the hash table when sliding the window down (could be avoided with 32
* bit values at the expense of memory usage). We slide even when level == 0 to
* keep the hash table consistent if we switch back to level > 0 later.
*/
-local void slide_hash(s)
- deflate_state *s;
-{
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+ __attribute__((no_sanitize("memory")))
+# endif
+#endif
+local void slide_hash(deflate_state *s) {
unsigned n, m;
Posf *p;
uInt wsize = s->w_size;
@@ -224,39 +208,181 @@ local void slide_hash(s)
#endif
}
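For reference, a minimal standalone sketch (not part of the patch) of the slide described above: each stored position either moves down by wsize or is cleared to NIL (0) once it would point before the slid window. The new __has_feature(memory_sanitizer) guard marks the real routine no_sanitize("memory"), presumably because it also reads prev[] entries that, per the comment above CLEAR_HASH, are only initialized on the fly.

typedef unsigned short ExPos;   /* stands in for zlib's Pos */

static void ex_slide_table(ExPos *table, unsigned n, unsigned wsize) {
    while (n--) {
        unsigned m = table[n];
        table[n] = (ExPos)(m >= wsize ? m - wsize : 0);   /* 0 == NIL */
    }
}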
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local unsigned read_buf(z_streamp strm, Bytef *buf, unsigned size) {
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ zmemcpy(buf, strm->next_in, len);
+ if (strm->state->wrap == 1) {
+ strm->adler = adler32(strm->adler, buf, len);
+ }
+#ifdef GZIP
+ else if (strm->state->wrap == 2) {
+ strm->adler = crc32(strm->adler, buf, len);
+ }
+#endif
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return len;
+}
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+local void fill_window(deflate_state *s) {
+ unsigned n;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (sizeof(int) <= 2) {
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if
+ * strstart == 0 && lookahead == 1 (input done a byte at time)
+ */
+ more--;
+ }
+ }
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ if (s->strstart >= wsize + MAX_DIST(s)) {
+
+ zmemcpy(s->window, s->window + wsize, (unsigned)wsize - more);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
+ slide_hash(s);
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) break;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead + s->insert >= MIN_MATCH) {
+ uInt str = s->strstart - s->insert;
+ s->ins_h = s->window[str];
+ UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ while (s->insert) {
+ UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
+#ifndef FASTEST
+ s->prev[str & s->w_mask] = s->head[s->ins_h];
+#endif
+ s->head[s->ins_h] = (Pos)str;
+ str++;
+ s->insert--;
+ if (s->lookahead + s->insert < MIN_MATCH)
+ break;
+ }
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+
+ /* If the WIN_INIT bytes after the end of the current data have never been
+ * written, then zero those bytes in order to avoid memory check reports of
+ * the use of uninitialized (or uninitialised as Julian writes) bytes by
+ * the longest match routines. Update the high water mark for the next
+ * time through here. WIN_INIT is set to MAX_MATCH since the longest match
+ * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
+ */
+ if (s->high_water < s->window_size) {
+ ulg curr = s->strstart + (ulg)(s->lookahead);
+ ulg init;
+
+ if (s->high_water < curr) {
+ /* Previous high water mark below current data -- zero WIN_INIT
+ * bytes or up to end of window, whichever is less.
+ */
+ init = s->window_size - curr;
+ if (init > WIN_INIT)
+ init = WIN_INIT;
+ zmemzero(s->window + curr, (unsigned)init);
+ s->high_water = curr + init;
+ }
+ else if (s->high_water < (ulg)curr + WIN_INIT) {
+ /* High water mark at or above current data, but below current data
+ * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
+ * to end of window, whichever is less.
+ */
+ init = (ulg)curr + WIN_INIT - s->high_water;
+ if (init > s->window_size - s->high_water)
+ init = s->window_size - s->high_water;
+ zmemzero(s->window + s->high_water, (unsigned)init);
+ s->high_water += init;
+ }
+ }
+
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+ "not enough room for search");
+}
+
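Compile-time restatement (not part of the patch) of the "more >= 2" bound derived in the comment inside fill_window() above, plugging in the windowBits == 15 defaults: WSIZE == 32768, window_size == 2*WSIZE, MIN_LOOKAHEAD == 258 + 3 + 1 and MAX_DIST == WSIZE - MIN_LOOKAHEAD.

enum {
    EX_WSIZE         = 32768,
    EX_WINDOW_SIZE   = 2 * EX_WSIZE,
    EX_MIN_LOOKAHEAD = 258 + 3 + 1,
    EX_MAX_DIST      = EX_WSIZE - EX_MIN_LOOKAHEAD,
    EX_MORE_MIN      = EX_WINDOW_SIZE
                       - ((EX_MIN_LOOKAHEAD - 1) + EX_WSIZE + (EX_MAX_DIST - 1))
};
/* fails to compile if the no-sliding lower bound ever drops below 2 */
typedef char ex_more_is_at_least_two[EX_MORE_MIN >= 2 ? 1 : -1];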
/* ========================================================================= */
-int ZEXPORT deflateInit_(strm, level, version, stream_size)
- z_streamp strm;
- int level;
- const char *version;
- int stream_size;
-{
+int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version,
+ int stream_size) {
return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
Z_DEFAULT_STRATEGY, version, stream_size);
/* To do: ignore strm->next_in if we use it as window */
}
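Usage sketch (not part of the patch): the canonical chunked deflate() loop that drives the code in this file; the deflateInit() macro expands to the deflateInit_() shown above, and the stream's next_in/next_out buffers are what read_buf() and flush_pending() copy from and to. CHUNK and the FILE-based I/O are assumptions for illustration, and error handling is abbreviated.

#include <stdio.h>
#include "zlib.h"

#define EX_CHUNK 16384

static int ex_deflate_file(FILE *src, FILE *dst, int level) {
    unsigned char in[EX_CHUNK], out[EX_CHUNK];
    unsigned have;
    z_stream strm;
    int flush, ret;

    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    if (deflateInit(&strm, level) != Z_OK)
        return Z_ERRNO;
    do {
        strm.avail_in = (uInt)fread(in, 1, EX_CHUNK, src);
        if (ferror(src)) { deflateEnd(&strm); return Z_ERRNO; }
        flush = feof(src) ? Z_FINISH : Z_NO_FLUSH;
        strm.next_in = in;
        do {                        /* run deflate() until out is not full */
            strm.next_out = out;
            strm.avail_out = EX_CHUNK;
            ret = deflate(&strm, flush);
            have = EX_CHUNK - strm.avail_out;
            if (fwrite(out, 1, have, dst) != have) {
                deflateEnd(&strm);
                return Z_ERRNO;
            }
        } while (strm.avail_out == 0);
    } while (flush != Z_FINISH);
    deflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
}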
/* ========================================================================= */
-int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
- version, stream_size)
- z_streamp strm;
- int level;
- int method;
- int windowBits;
- int memLevel;
- int strategy;
- const char *version;
- int stream_size;
-{
+int ZEXPORT deflateInit2_(z_streamp strm, int level, int method,
+ int windowBits, int memLevel, int strategy,
+ const char *version, int stream_size) {
deflate_state *s;
int wrap = 1;
static const char my_version[] = ZLIB_VERSION;
- ushf *overlay;
- /* We overlay pending_buf and d_buf+l_buf. This works since the average
- * output size for (length,distance) codes is <= 24 bits.
- */
-
if (version == Z_NULL || version[0] != my_version[0] ||
stream_size != sizeof(z_stream)) {
return Z_VERSION_ERROR;
@@ -287,6 +413,8 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
if (windowBits < 0) { /* suppress zlib wrapper */
wrap = 0;
+ if (windowBits < -15)
+ return Z_STREAM_ERROR;
windowBits = -windowBits;
}
#ifdef GZIP
@@ -316,7 +444,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
s->hash_bits = (uInt)memLevel + 7;
s->hash_size = 1 << s->hash_bits;
s->hash_mask = s->hash_size - 1;
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+ s->hash_shift = ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH);
s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
@@ -326,9 +454,47 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
- s->pending_buf = (uchf *) overlay;
- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+ /* We overlay pending_buf and sym_buf. This works since the average size
+ * for length/distance pairs over any compressed block is assured to be 31
+ * bits or less.
+ *
+ * Analysis: The longest fixed codes are a length code of 8 bits plus 5
+ * extra bits, for lengths 131 to 257. The longest fixed distance codes are
+ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
+ * possible fixed-codes length/distance pair is then 31 bits total.
+ *
+ * sym_buf starts one-fourth of the way into pending_buf. So there are
+ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
+ * in sym_buf is three bytes -- two for the distance and one for the
+ * literal/length. As each symbol is consumed, the pointer to the next
+ * sym_buf value to read moves forward three bytes. From that symbol, up to
+ * 31 bits are written to pending_buf. The closest the written pending_buf
+ * bits gets to the next sym_buf symbol to read is just before the last
+ * code is written. At that time, 31*(n - 2) bits have been written, just
+ * after 24*(n - 2) bits have been consumed from sym_buf. sym_buf starts at
+ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n - 1
+ * symbols are written.) The closest the writing gets to what is unread is
+ * then n + 14 bits. Here n is lit_bufsize, which is 16384 by default, and
+ * can range from 128 to 32768.
+ *
+ * Therefore, at a minimum, there are 142 bits of space between what is
+ * written and what is read in the overlain buffers, so the symbols cannot
+ * be overwritten by the compressed data. That space is actually 139 bits,
+ * due to the three-bit fixed-code block header.
+ *
+ * That covers the case where either Z_FIXED is specified, forcing fixed
+ * codes, or when the use of fixed codes is chosen, because that choice
+ * results in a smaller compressed block than dynamic codes. That latter
+ * condition then assures that the above analysis also covers all dynamic
+ * blocks. A dynamic-code block will only be chosen to be emitted if it has
+ * fewer bits than a fixed-code block would for the same set of symbols.
+ * Therefore its average symbol length is assured to be less than 31. So
+ * the compressed data for a dynamic block also cannot overwrite the
+ * symbols from which it is being constructed.
+ */
+
+ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, LIT_BUFS);
+ s->pending_buf_size = (ulg)s->lit_bufsize * 4;
if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
s->pending_buf == Z_NULL) {
@@ -337,8 +503,18 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
deflateEnd (strm);
return Z_MEM_ERROR;
}
- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+#ifdef LIT_MEM
+ s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1));
+ s->l_buf = s->pending_buf + (s->lit_bufsize << 2);
+ s->sym_end = s->lit_bufsize - 1;
+#else
+ s->sym_buf = s->pending_buf + s->lit_bufsize;
+ s->sym_end = (s->lit_bufsize - 1) * 3;
+#endif
+ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
s->level = level;
s->strategy = strategy;
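Worked restatement (not part of the patch) of the margin computed in the overlay analysis above, for the smallest allowed lit_bufsize. sym_buf starts 8*n bits into pending_buf; just before the last fixed code is written, 31*(n - 2) bits have been emitted and 24*(n - 2) bits consumed, so the gap is 8*n + 24*(n - 2) - 31*(n - 2) == n + 14 bits.

enum {
    EX_MIN_LIT_BUFSIZE  = 128,                       /* smallest n (memLevel 1) */
    EX_MIN_MARGIN_BITS  = EX_MIN_LIT_BUFSIZE + 14,   /* == 142 */
    EX_MARGIN_AFTER_HDR = EX_MIN_MARGIN_BITS - 3     /* == 139, after the 3-bit
                                                        fixed-block header */
};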
@@ -350,9 +526,7 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
/* =========================================================================
* Check for a valid deflate stream state. Return 0 if ok, 1 if not.
*/
-local int deflateStateCheck (strm)
- z_streamp strm;
-{
+local int deflateStateCheck(z_streamp strm) {
deflate_state *s;
if (strm == Z_NULL ||
strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
@@ -373,11 +547,8 @@ local int deflateStateCheck (strm)
}
/* ========================================================================= */
-int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
- z_streamp strm;
- const Bytef *dictionary;
- uInt dictLength;
-{
+int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef *dictionary,
+ uInt dictLength) {
deflate_state *s;
uInt str, n;
int wrap;
@@ -442,11 +613,8 @@ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
}
/* ========================================================================= */
-int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
- z_streamp strm;
- Bytef *dictionary;
- uInt *dictLength;
-{
+int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef *dictionary,
+ uInt *dictLength) {
deflate_state *s;
uInt len;
@@ -464,9 +632,7 @@ int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
}
/* ========================================================================= */
-int ZEXPORT deflateResetKeep (strm)
- z_streamp strm;
-{
+int ZEXPORT deflateResetKeep(z_streamp strm) {
deflate_state *s;
if (deflateStateCheck(strm)) {
@@ -488,23 +654,45 @@ int ZEXPORT deflateResetKeep (strm)
#ifdef GZIP
s->wrap == 2 ? GZIP_STATE :
#endif
- s->wrap ? INIT_STATE : BUSY_STATE;
+ INIT_STATE;
strm->adler =
#ifdef GZIP
s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
#endif
adler32(0L, Z_NULL, 0);
- s->last_flush = Z_NO_FLUSH;
+ s->last_flush = -2;
_tr_init(s);
return Z_OK;
}
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init(deflate_state *s) {
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->insert = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+}
+
/* ========================================================================= */
-int ZEXPORT deflateReset (strm)
- z_streamp strm;
-{
+int ZEXPORT deflateReset(z_streamp strm) {
int ret;
ret = deflateResetKeep(strm);
@@ -514,10 +702,7 @@ int ZEXPORT deflateReset (strm)
}
/* ========================================================================= */
-int ZEXPORT deflateSetHeader (strm, head)
- z_streamp strm;
- gz_headerp head;
-{
+int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head) {
if (deflateStateCheck(strm) || strm->state->wrap != 2)
return Z_STREAM_ERROR;
strm->state->gzhead = head;
@@ -525,11 +710,7 @@ int ZEXPORT deflateSetHeader (strm, head)
}
/* ========================================================================= */
-int ZEXPORT deflatePending (strm, pending, bits)
- unsigned *pending;
- int *bits;
- z_streamp strm;
-{
+int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) {
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
if (pending != Z_NULL)
*pending = strm->state->pending;
@@ -539,18 +720,21 @@ int ZEXPORT deflatePending (strm, pending, bits)
}
/* ========================================================================= */
-int ZEXPORT deflatePrime (strm, bits, value)
- z_streamp strm;
- int bits;
- int value;
-{
+int ZEXPORT deflatePrime(z_streamp strm, int bits, int value) {
deflate_state *s;
int put;
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
s = strm->state;
- if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
+#ifdef LIT_MEM
+ if (bits < 0 || bits > 16 ||
+ (uchf *)s->d_buf < s->pending_out + ((Buf_size + 7) >> 3))
+ return Z_BUF_ERROR;
+#else
+ if (bits < 0 || bits > 16 ||
+ s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
return Z_BUF_ERROR;
+#endif
do {
put = Buf_size - s->bi_valid;
if (put > bits)
@@ -565,11 +749,7 @@ int ZEXPORT deflatePrime (strm, bits, value)
}
/* ========================================================================= */
-int ZEXPORT deflateParams(strm, level, strategy)
- z_streamp strm;
- int level;
- int strategy;
-{
+int ZEXPORT deflateParams(z_streamp strm, int level, int strategy) {
deflate_state *s;
compress_func func;
@@ -587,12 +767,12 @@ int ZEXPORT deflateParams(strm, level, strategy)
func = configuration_table[s->level].func;
if ((strategy != s->strategy || func != configuration_table[level].func) &&
- s->high_water) {
+ s->last_flush != -2) {
/* Flush the last buffer: */
int err = deflate(strm, Z_BLOCK);
if (err == Z_STREAM_ERROR)
return err;
- if (strm->avail_out == 0)
+ if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
return Z_BUF_ERROR;
}
if (s->level != level) {
@@ -614,13 +794,8 @@ int ZEXPORT deflateParams(strm, level, strategy)
}
/* ========================================================================= */
-int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
- z_streamp strm;
- int good_length;
- int max_lazy;
- int nice_length;
- int max_chain;
-{
+int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy,
+ int nice_length, int max_chain) {
deflate_state *s;
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
@@ -633,36 +808,47 @@ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
}
/* =========================================================================
- * For the default windowBits of 15 and memLevel of 8, this function returns
- * a close to exact, as well as small, upper bound on the compressed size.
- * They are coded as constants here for a reason--if the #define's are
- * changed, then this function needs to be changed as well. The return
- * value for 15 and 8 only works for those exact settings.
+ * For the default windowBits of 15 and memLevel of 8, this function returns a
+ * close to exact, as well as small, upper bound on the compressed size. This
+ * is an expansion of ~0.03%, plus a small constant.
+ *
+ * For any setting other than those defaults for windowBits and memLevel, one
+ * of two worst case bounds is returned. This is at most an expansion of ~4% or
+ * ~13%, plus a small constant.
+ *
+ * Both the 0.03% and 4% derive from the overhead of stored blocks. The first
+ * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second
+ * is for stored blocks of 127 bytes (the worst case memLevel == 1). The
+ * expansion results from five bytes of header for each stored block.
*
- * For any setting other than those defaults for windowBits and memLevel,
- * the value returned is a conservative worst case for the maximum expansion
- * resulting from using fixed blocks instead of stored blocks, which deflate
- * can emit on compressed data for some combinations of the parameters.
+ * The larger expansion of 13% results from a window size less than or equal to
+ * the symbols buffer size (windowBits <= memLevel + 7). In that case some of
+ * the data being compressed may have slid out of the sliding window, impeding
+ * a stored block from being emitted. Then the only choice is a fixed or
+ * dynamic block, where a fixed block limits the maximum expansion to 9 bits
+ * per 8-bit byte, plus 10 bits for every block. The smallest block size for
+ * which this can occur is 255 (memLevel == 2).
*
- * This function could be more sophisticated to provide closer upper bounds for
- * every combination of windowBits and memLevel. But even the conservative
- * upper bound of about 14% expansion does not seem onerous for output buffer
- * allocation.
+ * Shifts are used to approximate divisions, for speed.
*/
-uLong ZEXPORT deflateBound(strm, sourceLen)
- z_streamp strm;
- uLong sourceLen;
-{
+uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
deflate_state *s;
- uLong complen, wraplen;
+ uLong fixedlen, storelen, wraplen;
- /* conservative upper bound for compressed data */
- complen = sourceLen +
- ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;
+ /* upper bound for fixed blocks with 9-bit literals and length 255
+ (memLevel == 2, which is the lowest that may not use stored blocks) --
+ ~13% overhead plus a small constant */
+ fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
+ (sourceLen >> 9) + 4;
- /* if can't get parameters, return conservative bound plus zlib wrapper */
+ /* upper bound for stored blocks with length 127 (memLevel == 1) --
+ ~4% overhead plus a small constant */
+ storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
+ (sourceLen >> 11) + 7;
+
+ /* if can't get parameters, return larger bound plus a zlib wrapper */
if (deflateStateCheck(strm))
- return complen + 6;
+ return (fixedlen > storelen ? fixedlen : storelen) + 6;
/* compute wrapper length */
s = strm->state;
@@ -699,11 +885,13 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
wraplen = 6;
}
- /* if not default parameters, return conservative bound */
+ /* if not default parameters, return one of the conservative bounds */
if (s->w_bits != 15 || s->hash_bits != 8 + 7)
- return complen + wraplen;
+ return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) +
+ wraplen;
- /* default settings: return tight bound for that case */
+ /* default settings: return tight bound for that case -- ~0.03% overhead
+ plus a small constant */
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13 - 6 + wraplen;
}
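Usage sketch (not part of the patch): sizing an output buffer with deflateBound() for a single-shot compression, which is the main use of the bound discussed above. Names are illustrative, error handling is abbreviated, and srclen is assumed to fit in a uInt.

#include <stdlib.h>
#include "zlib.h"

static int ex_compress_once(const unsigned char *src, uLong srclen,
                            unsigned char **out, uLong *outlen) {
    z_stream strm;
    uLong bound;
    int ret;

    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
        return -1;
    bound = deflateBound(&strm, srclen);   /* upper bound incl. zlib wrapper */
    *out = (unsigned char *)malloc(bound);
    if (*out == NULL) { deflateEnd(&strm); return -1; }

    strm.next_in  = (Bytef *)src;  strm.avail_in  = (uInt)srclen;
    strm.next_out = *out;          strm.avail_out = (uInt)bound;
    ret = deflate(&strm, Z_FINISH);        /* one call suffices with this bound */
    *outlen = bound - strm.avail_out;
    deflateEnd(&strm);
    return ret == Z_STREAM_END ? 0 : -1;
}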
@@ -713,10 +901,7 @@ uLong ZEXPORT deflateBound(strm, sourceLen)
* IN assertion: the stream state is correct and there is enough room in
* pending_buf.
*/
-local void putShortMSB (s, b)
- deflate_state *s;
- uInt b;
-{
+local void putShortMSB(deflate_state *s, uInt b) {
put_byte(s, (Byte)(b >> 8));
put_byte(s, (Byte)(b & 0xff));
}
@@ -727,9 +912,7 @@ local void putShortMSB (s, b)
* applications may wish to modify it to avoid allocating a large
* strm->next_out buffer and copying into it. (See also read_buf()).
*/
-local void flush_pending(strm)
- z_streamp strm;
-{
+local void flush_pending(z_streamp strm) {
unsigned len;
deflate_state *s = strm->state;
@@ -760,10 +943,7 @@ local void flush_pending(strm)
} while (0)
/* ========================================================================= */
-int ZEXPORT deflate (strm, flush)
- z_streamp strm;
- int flush;
-{
+int ZEXPORT deflate(z_streamp strm, int flush) {
int old_flush; /* value of flush param for previous deflate call */
deflate_state *s;
@@ -811,9 +991,11 @@ int ZEXPORT deflate (strm, flush)
}
/* Write the header */
+ if (s->status == INIT_STATE && s->wrap == 0)
+ s->status = BUSY_STATE;
if (s->status == INIT_STATE) {
/* zlib header */
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt header = (Z_DEFLATED + ((s->w_bits - 8) << 4)) << 8;
uInt level_flags;
if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
@@ -1073,9 +1255,7 @@ int ZEXPORT deflate (strm, flush)
}
/* ========================================================================= */
-int ZEXPORT deflateEnd (strm)
- z_streamp strm;
-{
+int ZEXPORT deflateEnd(z_streamp strm) {
int status;
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
@@ -1099,16 +1279,14 @@ int ZEXPORT deflateEnd (strm)
* To simplify the source, this is not supported for 16-bit MSDOS (which
* doesn't have enough memory anyway to duplicate compression states).
*/
-int ZEXPORT deflateCopy (dest, source)
- z_streamp dest;
- z_streamp source;
-{
+int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
#ifdef MAXSEG_64K
+ (void)dest;
+ (void)source;
return Z_STREAM_ERROR;
#else
deflate_state *ds;
deflate_state *ss;
- ushf *overlay;
if (deflateStateCheck(source) || dest == Z_NULL) {
@@ -1128,8 +1306,7 @@ int ZEXPORT deflateCopy (dest, source)
ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
- ds->pending_buf = (uchf *) overlay;
+ ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, LIT_BUFS);
if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
ds->pending_buf == Z_NULL) {
@@ -1140,11 +1317,15 @@ int ZEXPORT deflateCopy (dest, source)
zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
- zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+ zmemcpy(ds->pending_buf, ss->pending_buf, ds->lit_bufsize * LIT_BUFS);
ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+#ifdef LIT_MEM
+ ds->d_buf = (ushf *)(ds->pending_buf + (ds->lit_bufsize << 1));
+ ds->l_buf = ds->pending_buf + (ds->lit_bufsize << 2);
+#else
+ ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
+#endif
ds->l_desc.dyn_tree = ds->dyn_ltree;
ds->d_desc.dyn_tree = ds->dyn_dtree;
@@ -1154,71 +1335,6 @@ int ZEXPORT deflateCopy (dest, source)
#endif /* MAXSEG_64K */
}
-/* ===========================================================================
- * Read a new buffer from the current input stream, update the adler32
- * and total number of bytes read. All deflate() input goes through
- * this function so some applications may wish to modify it to avoid
- * allocating a large strm->next_in buffer and copying from it.
- * (See also flush_pending()).
- */
-local unsigned read_buf(strm, buf, size)
- z_streamp strm;
- Bytef *buf;
- unsigned size;
-{
- unsigned len = strm->avail_in;
-
- if (len > size) len = size;
- if (len == 0) return 0;
-
- strm->avail_in -= len;
-
- zmemcpy(buf, strm->next_in, len);
- if (strm->state->wrap == 1) {
- strm->adler = adler32(strm->adler, buf, len);
- }
-#ifdef GZIP
- else if (strm->state->wrap == 2) {
- strm->adler = crc32(strm->adler, buf, len);
- }
-#endif
- strm->next_in += len;
- strm->total_in += len;
-
- return len;
-}
-
-/* ===========================================================================
- * Initialize the "longest match" routines for a new zlib stream
- */
-local void lm_init (s)
- deflate_state *s;
-{
- s->window_size = (ulg)2L*s->w_size;
-
- CLEAR_HASH(s);
-
- /* Set the default configuration parameters:
- */
- s->max_lazy_match = configuration_table[s->level].max_lazy;
- s->good_match = configuration_table[s->level].good_length;
- s->nice_match = configuration_table[s->level].nice_length;
- s->max_chain_length = configuration_table[s->level].max_chain;
-
- s->strstart = 0;
- s->block_start = 0L;
- s->lookahead = 0;
- s->insert = 0;
- s->match_length = s->prev_length = MIN_MATCH-1;
- s->match_available = 0;
- s->ins_h = 0;
-#ifndef FASTEST
-#ifdef ASMV
- match_init(); /* initialize the asm code */
-#endif
-#endif
-}
-
#ifndef FASTEST
/* ===========================================================================
* Set match_start to the longest match starting at the given string and
@@ -1229,14 +1345,7 @@ local void lm_init (s)
* string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
* OUT assertion: the match length is not greater than s->lookahead.
*/
-#ifndef ASMV
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
-local uInt longest_match(s, cur_match)
- deflate_state *s;
- IPos cur_match; /* current match */
-{
+local uInt longest_match(deflate_state *s, IPos cur_match) {
unsigned chain_length = s->max_chain_length;/* max hash chain length */
register Bytef *scan = s->window + s->strstart; /* current string */
register Bytef *match; /* matched string */
@@ -1257,10 +1366,10 @@ local uInt longest_match(s, cur_match)
*/
register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
register ush scan_start = *(ushf*)scan;
- register ush scan_end = *(ushf*)(scan+best_len-1);
+ register ush scan_end = *(ushf*)(scan + best_len - 1);
#else
register Bytef *strend = s->window + s->strstart + MAX_MATCH;
- register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end1 = scan[best_len - 1];
register Byte scan_end = scan[best_len];
#endif
@@ -1278,7 +1387,8 @@ local uInt longest_match(s, cur_match)
*/
if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+ "need lookahead");
do {
Assert(cur_match < s->strstart, "no future");
@@ -1296,43 +1406,44 @@ local uInt longest_match(s, cur_match)
/* This code assumes sizeof(unsigned short) == 2. Do not use
* UNALIGNED_OK if your compiler uses a different size.
*/
- if (*(ushf*)(match+best_len-1) != scan_end ||
+ if (*(ushf*)(match + best_len - 1) != scan_end ||
*(ushf*)match != scan_start) continue;
/* It is not necessary to compare scan[2] and match[2] since they are
* always equal when the other bytes match, given that the hash keys
* are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * strstart + 3, + 5, up to strstart + 257. We check for insufficient
* lookahead only every 4th comparison; the 128th check will be made
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is
* necessary to put more guard bytes at the end of the window, or
* to check more often for insufficient lookahead.
*/
Assert(scan[2] == match[2], "scan[2]?");
scan++, match++;
do {
- } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ } while (*(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
+ *(ushf*)(scan += 2) == *(ushf*)(match += 2) &&
scan < strend);
/* The funny "do {}" generates better code on most compilers */
- /* Here, scan <= window+strstart+257 */
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ /* Here, scan <= window + strstart + 257 */
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1),
+ "wild scan");
if (*scan == *match) scan++;
- len = (MAX_MATCH - 1) - (int)(strend-scan);
+ len = (MAX_MATCH - 1) - (int)(strend - scan);
scan = strend - (MAX_MATCH-1);
#else /* UNALIGNED_OK */
- if (match[best_len] != scan_end ||
- match[best_len-1] != scan_end1 ||
- *match != *scan ||
- *++match != scan[1]) continue;
+ if (match[best_len] != scan_end ||
+ match[best_len - 1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
- /* The check at best_len-1 can be removed because it will be made
+ /* The check at best_len - 1 can be removed because it will be made
* again later. (This heuristic is not always a win.)
* It is not necessary to compare scan[2] and match[2] since they
* are always equal when the other bytes match, given that
@@ -1342,7 +1453,7 @@ local uInt longest_match(s, cur_match)
Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
+ * the 256th check will be made at strstart + 258.
*/
do {
} while (*++scan == *++match && *++scan == *++match &&
@@ -1351,7 +1462,8 @@ local uInt longest_match(s, cur_match)
*++scan == *++match && *++scan == *++match &&
scan < strend);
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1),
+ "wild scan");
len = MAX_MATCH - (int)(strend - scan);
scan = strend - MAX_MATCH;
@@ -1363,9 +1475,9 @@ local uInt longest_match(s, cur_match)
best_len = len;
if (len >= nice_match) break;
#ifdef UNALIGNED_OK
- scan_end = *(ushf*)(scan+best_len-1);
+ scan_end = *(ushf*)(scan + best_len - 1);
#else
- scan_end1 = scan[best_len-1];
+ scan_end1 = scan[best_len - 1];
scan_end = scan[best_len];
#endif
}
@@ -1375,17 +1487,13 @@ local uInt longest_match(s, cur_match)
if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
return s->lookahead;
}
-#endif /* ASMV */
#else /* FASTEST */
/* ---------------------------------------------------------------------------
* Optimized version for FASTEST only
*/
-local uInt longest_match(s, cur_match)
- deflate_state *s;
- IPos cur_match; /* current match */
-{
+local uInt longest_match(deflate_state *s, IPos cur_match) {
register Bytef *scan = s->window + s->strstart; /* current string */
register Bytef *match; /* matched string */
register int len; /* length of current match */
@@ -1396,7 +1504,8 @@ local uInt longest_match(s, cur_match)
*/
Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+ Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+ "need lookahead");
Assert(cur_match < s->strstart, "no future");
@@ -1406,7 +1515,7 @@ local uInt longest_match(s, cur_match)
*/
if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
- /* The check at best_len-1 can be removed because it will be made
+ /* The check at best_len - 1 can be removed because it will be made
* again later. (This heuristic is not always a win.)
* It is not necessary to compare scan[2] and match[2] since they
* are always equal when the other bytes match, given that
@@ -1416,7 +1525,7 @@ local uInt longest_match(s, cur_match)
Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
+ * the 256th check will be made at strstart + 258.
*/
do {
} while (*++scan == *++match && *++scan == *++match &&
@@ -1425,7 +1534,7 @@ local uInt longest_match(s, cur_match)
*++scan == *++match && *++scan == *++match &&
scan < strend);
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan");
len = MAX_MATCH - (int)(strend - scan);
@@ -1445,23 +1554,27 @@ local uInt longest_match(s, cur_match)
/* ===========================================================================
* Check that the match at match_start is indeed a match.
*/
-local void check_match(s, start, match, length)
- deflate_state *s;
- IPos start, match;
- int length;
-{
+local void check_match(deflate_state *s, IPos start, IPos match, int length) {
/* check that the match is indeed a match */
- if (zmemcmp(s->window + match,
- s->window + start, length) != EQUAL) {
- fprintf(stderr, " start %u, match %u, length %d\n",
- start, match, length);
+ Bytef *back = s->window + (int)match, *here = s->window + start;
+ IPos len = length;
+ if (match == (IPos)-1) {
+ /* match starts one byte before the current window -- just compare the
+ subsequent length-1 bytes */
+ back++;
+ here++;
+ len--;
+ }
+ if (zmemcmp(back, here, len) != EQUAL) {
+ fprintf(stderr, " start %u, match %d, length %d\n",
+ start, (int)match, length);
do {
- fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
- } while (--length != 0);
+ fprintf(stderr, "(%02x %02x)", *back++, *here++);
+ } while (--len != 0);
z_error("invalid match");
}
if (z_verbose > 1) {
- fprintf(stderr,"\\[%d,%d]", start-match, length);
+ fprintf(stderr,"\\[%d,%d]", start - match, length);
do { putc(s->window[start++], stderr); } while (--length != 0);
}
}
@@ -1470,135 +1583,6 @@ local void check_match(s, start, match, length)
#endif /* ZLIB_DEBUG */
/* ===========================================================================
- * Fill the window when the lookahead becomes insufficient.
- * Updates strstart and lookahead.
- *
- * IN assertion: lookahead < MIN_LOOKAHEAD
- * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
- * At least one byte has been read, or avail_in == 0; reads are
- * performed for at least two bytes (required for the zip translate_eol
- * option -- not supported here).
- */
-local void fill_window(s)
- deflate_state *s;
-{
- unsigned n;
- unsigned more; /* Amount of free space at the end of the window. */
- uInt wsize = s->w_size;
-
- Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
-
- do {
- more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
-
- /* Deal with !@#$% 64K limit: */
- if (sizeof(int) <= 2) {
- if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
- more = wsize;
-
- } else if (more == (unsigned)(-1)) {
- /* Very unlikely, but possible on 16 bit machine if
- * strstart == 0 && lookahead == 1 (input done a byte at time)
- */
- more--;
- }
- }
-
- /* If the window is almost full and there is insufficient lookahead,
- * move the upper half to the lower one to make room in the upper half.
- */
- if (s->strstart >= wsize+MAX_DIST(s)) {
-
- zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
- s->match_start -= wsize;
- s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
- s->block_start -= (long) wsize;
- slide_hash(s);
- more += wsize;
- }
- if (s->strm->avail_in == 0) break;
-
- /* If there was no sliding:
- * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
- * more == window_size - lookahead - strstart
- * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
- * => more >= window_size - 2*WSIZE + 2
- * In the BIG_MEM or MMAP case (not yet supported),
- * window_size == input_size + MIN_LOOKAHEAD &&
- * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
- * Otherwise, window_size == 2*WSIZE so more >= 2.
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
- */
- Assert(more >= 2, "more < 2");
-
- n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
- s->lookahead += n;
-
- /* Initialize the hash value now that we have some input: */
- if (s->lookahead + s->insert >= MIN_MATCH) {
- uInt str = s->strstart - s->insert;
- s->ins_h = s->window[str];
- UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- while (s->insert) {
- UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
-#ifndef FASTEST
- s->prev[str & s->w_mask] = s->head[s->ins_h];
-#endif
- s->head[s->ins_h] = (Pos)str;
- str++;
- s->insert--;
- if (s->lookahead + s->insert < MIN_MATCH)
- break;
- }
- }
- /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
- * but this is not important since only literal bytes will be emitted.
- */
-
- } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
-
- /* If the WIN_INIT bytes after the end of the current data have never been
- * written, then zero those bytes in order to avoid memory check reports of
- * the use of uninitialized (or uninitialised as Julian writes) bytes by
- * the longest match routines. Update the high water mark for the next
- * time through here. WIN_INIT is set to MAX_MATCH since the longest match
- * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
- */
- if (s->high_water < s->window_size) {
- ulg curr = s->strstart + (ulg)(s->lookahead);
- ulg init;
-
- if (s->high_water < curr) {
- /* Previous high water mark below current data -- zero WIN_INIT
- * bytes or up to end of window, whichever is less.
- */
- init = s->window_size - curr;
- if (init > WIN_INIT)
- init = WIN_INIT;
- zmemzero(s->window + curr, (unsigned)init);
- s->high_water = curr + init;
- }
- else if (s->high_water < (ulg)curr + WIN_INIT) {
- /* High water mark at or above current data, but below current data
- * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
- * to end of window, whichever is less.
- */
- init = (ulg)curr + WIN_INIT - s->high_water;
- if (init > s->window_size - s->high_water)
- init = s->window_size - s->high_water;
- zmemzero(s->window + s->high_water, (unsigned)init);
- s->high_water += init;
- }
- }
-
- Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
- "not enough room for search");
-}
-
-/* ===========================================================================
* Flush the current block, with given end-of-file flag.
* IN assertion: strstart is set to the end of the current match.
*/
@@ -1638,12 +1622,9 @@ local void fill_window(s)
*
* deflate_stored() is written to minimize the number of times an input byte is
* copied. It is most efficient with large input and output buffers, which
- * maximizes the opportunites to have a single copy from next_in to next_out.
+ * maximizes the opportunities to have a single copy from next_in to next_out.
*/
-local block_state deflate_stored(s, flush)
- deflate_state *s;
- int flush;
-{
+local block_state deflate_stored(deflate_state *s, int flush) {
/* Smallest worthy block size when not flushing or finishing. By default
* this is 32K. This can be as small as 507 bytes for memLevel == 1. For
* large input and output buffers, the stored block size will be larger.
@@ -1742,6 +1723,7 @@ local block_state deflate_stored(s, flush)
s->matches = 2; /* clear hash */
zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
s->strstart = s->w_size;
+ s->insert = s->strstart;
}
else {
if (s->window_size - s->strstart <= used) {
@@ -1750,12 +1732,14 @@ local block_state deflate_stored(s, flush)
zmemcpy(s->window, s->window + s->w_size, s->strstart);
if (s->matches < 2)
s->matches++; /* add a pending slide_hash() */
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
}
zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
s->strstart += used;
+ s->insert += MIN(used, s->w_size - s->insert);
}
s->block_start = s->strstart;
- s->insert += MIN(used, s->w_size - s->insert);
}
if (s->high_water < s->strstart)
s->high_water = s->strstart;
@@ -1770,7 +1754,7 @@ local block_state deflate_stored(s, flush)
return block_done;
/* Fill the window with any remaining input. */
- have = s->window_size - s->strstart - 1;
+ have = s->window_size - s->strstart;
if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
/* Slide the window down. */
s->block_start -= s->w_size;
@@ -1779,12 +1763,15 @@ local block_state deflate_stored(s, flush)
if (s->matches < 2)
s->matches++; /* add a pending slide_hash() */
have += s->w_size; /* more space now */
+ if (s->insert > s->strstart)
+ s->insert = s->strstart;
}
if (have > s->strm->avail_in)
have = s->strm->avail_in;
if (have) {
read_buf(s->strm, s->window + s->strstart, have);
s->strstart += have;
+ s->insert += MIN(have, s->w_size - s->insert);
}
if (s->high_water < s->strstart)
s->high_water = s->strstart;
@@ -1821,10 +1808,7 @@ local block_state deflate_stored(s, flush)
* new strings in the dictionary only for unmatched strings or for short
* matches. It is used only for the fast compression options.
*/
-local block_state deflate_fast(s, flush)
- deflate_state *s;
- int flush;
-{
+local block_state deflate_fast(deflate_state *s, int flush) {
IPos hash_head; /* head of the hash chain */
int bflush; /* set if current block must be flushed */
@@ -1842,7 +1826,7 @@ local block_state deflate_fast(s, flush)
if (s->lookahead == 0) break; /* flush the current block */
}
- /* Insert the string window[strstart .. strstart+2] in the
+ /* Insert the string window[strstart .. strstart + 2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
hash_head = NIL;
@@ -1890,7 +1874,7 @@ local block_state deflate_fast(s, flush)
s->strstart += s->match_length;
s->match_length = 0;
s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart + 1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
@@ -1901,7 +1885,7 @@ local block_state deflate_fast(s, flush)
} else {
/* No match, output a literal byte */
Tracevv((stderr,"%c", s->window[s->strstart]));
- _tr_tally_lit (s, s->window[s->strstart], bflush);
+ _tr_tally_lit(s, s->window[s->strstart], bflush);
s->lookahead--;
s->strstart++;
}
@@ -1912,7 +1896,7 @@ local block_state deflate_fast(s, flush)
FLUSH_BLOCK(s, 1);
return finish_done;
}
- if (s->last_lit)
+ if (s->sym_next)
FLUSH_BLOCK(s, 0);
return block_done;
}
@@ -1923,10 +1907,7 @@ local block_state deflate_fast(s, flush)
* evaluation for matches: a match is finally adopted only if there is
* no better match at the next window position.
*/
-local block_state deflate_slow(s, flush)
- deflate_state *s;
- int flush;
-{
+local block_state deflate_slow(deflate_state *s, int flush) {
IPos hash_head; /* head of hash chain */
int bflush; /* set if current block must be flushed */
@@ -1945,7 +1926,7 @@ local block_state deflate_slow(s, flush)
if (s->lookahead == 0) break; /* flush the current block */
}
- /* Insert the string window[strstart .. strstart+2] in the
+ /* Insert the string window[strstart .. strstart + 2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
hash_head = NIL;
@@ -1987,17 +1968,17 @@ local block_state deflate_slow(s, flush)
uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
/* Do not insert strings in hash table beyond this. */
- check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+ check_match(s, s->strstart - 1, s->prev_match, s->prev_length);
- _tr_tally_dist(s, s->strstart -1 - s->prev_match,
+ _tr_tally_dist(s, s->strstart - 1 - s->prev_match,
s->prev_length - MIN_MATCH, bflush);
/* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted. If there is not
+ * strstart - 1 and strstart are already inserted. If there is not
* enough lookahead, the last two strings are not inserted in
* the hash table.
*/
- s->lookahead -= s->prev_length-1;
+ s->lookahead -= s->prev_length - 1;
s->prev_length -= 2;
do {
if (++s->strstart <= max_insert) {
@@ -2015,8 +1996,8 @@ local block_state deflate_slow(s, flush)
* single literal. If there was a match but the current match
* is longer, truncate the previous match to a single literal.
*/
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ Tracevv((stderr,"%c", s->window[s->strstart - 1]));
+ _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
if (bflush) {
FLUSH_BLOCK_ONLY(s, 0);
}
@@ -2034,8 +2015,8 @@ local block_state deflate_slow(s, flush)
}
Assert (flush != Z_NO_FLUSH, "no flush?");
if (s->match_available) {
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally_lit(s, s->window[s->strstart-1], bflush);
+ Tracevv((stderr,"%c", s->window[s->strstart - 1]));
+ _tr_tally_lit(s, s->window[s->strstart - 1], bflush);
s->match_available = 0;
}
s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
@@ -2043,7 +2024,7 @@ local block_state deflate_slow(s, flush)
FLUSH_BLOCK(s, 1);
return finish_done;
}
- if (s->last_lit)
+ if (s->sym_next)
FLUSH_BLOCK(s, 0);
return block_done;
}
@@ -2054,10 +2035,7 @@ local block_state deflate_slow(s, flush)
* one. Do not maintain a hash table. (It will be regenerated if this run of
* deflate switches away from Z_RLE.)
*/
-local block_state deflate_rle(s, flush)
- deflate_state *s;
- int flush;
-{
+local block_state deflate_rle(deflate_state *s, int flush) {
int bflush; /* set if current block must be flushed */
uInt prev; /* byte at distance one to match */
Bytef *scan, *strend; /* scan goes up to strend for length of run */
@@ -2092,7 +2070,8 @@ local block_state deflate_rle(s, flush)
if (s->match_length > s->lookahead)
s->match_length = s->lookahead;
}
- Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
+ Assert(scan <= s->window + (uInt)(s->window_size - 1),
+ "wild scan");
}
/* Emit match if have run of MIN_MATCH or longer, else emit literal */
@@ -2107,7 +2086,7 @@ local block_state deflate_rle(s, flush)
} else {
/* No match, output a literal byte */
Tracevv((stderr,"%c", s->window[s->strstart]));
- _tr_tally_lit (s, s->window[s->strstart], bflush);
+ _tr_tally_lit(s, s->window[s->strstart], bflush);
s->lookahead--;
s->strstart++;
}
@@ -2118,7 +2097,7 @@ local block_state deflate_rle(s, flush)
FLUSH_BLOCK(s, 1);
return finish_done;
}
- if (s->last_lit)
+ if (s->sym_next)
FLUSH_BLOCK(s, 0);
return block_done;
}
@@ -2127,10 +2106,7 @@ local block_state deflate_rle(s, flush)
* For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
* (It will be regenerated if this run of deflate switches away from Huffman.)
*/
-local block_state deflate_huff(s, flush)
- deflate_state *s;
- int flush;
-{
+local block_state deflate_huff(deflate_state *s, int flush) {
int bflush; /* set if current block must be flushed */
for (;;) {
@@ -2147,7 +2123,7 @@ local block_state deflate_huff(s, flush)
/* Output a literal byte */
s->match_length = 0;
Tracevv((stderr,"%c", s->window[s->strstart]));
- _tr_tally_lit (s, s->window[s->strstart], bflush);
+ _tr_tally_lit(s, s->window[s->strstart], bflush);
s->lookahead--;
s->strstart++;
if (bflush) FLUSH_BLOCK(s, 0);
@@ -2157,7 +2133,7 @@ local block_state deflate_huff(s, flush)
FLUSH_BLOCK(s, 1);
return finish_done;
}
- if (s->last_lit)
+ if (s->sym_next)
FLUSH_BLOCK(s, 0);
return block_done;
}