path: root/src/libs/7zip/unix/C
Diffstat (limited to 'src/libs/7zip/unix/C')
-rw-r--r--  src/libs/7zip/unix/C/7zBuf.h  39
-rw-r--r--  src/libs/7zip/unix/C/7zBuf2.c  45
-rw-r--r--  src/libs/7zip/unix/C/7zCrc.c  76
-rw-r--r--  src/libs/7zip/unix/C/7zCrc.h  25
-rw-r--r--  src/libs/7zip/unix/C/7zCrcOpt.c  34
-rw-r--r--  src/libs/7zip/unix/C/7zCrcT8.c  43
-rw-r--r--  src/libs/7zip/unix/C/7zStream.c  169
-rw-r--r--  src/libs/7zip/unix/C/7zVersion.h  7
-rw-r--r--  src/libs/7zip/unix/C/Aes.c  284
-rw-r--r--  src/libs/7zip/unix/C/Aes.h  38
-rw-r--r--  src/libs/7zip/unix/C/Alloc.c  280
-rw-r--r--  src/libs/7zip/unix/C/Alloc.h  27
-rw-r--r--  src/libs/7zip/unix/C/Bra.c  133
-rw-r--r--  src/libs/7zip/unix/C/Bra.h  68
-rw-r--r--  src/libs/7zip/unix/C/Bra86.c  85
-rw-r--r--  src/libs/7zip/unix/C/BraIA64.c  67
-rw-r--r--  src/libs/7zip/unix/C/BwtSort.c  516
-rw-r--r--  src/libs/7zip/unix/C/BwtSort.h  30
-rw-r--r--  src/libs/7zip/unix/C/CpuArch.c  168
-rw-r--r--  src/libs/7zip/unix/C/CpuArch.h  155
-rw-r--r--  src/libs/7zip/unix/C/Delta.c  62
-rw-r--r--  src/libs/7zip/unix/C/Delta.h  23
-rw-r--r--  src/libs/7zip/unix/C/HuffEnc.c  146
-rw-r--r--  src/libs/7zip/unix/C/HuffEnc.h  27
-rw-r--r--  src/libs/7zip/unix/C/LzFind.c  761
-rw-r--r--  src/libs/7zip/unix/C/LzFind.h  115
-rw-r--r--  src/libs/7zip/unix/C/LzFindMt.c  793
-rw-r--r--  src/libs/7zip/unix/C/LzFindMt.h  105
-rw-r--r--  src/libs/7zip/unix/C/LzHash.h  54
-rw-r--r--  src/libs/7zip/unix/C/Lzma2Dec.c  356
-rw-r--r--  src/libs/7zip/unix/C/Lzma2Dec.h  84
-rw-r--r--  src/libs/7zip/unix/C/Lzma2Enc.c  477
-rw-r--r--  src/libs/7zip/unix/C/Lzma2Enc.h  66
-rw-r--r--  src/libs/7zip/unix/C/LzmaDec.c  999
-rw-r--r--  src/libs/7zip/unix/C/LzmaDec.h  231
-rw-r--r--  src/libs/7zip/unix/C/LzmaEnc.c  2268
-rw-r--r--  src/libs/7zip/unix/C/LzmaEnc.h  80
-rw-r--r--  src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.c  61
-rw-r--r--  src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.h  51
-rw-r--r--  src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.c  113
-rw-r--r--  src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.h  78
-rw-r--r--  src/libs/7zip/unix/C/MtCoder.c  327
-rw-r--r--  src/libs/7zip/unix/C/MtCoder.h  98
-rw-r--r--  src/libs/7zip/unix/C/Ppmd.h  85
-rw-r--r--  src/libs/7zip/unix/C/Ppmd7.c  708
-rw-r--r--  src/libs/7zip/unix/C/Ppmd7.h  140
-rw-r--r--  src/libs/7zip/unix/C/Ppmd7Dec.c  187
-rw-r--r--  src/libs/7zip/unix/C/Ppmd7Enc.c  185
-rw-r--r--  src/libs/7zip/unix/C/Ppmd8.c  1120
-rw-r--r--  src/libs/7zip/unix/C/Ppmd8.h  133
-rw-r--r--  src/libs/7zip/unix/C/Ppmd8Dec.c  155
-rw-r--r--  src/libs/7zip/unix/C/Ppmd8Enc.c  161
-rw-r--r--  src/libs/7zip/unix/C/RotateDefs.h  20
-rw-r--r--  src/libs/7zip/unix/C/Sha256.c  204
-rw-r--r--  src/libs/7zip/unix/C/Sha256.h  26
-rw-r--r--  src/libs/7zip/unix/C/Sort.c  93
-rw-r--r--  src/libs/7zip/unix/C/Sort.h  20
-rw-r--r--  src/libs/7zip/unix/C/Threads.c  582
-rw-r--r--  src/libs/7zip/unix/C/Threads.h  123
-rw-r--r--  src/libs/7zip/unix/C/Types.h  254
-rw-r--r--  src/libs/7zip/unix/C/Xz.c  88
-rw-r--r--  src/libs/7zip/unix/C/Xz.h  252
-rw-r--r--  src/libs/7zip/unix/C/XzCrc64.c  33
-rw-r--r--  src/libs/7zip/unix/C/XzCrc64.h  26
-rw-r--r--  src/libs/7zip/unix/C/XzDec.c  875
-rw-r--r--  src/libs/7zip/unix/C/XzEnc.c  497
-rw-r--r--  src/libs/7zip/unix/C/XzEnc.h  25
-rw-r--r--  src/libs/7zip/unix/C/XzIn.c  306
68 files changed, 15962 insertions, 0 deletions
diff --git a/src/libs/7zip/unix/C/7zBuf.h b/src/libs/7zip/unix/C/7zBuf.h
new file mode 100644
index 000000000..e9f2f316d
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zBuf.h
@@ -0,0 +1,39 @@
+/* 7zBuf.h -- Byte Buffer
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_BUF_H
+#define __7Z_BUF_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct
+{
+ Byte *data;
+ size_t size;
+} CBuf;
+
+void Buf_Init(CBuf *p);
+int Buf_Create(CBuf *p, size_t size, ISzAlloc *alloc);
+void Buf_Free(CBuf *p, ISzAlloc *alloc);
+
+typedef struct
+{
+ Byte *data;
+ size_t size;
+ size_t pos;
+} CDynBuf;
+
+void DynBuf_Construct(CDynBuf *p);
+void DynBuf_SeekToBeg(CDynBuf *p);
+int DynBuf_Write(CDynBuf *p, const Byte *buf, size_t size, ISzAlloc *alloc);
+void DynBuf_Free(CDynBuf *p, ISzAlloc *alloc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
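
For orientation, a minimal usage sketch of the CDynBuf interface declared above; the malloc-based g_Alloc wrapper and the AppendTwice name are illustrative, but the ISzAlloc members it fills in (Alloc(p, size) / Free(p, address)) are the ones 7zBuf2.c below relies on.

#include <stdlib.h>
#include "7zBuf.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

int AppendTwice(const Byte *chunk, size_t len)
{
  CDynBuf buf;
  int ok;
  DynBuf_Construct(&buf);
  ok = DynBuf_Write(&buf, chunk, len, &g_Alloc)
    && DynBuf_Write(&buf, chunk, len, &g_Alloc);   /* buffer grows as needed */
  DynBuf_Free(&buf, &g_Alloc);
  return ok;
}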
diff --git a/src/libs/7zip/unix/C/7zBuf2.c b/src/libs/7zip/unix/C/7zBuf2.c
new file mode 100644
index 000000000..8d17e0dcf
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zBuf2.c
@@ -0,0 +1,45 @@
+/* 7zBuf2.c -- Byte Buffer
+2008-10-04 : Igor Pavlov : Public domain */
+
+#include <string.h>
+#include "7zBuf.h"
+
+void DynBuf_Construct(CDynBuf *p)
+{
+ p->data = 0;
+ p->size = 0;
+ p->pos = 0;
+}
+
+void DynBuf_SeekToBeg(CDynBuf *p)
+{
+ p->pos = 0;
+}
+
+int DynBuf_Write(CDynBuf *p, const Byte *buf, size_t size, ISzAlloc *alloc)
+{
+ if (size > p->size - p->pos)
+ {
+ size_t newSize = p->pos + size;
+ Byte *data;
+ newSize += newSize / 4;
+ data = (Byte *)alloc->Alloc(alloc, newSize);
+ if (data == 0)
+ return 0;
+ p->size = newSize;
+ memcpy(data, p->data, p->pos);
+ alloc->Free(alloc, p->data);
+ p->data = data;
+ }
+ memcpy(p->data + p->pos, buf, size);
+ p->pos += size;
+ return 1;
+}
+
+void DynBuf_Free(CDynBuf *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->data);
+ p->data = 0;
+ p->size = 0;
+ p->pos = 0;
+}
diff --git a/src/libs/7zip/unix/C/7zCrc.c b/src/libs/7zip/unix/C/7zCrc.c
new file mode 100644
index 000000000..404090ef3
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zCrc.c
@@ -0,0 +1,76 @@
+/* 7zCrc.c -- CRC32 calculation
+2009-11-23 : Igor Pavlov : Public domain */
+
+#include "7zCrc.h"
+#include "CpuArch.h"
+
+#define kCrcPoly 0xEDB88320
+
+#ifdef MY_CPU_LE
+#define CRC_NUM_TABLES 8
+#else
+#define CRC_NUM_TABLES 1
+#endif
+
+typedef UInt32 (MY_FAST_CALL *CRC_FUNC)(UInt32 v, const void *data, size_t size, const UInt32 *table);
+
+static CRC_FUNC g_CrcUpdate;
+UInt32 g_CrcTable[256 * CRC_NUM_TABLES];
+
+#if CRC_NUM_TABLES == 1
+
+#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
+
+static UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table)
+{
+ const Byte *p = (const Byte *)data;
+ for (; size > 0; size--, p++)
+ v = CRC_UPDATE_BYTE_2(v, *p);
+ return v;
+}
+
+#else
+
+UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table);
+UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
+
+#endif
+
+UInt32 MY_FAST_CALL CrcUpdate(UInt32 v, const void *data, size_t size)
+{
+ return g_CrcUpdate(v, data, size, g_CrcTable);
+}
+
+UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size)
+{
+ return g_CrcUpdate(CRC_INIT_VAL, data, size, g_CrcTable) ^ CRC_INIT_VAL;
+}
+
+void MY_FAST_CALL CrcGenerateTable()
+{
+ UInt32 i;
+ for (i = 0; i < 256; i++)
+ {
+ UInt32 r = i;
+ unsigned j;
+ for (j = 0; j < 8; j++)
+ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
+ g_CrcTable[i] = r;
+ }
+ #if CRC_NUM_TABLES == 1
+ g_CrcUpdate = CrcUpdateT1;
+ #else
+ for (; i < 256 * CRC_NUM_TABLES; i++)
+ {
+ UInt32 r = g_CrcTable[i - 256];
+ g_CrcTable[i] = g_CrcTable[r & 0xFF] ^ (r >> 8);
+ }
+ g_CrcUpdate = CrcUpdateT4;
+/* FIXME
+ #ifdef MY_CPU_X86_OR_AMD64
+ if (!CPU_Is_InOrder())
+ g_CrcUpdate = CrcUpdateT8;
+ #endif
+*/
+ #endif
+}
diff --git a/src/libs/7zip/unix/C/7zCrc.h b/src/libs/7zip/unix/C/7zCrc.h
new file mode 100644
index 000000000..38e3e5fbc
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zCrc.h
@@ -0,0 +1,25 @@
+/* 7zCrc.h -- CRC32 calculation
+2009-11-21 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_CRC_H
+#define __7Z_CRC_H
+
+#include "Types.h"
+
+EXTERN_C_BEGIN
+
+extern UInt32 g_CrcTable[];
+
+/* Call CrcGenerateTable one time before other CRC functions */
+void MY_FAST_CALL CrcGenerateTable(void);
+
+#define CRC_INIT_VAL 0xFFFFFFFF
+#define CRC_GET_DIGEST(crc) ((crc) ^ CRC_INIT_VAL)
+#define CRC_UPDATE_BYTE(crc, b) (g_CrcTable[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
+
+UInt32 MY_FAST_CALL CrcUpdate(UInt32 crc, const void *data, size_t size);
+UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size);
+
+EXTERN_C_END
+
+#endif
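
A short sketch of the call order the header above prescribes: generate the table once, then either accumulate with CrcUpdate or use CrcCalc for one-shot data (the function name here is illustrative).

#include "7zCrc.h"

UInt32 ChecksumOfBuffer(const Byte *data, size_t size)
{
  UInt32 crc;
  CrcGenerateTable();               /* required once, before any other CRC call */
  crc = CRC_INIT_VAL;
  crc = CrcUpdate(crc, data, size); /* may be repeated for further chunks */
  return CRC_GET_DIGEST(crc);       /* equals CrcCalc(data, size) for a single chunk */
}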
diff --git a/src/libs/7zip/unix/C/7zCrcOpt.c b/src/libs/7zip/unix/C/7zCrcOpt.c
new file mode 100644
index 000000000..6c766a209
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zCrcOpt.c
@@ -0,0 +1,34 @@
+/* 7zCrcOpt.c -- CRC32 calculation : optimized version
+2009-11-23 : Igor Pavlov : Public domain */
+
+#include "CpuArch.h"
+
+#ifdef MY_CPU_LE
+
+#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
+
+UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table)
+{
+ const Byte *p = (const Byte *)data;
+ for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
+ v = CRC_UPDATE_BYTE_2(v, *p);
+ for (; size >= 4; size -= 4, p += 4)
+ {
+ v ^= *(const UInt32 *)p;
+ v =
+ table[0x300 + (v & 0xFF)] ^
+ table[0x200 + ((v >> 8) & 0xFF)] ^
+ table[0x100 + ((v >> 16) & 0xFF)] ^
+ table[0x000 + ((v >> 24))];
+ }
+ for (; size > 0; size--, p++)
+ v = CRC_UPDATE_BYTE_2(v, *p);
+ return v;
+}
+
+UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table)
+{
+ return CrcUpdateT4(v, data, size, table);
+}
+
+#endif
diff --git a/src/libs/7zip/unix/C/7zCrcT8.c b/src/libs/7zip/unix/C/7zCrcT8.c
new file mode 100644
index 000000000..cd80e262b
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zCrcT8.c
@@ -0,0 +1,43 @@
+/* 7zCrcT8.c -- CRC32 calculation with 8 tables
+2008-03-19
+Igor Pavlov
+Public domain */
+
+#include "7zCrc.h"
+
+#define kCrcPoly 0xEDB88320
+#define CRC_NUM_TABLES 8
+
+UInt32 g_CrcTable[256 * CRC_NUM_TABLES];
+
+void MY_FAST_CALL CrcGenerateTable()
+{
+ UInt32 i;
+ for (i = 0; i < 256; i++)
+ {
+ UInt32 r = i;
+ int j;
+ for (j = 0; j < 8; j++)
+ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
+ g_CrcTable[i] = r;
+ }
+ #if CRC_NUM_TABLES > 1
+ for (; i < 256 * CRC_NUM_TABLES; i++)
+ {
+ UInt32 r = g_CrcTable[i - 256];
+ g_CrcTable[i] = g_CrcTable[r & 0xFF] ^ (r >> 8);
+ }
+ #endif
+}
+
+UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table);
+
+UInt32 MY_FAST_CALL CrcUpdate(UInt32 v, const void *data, size_t size)
+{
+ return CrcUpdateT8(v, data, size, g_CrcTable);
+}
+
+UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size)
+{
+ return CrcUpdateT8(CRC_INIT_VAL, data, size, g_CrcTable) ^ 0xFFFFFFFF;
+}
diff --git a/src/libs/7zip/unix/C/7zStream.c b/src/libs/7zip/unix/C/7zStream.c
new file mode 100644
index 000000000..0ebb7b5f9
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zStream.c
@@ -0,0 +1,169 @@
+/* 7zStream.c -- 7z Stream functions
+2010-03-11 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+#include "Types.h"
+
+SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType)
+{
+ while (size != 0)
+ {
+ size_t processed = size;
+ RINOK(stream->Read(stream, buf, &processed));
+ if (processed == 0)
+ return errorType;
+ buf = (void *)((Byte *)buf + processed);
+ size -= processed;
+ }
+ return SZ_OK;
+}
+
+SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size)
+{
+ return SeqInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
+}
+
+SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf)
+{
+ size_t processed = 1;
+ RINOK(stream->Read(stream, buf, &processed));
+ return (processed == 1) ? SZ_OK : SZ_ERROR_INPUT_EOF;
+}
+
+SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset)
+{
+ Int64 t = offset;
+ return stream->Seek(stream, &t, SZ_SEEK_SET);
+}
+
+SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size)
+{
+ const void *lookBuf;
+ if (*size == 0)
+ return SZ_OK;
+ RINOK(stream->Look(stream, &lookBuf, size));
+ memcpy(buf, lookBuf, *size);
+ return stream->Skip(stream, *size);
+}
+
+SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType)
+{
+ while (size != 0)
+ {
+ size_t processed = size;
+ RINOK(stream->Read(stream, buf, &processed));
+ if (processed == 0)
+ return errorType;
+ buf = (void *)((Byte *)buf + processed);
+ size -= processed;
+ }
+ return SZ_OK;
+}
+
+SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size)
+{
+ return LookInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
+}
+
+static SRes LookToRead_Look_Lookahead(void *pp, const void **buf, size_t *size)
+{
+ SRes res = SZ_OK;
+ CLookToRead *p = (CLookToRead *)pp;
+ size_t size2 = p->size - p->pos;
+ if (size2 == 0 && *size > 0)
+ {
+ p->pos = 0;
+ size2 = LookToRead_BUF_SIZE;
+ res = p->realStream->Read(p->realStream, p->buf, &size2);
+ p->size = size2;
+ }
+ if (size2 < *size)
+ *size = size2;
+ *buf = p->buf + p->pos;
+ return res;
+}
+
+static SRes LookToRead_Look_Exact(void *pp, const void **buf, size_t *size)
+{
+ SRes res = SZ_OK;
+ CLookToRead *p = (CLookToRead *)pp;
+ size_t size2 = p->size - p->pos;
+ if (size2 == 0 && *size > 0)
+ {
+ p->pos = 0;
+ if (*size > LookToRead_BUF_SIZE)
+ *size = LookToRead_BUF_SIZE;
+ res = p->realStream->Read(p->realStream, p->buf, size);
+ size2 = p->size = *size;
+ }
+ if (size2 < *size)
+ *size = size2;
+ *buf = p->buf + p->pos;
+ return res;
+}
+
+static SRes LookToRead_Skip(void *pp, size_t offset)
+{
+ CLookToRead *p = (CLookToRead *)pp;
+ p->pos += offset;
+ return SZ_OK;
+}
+
+static SRes LookToRead_Read(void *pp, void *buf, size_t *size)
+{
+ CLookToRead *p = (CLookToRead *)pp;
+ size_t rem = p->size - p->pos;
+ if (rem == 0)
+ return p->realStream->Read(p->realStream, buf, size);
+ if (rem > *size)
+ rem = *size;
+ memcpy(buf, p->buf + p->pos, rem);
+ p->pos += rem;
+ *size = rem;
+ return SZ_OK;
+}
+
+static SRes LookToRead_Seek(void *pp, Int64 *pos, ESzSeek origin)
+{
+ CLookToRead *p = (CLookToRead *)pp;
+ p->pos = p->size = 0;
+ return p->realStream->Seek(p->realStream, pos, origin);
+}
+
+void LookToRead_CreateVTable(CLookToRead *p, int lookahead)
+{
+ p->s.Look = lookahead ?
+ LookToRead_Look_Lookahead :
+ LookToRead_Look_Exact;
+ p->s.Skip = LookToRead_Skip;
+ p->s.Read = LookToRead_Read;
+ p->s.Seek = LookToRead_Seek;
+}
+
+void LookToRead_Init(CLookToRead *p)
+{
+ p->pos = p->size = 0;
+}
+
+static SRes SecToLook_Read(void *pp, void *buf, size_t *size)
+{
+ CSecToLook *p = (CSecToLook *)pp;
+ return LookInStream_LookRead(p->realStream, buf, size);
+}
+
+void SecToLook_CreateVTable(CSecToLook *p)
+{
+ p->s.Read = SecToLook_Read;
+}
+
+static SRes SecToRead_Read(void *pp, void *buf, size_t *size)
+{
+ CSecToRead *p = (CSecToRead *)pp;
+ return p->realStream->Read(p->realStream, buf, size);
+}
+
+void SecToRead_CreateVTable(CSecToRead *p)
+{
+ p->s.Read = SecToRead_Read;
+}
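
A sketch of how the CLookToRead adapter above is typically wired onto a seekable stream; the field and type names come from the CLookToRead / ISeekInStream definitions in Types.h, which this file uses but does not define, and the wrapper name is illustrative.

#include "Types.h"

void WrapSeekStream(CLookToRead *look, ISeekInStream *seekStream)
{
  LookToRead_CreateVTable(look, 0 /* 0 = exact reads, 1 = lookahead buffering */);
  look->realStream = seekStream;
  LookToRead_Init(look);
  /* look->s can now be passed wherever an ILookInStream is expected */
}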
diff --git a/src/libs/7zip/unix/C/7zVersion.h b/src/libs/7zip/unix/C/7zVersion.h
new file mode 100644
index 000000000..9d99c5dff
--- /dev/null
+++ b/src/libs/7zip/unix/C/7zVersion.h
@@ -0,0 +1,7 @@
+#define MY_VER_MAJOR 9
+#define MY_VER_MINOR 20
+#define MY_VER_BUILD 0
+#define MY_VERSION "9.20"
+#define MY_DATE "2010-11-18"
+#define MY_COPYRIGHT ": Igor Pavlov : Public domain"
+#define MY_VERSION_COPYRIGHT_DATE MY_VERSION " " MY_COPYRIGHT " : " MY_DATE
diff --git a/src/libs/7zip/unix/C/Aes.c b/src/libs/7zip/unix/C/Aes.c
new file mode 100644
index 000000000..1bb658793
--- /dev/null
+++ b/src/libs/7zip/unix/C/Aes.c
@@ -0,0 +1,284 @@
+/* Aes.c -- AES encryption / decryption
+2009-11-23 : Igor Pavlov : Public domain */
+
+#include "Aes.h"
+#include "CpuArch.h"
+
+static UInt32 T[256 * 4];
+static Byte Sbox[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
+
+void MY_FAST_CALL AesCbc_Encode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCbc_Decode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCtr_Code(UInt32 *ivAes, Byte *data, size_t numBlocks);
+
+void MY_FAST_CALL AesCbc_Encode_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCbc_Decode_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCtr_Code_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks);
+
+AES_CODE_FUNC g_AesCbc_Encode;
+AES_CODE_FUNC g_AesCbc_Decode;
+AES_CODE_FUNC g_AesCtr_Code;
+
+static UInt32 D[256 * 4];
+static Byte InvS[256];
+
+static Byte Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 };
+
+#define xtime(x) ((((x) << 1) ^ (((x) & 0x80) != 0 ? 0x1B : 0)) & 0xFF)
+
+#define Ui32(a0, a1, a2, a3) ((UInt32)(a0) | ((UInt32)(a1) << 8) | ((UInt32)(a2) << 16) | ((UInt32)(a3) << 24))
+
+#define gb0(x) ( (x) & 0xFF)
+#define gb1(x) (((x) >> ( 8)) & 0xFF)
+#define gb2(x) (((x) >> (16)) & 0xFF)
+#define gb3(x) (((x) >> (24)) & 0xFF)
+
+void AesGenTables(void)
+{
+ unsigned i;
+ for (i = 0; i < 256; i++)
+ InvS[Sbox[i]] = (Byte)i;
+ for (i = 0; i < 256; i++)
+ {
+ {
+ UInt32 a1 = Sbox[i];
+ UInt32 a2 = xtime(a1);
+ UInt32 a3 = a2 ^ a1;
+ T[ i] = Ui32(a2, a1, a1, a3);
+ T[0x100 + i] = Ui32(a3, a2, a1, a1);
+ T[0x200 + i] = Ui32(a1, a3, a2, a1);
+ T[0x300 + i] = Ui32(a1, a1, a3, a2);
+ }
+ {
+ UInt32 a1 = InvS[i];
+ UInt32 a2 = xtime(a1);
+ UInt32 a4 = xtime(a2);
+ UInt32 a8 = xtime(a4);
+ UInt32 a9 = a8 ^ a1;
+ UInt32 aB = a8 ^ a2 ^ a1;
+ UInt32 aD = a8 ^ a4 ^ a1;
+ UInt32 aE = a8 ^ a4 ^ a2;
+ D[ i] = Ui32(aE, a9, aD, aB);
+ D[0x100 + i] = Ui32(aB, aE, a9, aD);
+ D[0x200 + i] = Ui32(aD, aB, aE, a9);
+ D[0x300 + i] = Ui32(a9, aD, aB, aE);
+ }
+ }
+ g_AesCbc_Encode = AesCbc_Encode;
+ g_AesCbc_Decode = AesCbc_Decode;
+ g_AesCtr_Code = AesCtr_Code;
+/* FIXME
+ #ifdef MY_CPU_X86_OR_AMD64
+ if (CPU_Is_Aes_Supported())
+ {
+ g_AesCbc_Encode = AesCbc_Encode_Intel;
+ g_AesCbc_Decode = AesCbc_Decode_Intel;
+ g_AesCtr_Code = AesCtr_Code_Intel;
+ }
+ #endif
+*/
+}
+
+#define HT(i, x, s) (T + (x << 8))[gb ## x(s[(i + x) & 3])]
+#define HT4(m, i, s, p) m[i] = \
+ HT(i, 0, s) ^ \
+ HT(i, 1, s) ^ \
+ HT(i, 2, s) ^ \
+ HT(i, 3, s) ^ w[p + i]
+/* this order (2031) in HT16 is for VC6/K8 speed optimization */
+#define HT16(m, s, p) \
+ HT4(m, 2, s, p); \
+ HT4(m, 0, s, p); \
+ HT4(m, 3, s, p); \
+ HT4(m, 1, s, p); \
+
+#define FT(i, x) Sbox[gb ## x(m[(i + x) & 3])]
+#define FT4(i) dest[i] = Ui32(FT(i, 0), FT(i, 1), FT(i, 2), FT(i, 3)) ^ w[i];
+
+#define HD(i, x, s) (D + (x << 8))[gb ## x(s[(i - x) & 3])]
+#define HD4(m, i, s, p) m[i] = \
+ HD(i, 0, s) ^ \
+ HD(i, 1, s) ^ \
+ HD(i, 2, s) ^ \
+ HD(i, 3, s) ^ w[p + i];
+/* this order (0231) in HD16 is for VC6/K8 speed optimization */
+#define HD16(m, s, p) \
+ HD4(m, 0, s, p); \
+ HD4(m, 2, s, p); \
+ HD4(m, 3, s, p); \
+ HD4(m, 1, s, p); \
+
+#define FD(i, x) InvS[gb ## x(m[(i - x) & 3])]
+#define FD4(i) dest[i] = Ui32(FD(i, 0), FD(i, 1), FD(i, 2), FD(i, 3)) ^ w[i];
+
+void MY_FAST_CALL Aes_SetKey_Enc(UInt32 *w, const Byte *key, unsigned keySize)
+{
+ unsigned i, wSize;
+ wSize = keySize + 28;
+ keySize /= 4;
+ w[0] = ((UInt32)keySize / 2) + 3;
+ w += 4;
+
+ for (i = 0; i < keySize; i++, key += 4)
+ w[i] = GetUi32(key);
+
+ for (; i < wSize; i++)
+ {
+ UInt32 t = w[i - 1];
+ unsigned rem = i % keySize;
+ if (rem == 0)
+ t = Ui32(Sbox[gb1(t)] ^ Rcon[i / keySize], Sbox[gb2(t)], Sbox[gb3(t)], Sbox[gb0(t)]);
+ else if (keySize > 6 && rem == 4)
+ t = Ui32(Sbox[gb0(t)], Sbox[gb1(t)], Sbox[gb2(t)], Sbox[gb3(t)]);
+ w[i] = w[i - keySize] ^ t;
+ }
+}
+
+void MY_FAST_CALL Aes_SetKey_Dec(UInt32 *w, const Byte *key, unsigned keySize)
+{
+ unsigned i, num;
+ Aes_SetKey_Enc(w, key, keySize);
+ num = keySize + 20;
+ w += 8;
+ for (i = 0; i < num; i++)
+ {
+ UInt32 r = w[i];
+ w[i] =
+ D[ Sbox[gb0(r)]] ^
+ D[0x100 + Sbox[gb1(r)]] ^
+ D[0x200 + Sbox[gb2(r)]] ^
+ D[0x300 + Sbox[gb3(r)]];
+ }
+}
+
+/* Aes_Encode and Aes_Decode functions work with little-endian words.
+ src and dest are pointers to 4 UInt32 words.
+ src and dest can point to the same block */
+
+static void Aes_Encode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
+{
+ UInt32 s[4];
+ UInt32 m[4];
+ UInt32 numRounds2 = w[0];
+ w += 4;
+ s[0] = src[0] ^ w[0];
+ s[1] = src[1] ^ w[1];
+ s[2] = src[2] ^ w[2];
+ s[3] = src[3] ^ w[3];
+ w += 4;
+ for (;;)
+ {
+ HT16(m, s, 0);
+ if (--numRounds2 == 0)
+ break;
+ HT16(s, m, 4);
+ w += 8;
+ }
+ w += 4;
+ FT4(0); FT4(1); FT4(2); FT4(3);
+}
+
+static void Aes_Decode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
+{
+ UInt32 s[4];
+ UInt32 m[4];
+ UInt32 numRounds2 = w[0];
+ w += 4 + numRounds2 * 8;
+ s[0] = src[0] ^ w[0];
+ s[1] = src[1] ^ w[1];
+ s[2] = src[2] ^ w[2];
+ s[3] = src[3] ^ w[3];
+ for (;;)
+ {
+ w -= 8;
+ HD16(m, s, 4);
+ if (--numRounds2 == 0)
+ break;
+ HD16(s, m, 0);
+ }
+ FD4(0); FD4(1); FD4(2); FD4(3);
+}
+
+void AesCbc_Init(UInt32 *p, const Byte *iv)
+{
+ unsigned i;
+ for (i = 0; i < 4; i++)
+ p[i] = GetUi32(iv + i * 4);
+}
+
+void MY_FAST_CALL AesCbc_Encode(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
+ {
+ p[0] ^= GetUi32(data);
+ p[1] ^= GetUi32(data + 4);
+ p[2] ^= GetUi32(data + 8);
+ p[3] ^= GetUi32(data + 12);
+
+ Aes_Encode(p + 4, p, p);
+
+ SetUi32(data, p[0]);
+ SetUi32(data + 4, p[1]);
+ SetUi32(data + 8, p[2]);
+ SetUi32(data + 12, p[3]);
+ }
+}
+
+void MY_FAST_CALL AesCbc_Decode(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ UInt32 in[4], out[4];
+ for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
+ {
+ in[0] = GetUi32(data);
+ in[1] = GetUi32(data + 4);
+ in[2] = GetUi32(data + 8);
+ in[3] = GetUi32(data + 12);
+
+ Aes_Decode(p + 4, out, in);
+
+ SetUi32(data, p[0] ^ out[0]);
+ SetUi32(data + 4, p[1] ^ out[1]);
+ SetUi32(data + 8, p[2] ^ out[2]);
+ SetUi32(data + 12, p[3] ^ out[3]);
+
+ p[0] = in[0];
+ p[1] = in[1];
+ p[2] = in[2];
+ p[3] = in[3];
+ }
+}
+
+void MY_FAST_CALL AesCtr_Code(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ for (; numBlocks != 0; numBlocks--)
+ {
+ UInt32 temp[4];
+ Byte buf[16];
+ int i;
+ if (++p[0] == 0)
+ p[1]++;
+ Aes_Encode(p + 4, temp, p);
+ SetUi32(buf, temp[0]);
+ SetUi32(buf + 4, temp[1]);
+ SetUi32(buf + 8, temp[2]);
+ SetUi32(buf + 12, temp[3]);
+ for (i = 0; i < 16; i++)
+ *data++ ^= buf[i];
+ }
+}
diff --git a/src/libs/7zip/unix/C/Aes.h b/src/libs/7zip/unix/C/Aes.h
new file mode 100644
index 000000000..c9b0677c8
--- /dev/null
+++ b/src/libs/7zip/unix/C/Aes.h
@@ -0,0 +1,38 @@
+/* Aes.h -- AES encryption / decryption
+2009-11-23 : Igor Pavlov : Public domain */
+
+#ifndef __AES_H
+#define __AES_H
+
+#include "Types.h"
+
+EXTERN_C_BEGIN
+
+#define AES_BLOCK_SIZE 16
+
+/* Call AesGenTables one time before other AES functions */
+void AesGenTables(void);
+
+/* UInt32 pointers must be 16-byte aligned */
+
+/* 16-byte (4 * 32-bit words) blocks: 1 (IV) + 1 (keyMode) + 15 (AES-256 roundKeys) */
+#define AES_NUM_IVMRK_WORDS ((1 + 1 + 15) * 4)
+
+/* aes - 16-byte aligned pointer to keyMode+roundKeys sequence */
+/* keySize = 16 or 24 or 32 (bytes) */
+typedef void (MY_FAST_CALL *AES_SET_KEY_FUNC)(UInt32 *aes, const Byte *key, unsigned keySize);
+void MY_FAST_CALL Aes_SetKey_Enc(UInt32 *aes, const Byte *key, unsigned keySize);
+void MY_FAST_CALL Aes_SetKey_Dec(UInt32 *aes, const Byte *key, unsigned keySize);
+
+/* ivAes - 16-byte aligned pointer to iv+keyMode+roundKeys sequence: UInt32[AES_NUM_IVMRK_WORDS] */
+void AesCbc_Init(UInt32 *ivAes, const Byte *iv); /* iv size is AES_BLOCK_SIZE */
+/* data - 16-byte aligned pointer to data */
+/* numBlocks - the number of 16-byte blocks in data array */
+typedef void (MY_FAST_CALL *AES_CODE_FUNC)(UInt32 *ivAes, Byte *data, size_t numBlocks);
+extern AES_CODE_FUNC g_AesCbc_Encode;
+extern AES_CODE_FUNC g_AesCbc_Decode;
+extern AES_CODE_FUNC g_AesCtr_Code;
+
+EXTERN_C_END
+
+#endif
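
A sketch of the call order implied by the comments above for CBC encryption; the wrapper name is illustrative, and real code must also guarantee the 16-byte alignment that a plain stack array does not provide.

#include "Aes.h"

void CbcEncryptBlocks(const Byte *key, unsigned keySize, const Byte *iv,
                      Byte *data, size_t numBlocks)
{
  UInt32 ivmrk[AES_NUM_IVMRK_WORDS];       /* sketch only: must be 16-byte aligned in real code */
  AesGenTables();                          /* once per process is enough */
  AesCbc_Init(ivmrk, iv);                  /* words [0..3] hold the IV */
  Aes_SetKey_Enc(ivmrk + 4, key, keySize); /* keyMode + round keys follow the IV */
  g_AesCbc_Encode(ivmrk, data, numBlocks); /* data length must be numBlocks * AES_BLOCK_SIZE */
}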
diff --git a/src/libs/7zip/unix/C/Alloc.c b/src/libs/7zip/unix/C/Alloc.c
new file mode 100644
index 000000000..be02a9299
--- /dev/null
+++ b/src/libs/7zip/unix/C/Alloc.c
@@ -0,0 +1,280 @@
+/* Alloc.c -- Memory allocation functions
+2008-09-24
+Igor Pavlov
+Public domain */
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef _7ZIP_LARGE_PAGES
+#ifdef __linux__
+#ifndef _7ZIP_ST
+#include <pthread.h>
+#endif
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <mntent.h>
+#endif
+#endif
+
+#include "Alloc.h"
+
+/* #define _SZ_ALLOC_DEBUG */
+
+/* use _SZ_ALLOC_DEBUG to debug alloc/free operations */
+#ifdef _SZ_ALLOC_DEBUG
+#include <stdio.h>
+int g_allocCount = 0;
+int g_allocCountMid = 0;
+int g_allocCountBig = 0;
+#endif
+
+void *MyAlloc(size_t size)
+{
+ if (size == 0)
+ return 0;
+ #ifdef _SZ_ALLOC_DEBUG
+ {
+ void *p = malloc(size);
+ fprintf(stderr, "\nAlloc %10d bytes, count = %10d, addr = %8X", size, g_allocCount++, (unsigned)p);
+ return p;
+ }
+ #else
+ return malloc(size);
+ #endif
+}
+
+void MyFree(void *address)
+{
+ #ifdef _SZ_ALLOC_DEBUG
+ if (address != 0)
+ fprintf(stderr, "\nFree; count = %10d, addr = %8X", --g_allocCount, (unsigned)address);
+ #endif
+ free(address);
+}
+
+#ifndef _WIN32
+
+#ifdef _7ZIP_LARGE_PAGES
+
+#ifdef __linux__
+#define _7ZIP_MAX_HUGE_ALLOCS 64
+static void *g_HugePageAddr[_7ZIP_MAX_HUGE_ALLOCS] = { NULL };
+static size_t g_HugePageLen[_7ZIP_MAX_HUGE_ALLOCS];
+static char *g_HugetlbPath;
+#endif
+
+#endif
+
+static void *VirtualAlloc(size_t size, int memLargePages)
+{
+ #ifdef _7ZIP_LARGE_PAGES
+ if (memLargePages)
+ {
+ #ifdef __linux__
+ /* huge pages support for Linux; added by Joachim Henke */
+ #ifndef _7ZIP_ST
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+ #endif
+ int i;
+
+ void * address = NULL;
+ #ifndef _7ZIP_ST
+ pthread_mutex_lock(&mutex);
+ #endif
+ for (i = 0; i < _7ZIP_MAX_HUGE_ALLOCS; ++i)
+ {
+ if (g_HugePageAddr[i] == NULL)
+ {
+ int fd, pathlen = strlen(g_HugetlbPath);
+ char tempname[pathlen+12];
+
+ memcpy(tempname, g_HugetlbPath, pathlen);
+ memcpy(tempname + pathlen, "/7z-XXXXXX", 11);
+ fd = mkstemp(tempname);
+ unlink(tempname);
+ if (fd < 0)
+ {
+ fprintf(stderr,"cant't open %s (%s)\n",tempname,strerror(errno));
+ break;
+ }
+ address = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (address == MAP_FAILED)
+ {
+ address = NULL;
+ break;
+ }
+ g_HugePageLen[i] = size;
+ g_HugePageAddr[i] = address;
+// fprintf(stderr,"HUGE[%d]=%ld %p\n",i,(long)size,address);
+ break;
+ }
+ }
+ #ifndef _7ZIP_ST
+ pthread_mutex_unlock(&mutex);
+ #endif
+ return address;
+ #endif
+ }
+ #endif
+ return malloc(size);
+}
+
+static int VirtualFree(void *address)
+{
+ #ifdef _7ZIP_LARGE_PAGES
+ #ifdef __linux__
+ int i;
+
+ for (i = 0; i < _7ZIP_MAX_HUGE_ALLOCS; ++i)
+ {
+ if (g_HugePageAddr[i] == address)
+ {
+ munmap(address, g_HugePageLen[i]);
+ g_HugePageAddr[i] = NULL;
+ return 1;
+ }
+ }
+ #endif
+ #endif
+ free(address);
+ return 1;
+}
+
+#endif
+
+void *MidAlloc(size_t size)
+{
+ if (size == 0)
+ return 0;
+ #ifdef _SZ_ALLOC_DEBUG
+ fprintf(stderr, "\nAlloc_Mid %10d bytes; count = %10d", size, g_allocCountMid++);
+ #endif
+ return VirtualAlloc(size, 0);
+}
+
+void MidFree(void *address)
+{
+ #ifdef _SZ_ALLOC_DEBUG
+ if (address != 0)
+ fprintf(stderr, "\nFree_Mid; count = %10d", --g_allocCountMid);
+ #endif
+ if (address == 0)
+ return;
+ VirtualFree(address);
+}
+
+#ifdef _7ZIP_LARGE_PAGES
+size_t g_LargePageSize = 0;
+#ifdef _WIN32
+typedef SIZE_T (WINAPI *GetLargePageMinimumP)();
+#elif defined(__linux__)
+size_t largePageMinimum()
+{
+ size_t size;
+
+ g_HugetlbPath = getenv("HUGETLB_PATH");
+
+ if (g_HugetlbPath == NULL)
+ {
+ // not defined => try to find out the directory
+ static char dir_hugetlbfs[1024];
+ const char * filename = "/etc/mtab"; // mounted filesystems
+ FILE *fp;
+ struct mntent * info;
+
+ dir_hugetlbfs[0]=0;
+
+ fp = setmntent(filename,"r");
+ if (fp)
+ {
+ info = getmntent(fp);
+ while(info)
+ {
+/*
+ printf("%s:\n",info->mnt_fsname);
+ printf(" dir='%s'\n",info->mnt_dir);
+ printf(" type='%s'\n",info->mnt_type);
+*/
+
+ if (strcmp(info->mnt_type,"hugetlbfs") == 0)
+ {
+ strcpy(dir_hugetlbfs,info->mnt_dir);
+ break;
+ }
+
+ info = getmntent(fp);
+ }
+ endmntent(fp);
+ }
+
+ if (dir_hugetlbfs[0])
+ {
+ g_HugetlbPath = dir_hugetlbfs;
+ // fprintf(stderr," Found hugetlbfs = '%s'\n",g_HugetlbPath);
+ }
+ }
+ if (g_HugetlbPath == NULL || (size = pathconf(g_HugetlbPath, _PC_REC_MIN_XFER_SIZE)) <= getpagesize())
+ return 0;
+ return size;
+}
+#else
+#define largePageMinimum() 0
+#endif
+#endif
+
+void SetLargePageSize()
+{
+ #ifdef _7ZIP_LARGE_PAGES
+ size_t size;
+ #ifdef _WIN32
+ GetLargePageMinimumP largePageMinimum = (GetLargePageMinimumP)
+ GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "GetLargePageMinimum");
+ if (largePageMinimum == 0)
+ return;
+ #endif
+ size = largePageMinimum();
+ if (size == 0 || (size & (size - 1)) != 0)
+ return;
+ g_LargePageSize = size;
+ // fprintf(stderr,"SetLargePageSize : %ld\n",(long)g_LargePageSize);
+ #endif
+}
+
+
+void *BigAlloc(size_t size)
+{
+ if (size == 0)
+ return 0;
+ #ifdef _SZ_ALLOC_DEBUG
+ fprintf(stderr, "\nAlloc_Big %10d bytes; count = %10d", size, g_allocCountBig++);
+ #endif
+
+ #ifdef _7ZIP_LARGE_PAGES
+ if (g_LargePageSize != 0 && g_LargePageSize <= (1 << 30) && size >= (1 << 18))
+ {
+ void *res = VirtualAlloc( (size + g_LargePageSize - 1) & (~(g_LargePageSize - 1)), 1);
+ if (res != 0)
+ return res;
+ }
+ #endif
+ return VirtualAlloc(size, 0);
+}
+
+void BigFree(void *address)
+{
+ #ifdef _SZ_ALLOC_DEBUG
+ if (address != 0)
+ fprintf(stderr, "\nFree_Big; count = %10d", --g_allocCountBig);
+ #endif
+
+ if (address == 0)
+ return;
+ VirtualFree(address);
+}
diff --git a/src/libs/7zip/unix/C/Alloc.h b/src/libs/7zip/unix/C/Alloc.h
new file mode 100644
index 000000000..7ef372e3a
--- /dev/null
+++ b/src/libs/7zip/unix/C/Alloc.h
@@ -0,0 +1,27 @@
+/* Alloc.h -- Memory allocation functions
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __COMMON_ALLOC_H
+#define __COMMON_ALLOC_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *MyAlloc(size_t size);
+void MyFree(void *address);
+
+void SetLargePageSize();
+
+void *MidAlloc(size_t size);
+void MidFree(void *address);
+void *BigAlloc(size_t size);
+void BigFree(void *address);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
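
The header above only lists the functions; below is a minimal sketch of how they are meant to pair up (the wrapper names are illustrative).

#include "Alloc.h"

void *AllocDictionary(size_t size)
{
  SetLargePageSize();     /* typically once at startup; a no-op unless _7ZIP_LARGE_PAGES is defined */
  return BigAlloc(size);  /* on this unix port it falls back to malloc when large pages are unavailable */
}

void FreeDictionary(void *p)
{
  BigFree(p);             /* must match BigAlloc, not plain free() */
}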
diff --git a/src/libs/7zip/unix/C/Bra.c b/src/libs/7zip/unix/C/Bra.c
new file mode 100644
index 000000000..2e47b1413
--- /dev/null
+++ b/src/libs/7zip/unix/C/Bra.c
@@ -0,0 +1,133 @@
+/* Bra.c -- Converters for RISC code
+2010-04-16 : Igor Pavlov : Public domain */
+
+#include "Bra.h"
+
+SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ SizeT i;
+ if (size < 4)
+ return 0;
+ size -= 4;
+ ip += 8;
+ for (i = 0; i <= size; i += 4)
+ {
+ if (data[i + 3] == 0xEB)
+ {
+ UInt32 dest;
+ UInt32 src = ((UInt32)data[i + 2] << 16) | ((UInt32)data[i + 1] << 8) | (data[i + 0]);
+ src <<= 2;
+ if (encoding)
+ dest = ip + (UInt32)i + src;
+ else
+ dest = src - (ip + (UInt32)i);
+ dest >>= 2;
+ data[i + 2] = (Byte)(dest >> 16);
+ data[i + 1] = (Byte)(dest >> 8);
+ data[i + 0] = (Byte)dest;
+ }
+ }
+ return i;
+}
+
+SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ SizeT i;
+ if (size < 4)
+ return 0;
+ size -= 4;
+ ip += 4;
+ for (i = 0; i <= size; i += 2)
+ {
+ if ((data[i + 1] & 0xF8) == 0xF0 &&
+ (data[i + 3] & 0xF8) == 0xF8)
+ {
+ UInt32 dest;
+ UInt32 src =
+ (((UInt32)data[i + 1] & 0x7) << 19) |
+ ((UInt32)data[i + 0] << 11) |
+ (((UInt32)data[i + 3] & 0x7) << 8) |
+ (data[i + 2]);
+
+ src <<= 1;
+ if (encoding)
+ dest = ip + (UInt32)i + src;
+ else
+ dest = src - (ip + (UInt32)i);
+ dest >>= 1;
+
+ data[i + 1] = (Byte)(0xF0 | ((dest >> 19) & 0x7));
+ data[i + 0] = (Byte)(dest >> 11);
+ data[i + 3] = (Byte)(0xF8 | ((dest >> 8) & 0x7));
+ data[i + 2] = (Byte)dest;
+ i += 2;
+ }
+ }
+ return i;
+}
+
+SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ SizeT i;
+ if (size < 4)
+ return 0;
+ size -= 4;
+ for (i = 0; i <= size; i += 4)
+ {
+ if ((data[i] >> 2) == 0x12 && (data[i + 3] & 3) == 1)
+ {
+ UInt32 src = ((UInt32)(data[i + 0] & 3) << 24) |
+ ((UInt32)data[i + 1] << 16) |
+ ((UInt32)data[i + 2] << 8) |
+ ((UInt32)data[i + 3] & (~3));
+
+ UInt32 dest;
+ if (encoding)
+ dest = ip + (UInt32)i + src;
+ else
+ dest = src - (ip + (UInt32)i);
+ data[i + 0] = (Byte)(0x48 | ((dest >> 24) & 0x3));
+ data[i + 1] = (Byte)(dest >> 16);
+ data[i + 2] = (Byte)(dest >> 8);
+ data[i + 3] &= 0x3;
+ data[i + 3] |= dest;
+ }
+ }
+ return i;
+}
+
+SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ UInt32 i;
+ if (size < 4)
+ return 0;
+ size -= 4;
+ for (i = 0; i <= size; i += 4)
+ {
+ if ((data[i] == 0x40 && (data[i + 1] & 0xC0) == 0x00) ||
+ (data[i] == 0x7F && (data[i + 1] & 0xC0) == 0xC0))
+ {
+ UInt32 src =
+ ((UInt32)data[i + 0] << 24) |
+ ((UInt32)data[i + 1] << 16) |
+ ((UInt32)data[i + 2] << 8) |
+ ((UInt32)data[i + 3]);
+ UInt32 dest;
+
+ src <<= 2;
+ if (encoding)
+ dest = ip + i + src;
+ else
+ dest = src - (ip + i);
+ dest >>= 2;
+
+ dest = (((0 - ((dest >> 22) & 1)) << 22) & 0x3FFFFFFF) | (dest & 0x3FFFFF) | 0x40000000;
+
+ data[i + 0] = (Byte)(dest >> 24);
+ data[i + 1] = (Byte)(dest >> 16);
+ data[i + 2] = (Byte)(dest >> 8);
+ data[i + 3] = (Byte)dest;
+ }
+ }
+ return i;
+}
diff --git a/src/libs/7zip/unix/C/Bra.h b/src/libs/7zip/unix/C/Bra.h
new file mode 100644
index 000000000..5748c1c05
--- /dev/null
+++ b/src/libs/7zip/unix/C/Bra.h
@@ -0,0 +1,68 @@
+/* Bra.h -- Branch converters for executables
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __BRA_H
+#define __BRA_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+These functions convert relative addresses to absolute addresses
+in CALL instructions to increase the compression ratio.
+
+ In:
+ data - data buffer
+ size - size of data
+ ip - current virtual Instruction Pointer (IP) value
+ state - state variable for x86 converter
+ encoding - 0 (for decoding), 1 (for encoding)
+
+ Out:
+ state - state variable for x86 converter
+
+ Returns:
+ The number of processed bytes. If you call these functions in multiple passes,
+ the next call must start at the first byte after the block of processed bytes.
+
+ Type Endian Alignment LookAhead
+
+ x86 little 1 4
+ ARMT little 2 2
+ ARM little 4 0
+ PPC big 4 0
+ SPARC big 4 0
+ IA64 little 16 0
+
+ size must be >= Alignment + LookAhead, if it's not last block.
+ If (size < Alignment + LookAhead), converter returns 0.
+
+ Example:
+
+ UInt32 ip = 0;
+ for ()
+ {
+ ; size must be >= Alignment + LookAhead, if it's not last block
+ SizeT processed = Convert(data, size, ip, 1);
+ data += processed;
+ size -= processed;
+ ip += processed;
+ }
+*/
+
+#define x86_Convert_Init(state) { state = 0; }
+SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding);
+SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
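
The comment above sketches the streaming loop; below is a concrete single-block form for the x86 (BCJ) converter, with an illustrative function name. The other converters take the same arguments minus the state pointer.

#include "Bra.h"

void Bcj_EncodeBuffer(Byte *data, SizeT size)
{
  UInt32 state;
  x86_Convert_Init(state);
  /* for one whole block the ip/processed bookkeeping collapses to a single call */
  x86_Convert(data, size, 0 /* ip */, &state, 1 /* encoding */);
}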
diff --git a/src/libs/7zip/unix/C/Bra86.c b/src/libs/7zip/unix/C/Bra86.c
new file mode 100644
index 000000000..1ee0e709b
--- /dev/null
+++ b/src/libs/7zip/unix/C/Bra86.c
@@ -0,0 +1,85 @@
+/* Bra86.c -- Converter for x86 code (BCJ)
+2008-10-04 : Igor Pavlov : Public domain */
+
+#include "Bra.h"
+
+#define Test86MSByte(b) ((b) == 0 || (b) == 0xFF)
+
+const Byte kMaskToAllowedStatus[8] = {1, 1, 1, 0, 1, 0, 0, 0};
+const Byte kMaskToBitNumber[8] = {0, 1, 2, 2, 3, 3, 3, 3};
+
+SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding)
+{
+ SizeT bufferPos = 0, prevPosT;
+ UInt32 prevMask = *state & 0x7;
+ if (size < 5)
+ return 0;
+ ip += 5;
+ prevPosT = (SizeT)0 - 1;
+
+ for (;;)
+ {
+ Byte *p = data + bufferPos;
+ Byte *limit = data + size - 4;
+ for (; p < limit; p++)
+ if ((*p & 0xFE) == 0xE8)
+ break;
+ bufferPos = (SizeT)(p - data);
+ if (p >= limit)
+ break;
+ prevPosT = bufferPos - prevPosT;
+ if (prevPosT > 3)
+ prevMask = 0;
+ else
+ {
+ prevMask = (prevMask << ((int)prevPosT - 1)) & 0x7;
+ if (prevMask != 0)
+ {
+ Byte b = p[4 - kMaskToBitNumber[prevMask]];
+ if (!kMaskToAllowedStatus[prevMask] || Test86MSByte(b))
+ {
+ prevPosT = bufferPos;
+ prevMask = ((prevMask << 1) & 0x7) | 1;
+ bufferPos++;
+ continue;
+ }
+ }
+ }
+ prevPosT = bufferPos;
+
+ if (Test86MSByte(p[4]))
+ {
+ UInt32 src = ((UInt32)p[4] << 24) | ((UInt32)p[3] << 16) | ((UInt32)p[2] << 8) | ((UInt32)p[1]);
+ UInt32 dest;
+ for (;;)
+ {
+ Byte b;
+ int index;
+ if (encoding)
+ dest = (ip + (UInt32)bufferPos) + src;
+ else
+ dest = src - (ip + (UInt32)bufferPos);
+ if (prevMask == 0)
+ break;
+ index = kMaskToBitNumber[prevMask] * 8;
+ b = (Byte)(dest >> (24 - index));
+ if (!Test86MSByte(b))
+ break;
+ src = dest ^ ((1 << (32 - index)) - 1);
+ }
+ p[4] = (Byte)(~(((dest >> 24) & 1) - 1));
+ p[3] = (Byte)(dest >> 16);
+ p[2] = (Byte)(dest >> 8);
+ p[1] = (Byte)dest;
+ bufferPos += 5;
+ }
+ else
+ {
+ prevMask = ((prevMask << 1) & 0x7) | 1;
+ bufferPos++;
+ }
+ }
+ prevPosT = bufferPos - prevPosT;
+ *state = ((prevPosT > 3) ? 0 : ((prevMask << ((int)prevPosT - 1)) & 0x7));
+ return bufferPos;
+}
diff --git a/src/libs/7zip/unix/C/BraIA64.c b/src/libs/7zip/unix/C/BraIA64.c
new file mode 100644
index 000000000..0b4ee85bc
--- /dev/null
+++ b/src/libs/7zip/unix/C/BraIA64.c
@@ -0,0 +1,67 @@
+/* BraIA64.c -- Converter for IA-64 code
+2008-10-04 : Igor Pavlov : Public domain */
+
+#include "Bra.h"
+
+static const Byte kBranchTable[32] =
+{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 4, 4, 6, 6, 0, 0, 7, 7,
+ 4, 4, 0, 0, 4, 4, 0, 0
+};
+
+SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ SizeT i;
+ if (size < 16)
+ return 0;
+ size -= 16;
+ for (i = 0; i <= size; i += 16)
+ {
+ UInt32 instrTemplate = data[i] & 0x1F;
+ UInt32 mask = kBranchTable[instrTemplate];
+ UInt32 bitPos = 5;
+ int slot;
+ for (slot = 0; slot < 3; slot++, bitPos += 41)
+ {
+ UInt32 bytePos, bitRes;
+ UInt64 instruction, instNorm;
+ int j;
+ if (((mask >> slot) & 1) == 0)
+ continue;
+ bytePos = (bitPos >> 3);
+ bitRes = bitPos & 0x7;
+ instruction = 0;
+ for (j = 0; j < 6; j++)
+ instruction += (UInt64)data[i + j + bytePos] << (8 * j);
+
+ instNorm = instruction >> bitRes;
+ if (((instNorm >> 37) & 0xF) == 0x5 && ((instNorm >> 9) & 0x7) == 0)
+ {
+ UInt32 src = (UInt32)((instNorm >> 13) & 0xFFFFF);
+ UInt32 dest;
+ src |= ((UInt32)(instNorm >> 36) & 1) << 20;
+
+ src <<= 4;
+
+ if (encoding)
+ dest = ip + (UInt32)i + src;
+ else
+ dest = src - (ip + (UInt32)i);
+
+ dest >>= 4;
+
+ instNorm &= ~((UInt64)(0x8FFFFF) << 13);
+ instNorm |= ((UInt64)(dest & 0xFFFFF) << 13);
+ instNorm |= ((UInt64)(dest & 0x100000) << (36 - 20));
+
+ instruction &= (1 << bitRes) - 1;
+ instruction |= (instNorm << bitRes);
+ for (j = 0; j < 6; j++)
+ data[i + j + bytePos] = (Byte)(instruction >> (8 * j));
+ }
+ }
+ }
+ return i;
+}
diff --git a/src/libs/7zip/unix/C/BwtSort.c b/src/libs/7zip/unix/C/BwtSort.c
new file mode 100644
index 000000000..207305072
--- /dev/null
+++ b/src/libs/7zip/unix/C/BwtSort.c
@@ -0,0 +1,516 @@
+/* BwtSort.c -- BWT block sorting
+2008-08-17
+Igor Pavlov
+Public domain */
+
+#include "BwtSort.h"
+#include "Sort.h"
+
+/* #define BLOCK_SORT_USE_HEAP_SORT */
+
+#define NO_INLINE MY_FAST_CALL
+
+/* Don't change it !!! */
+#define kNumHashBytes 2
+#define kNumHashValues (1 << (kNumHashBytes * 8))
+
+/* kNumRefBitsMax must be < (kNumHashBytes * 8) = 16 */
+#define kNumRefBitsMax 12
+
+#define BS_TEMP_SIZE kNumHashValues
+
+#ifdef BLOCK_SORT_EXTERNAL_FLAGS
+
+/* 32 Flags in UInt32 word */
+#define kNumFlagsBits 5
+#define kNumFlagsInWord (1 << kNumFlagsBits)
+#define kFlagsMask (kNumFlagsInWord - 1)
+#define kAllFlags 0xFFFFFFFF
+
+#else
+
+#define kNumBitsMax 20
+#define kIndexMask ((1 << kNumBitsMax) - 1)
+#define kNumExtraBits (32 - kNumBitsMax)
+#define kNumExtra0Bits (kNumExtraBits - 2)
+#define kNumExtra0Mask ((1 << kNumExtra0Bits) - 1)
+
+#define SetFinishedGroupSize(p, size) \
+ { *(p) |= ((((size) - 1) & kNumExtra0Mask) << kNumBitsMax); \
+ if ((size) > (1 << kNumExtra0Bits)) { \
+ *(p) |= 0x40000000; *((p) + 1) |= ((((size) - 1)>> kNumExtra0Bits) << kNumBitsMax); } } \
+
+static void SetGroupSize(UInt32 *p, UInt32 size)
+{
+ if (--size == 0)
+ return;
+ *p |= 0x80000000 | ((size & kNumExtra0Mask) << kNumBitsMax);
+ if (size >= (1 << kNumExtra0Bits))
+ {
+ *p |= 0x40000000;
+ p[1] |= ((size >> kNumExtra0Bits) << kNumBitsMax);
+ }
+}
+
+#endif
+
+/*
+SortGroup is a recursive Range-Sort function with a HeapSort optimization for small blocks.
+ "range" is not a real range; it is only used for optimization.
+returns: 1 - if there are still unsorted groups, 0 - no more groups
+*/
+
+UInt32 NO_INLINE SortGroup(UInt32 BlockSize, UInt32 NumSortedBytes, UInt32 groupOffset, UInt32 groupSize, int NumRefBits, UInt32 *Indices
+ #ifndef BLOCK_SORT_USE_HEAP_SORT
+ , UInt32 left, UInt32 range
+ #endif
+ )
+{
+ UInt32 *ind2 = Indices + groupOffset;
+ UInt32 *Groups;
+ if (groupSize <= 1)
+ {
+ /*
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ SetFinishedGroupSize(ind2, 1);
+ #endif
+ */
+ return 0;
+ }
+ Groups = Indices + BlockSize + BS_TEMP_SIZE;
+ if (groupSize <= ((UInt32)1 << NumRefBits)
+ #ifndef BLOCK_SORT_USE_HEAP_SORT
+ && groupSize <= range
+ #endif
+ )
+ {
+ UInt32 *temp = Indices + BlockSize;
+ UInt32 j;
+ UInt32 mask, thereAreGroups, group, cg;
+ {
+ UInt32 gPrev;
+ UInt32 gRes = 0;
+ {
+ UInt32 sp = ind2[0] + NumSortedBytes;
+ if (sp >= BlockSize) sp -= BlockSize;
+ gPrev = Groups[sp];
+ temp[0] = (gPrev << NumRefBits);
+ }
+
+ for (j = 1; j < groupSize; j++)
+ {
+ UInt32 sp = ind2[j] + NumSortedBytes;
+ UInt32 g;
+ if (sp >= BlockSize) sp -= BlockSize;
+ g = Groups[sp];
+ temp[j] = (g << NumRefBits) | j;
+ gRes |= (gPrev ^ g);
+ }
+ if (gRes == 0)
+ {
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ SetGroupSize(ind2, groupSize);
+ #endif
+ return 1;
+ }
+ }
+
+ HeapSort(temp, groupSize);
+ mask = ((1 << NumRefBits) - 1);
+ thereAreGroups = 0;
+
+ group = groupOffset;
+ cg = (temp[0] >> NumRefBits);
+ temp[0] = ind2[temp[0] & mask];
+
+ {
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ UInt32 *Flags = Groups + BlockSize;
+ #else
+ UInt32 prevGroupStart = 0;
+ #endif
+
+ for (j = 1; j < groupSize; j++)
+ {
+ UInt32 val = temp[j];
+ UInt32 cgCur = (val >> NumRefBits);
+
+ if (cgCur != cg)
+ {
+ cg = cgCur;
+ group = groupOffset + j;
+
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ {
+ UInt32 t = group - 1;
+ Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
+ }
+ #else
+ SetGroupSize(temp + prevGroupStart, j - prevGroupStart);
+ prevGroupStart = j;
+ #endif
+ }
+ else
+ thereAreGroups = 1;
+ {
+ UInt32 ind = ind2[val & mask];
+ temp[j] = ind;
+ Groups[ind] = group;
+ }
+ }
+
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ SetGroupSize(temp + prevGroupStart, j - prevGroupStart);
+ #endif
+ }
+
+ for (j = 0; j < groupSize; j++)
+ ind2[j] = temp[j];
+ return thereAreGroups;
+ }
+
+ /* Check that all strings are in one group (cannot sort) */
+ {
+ UInt32 group, j;
+ UInt32 sp = ind2[0] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
+ group = Groups[sp];
+ for (j = 1; j < groupSize; j++)
+ {
+ sp = ind2[j] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
+ if (Groups[sp] != group)
+ break;
+ }
+ if (j == groupSize)
+ {
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ SetGroupSize(ind2, groupSize);
+ #endif
+ return 1;
+ }
+ }
+
+ #ifndef BLOCK_SORT_USE_HEAP_SORT
+ {
+ /* ---------- Range Sort ---------- */
+ UInt32 i;
+ UInt32 mid;
+ for (;;)
+ {
+ UInt32 j;
+ if (range <= 1)
+ {
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ SetGroupSize(ind2, groupSize);
+ #endif
+ return 1;
+ }
+ mid = left + ((range + 1) >> 1);
+ j = groupSize;
+ i = 0;
+ do
+ {
+ UInt32 sp = ind2[i] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
+ if (Groups[sp] >= mid)
+ {
+ for (j--; j > i; j--)
+ {
+ sp = ind2[j] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
+ if (Groups[sp] < mid)
+ {
+ UInt32 temp = ind2[i]; ind2[i] = ind2[j]; ind2[j] = temp;
+ break;
+ }
+ }
+ if (i >= j)
+ break;
+ }
+ }
+ while (++i < j);
+ if (i == 0)
+ {
+ range = range - (mid - left);
+ left = mid;
+ }
+ else if (i == groupSize)
+ range = (mid - left);
+ else
+ break;
+ }
+
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ {
+ UInt32 t = (groupOffset + i - 1);
+ UInt32 *Flags = Groups + BlockSize;
+ Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
+ }
+ #endif
+
+ {
+ UInt32 j;
+ for (j = i; j < groupSize; j++)
+ Groups[ind2[j]] = groupOffset + i;
+ }
+
+ {
+ UInt32 res = SortGroup(BlockSize, NumSortedBytes, groupOffset, i, NumRefBits, Indices, left, mid - left);
+ return res | SortGroup(BlockSize, NumSortedBytes, groupOffset + i, groupSize - i, NumRefBits, Indices, mid, range - (mid - left));
+ }
+
+ }
+
+ #else
+
+ /* ---------- Heap Sort ---------- */
+
+ {
+ UInt32 j;
+ for (j = 0; j < groupSize; j++)
+ {
+ UInt32 sp = ind2[j] + NumSortedBytes; if (sp >= BlockSize) sp -= BlockSize;
+ ind2[j] = sp;
+ }
+
+ HeapSortRef(ind2, Groups, groupSize);
+
+ /* Write Flags */
+ {
+ UInt32 sp = ind2[0];
+ UInt32 group = Groups[sp];
+
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ UInt32 *Flags = Groups + BlockSize;
+ #else
+ UInt32 prevGroupStart = 0;
+ #endif
+
+ for (j = 1; j < groupSize; j++)
+ {
+ sp = ind2[j];
+ if (Groups[sp] != group)
+ {
+ group = Groups[sp];
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ {
+ UInt32 t = groupOffset + j - 1;
+ Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
+ }
+ #else
+ SetGroupSize(ind2 + prevGroupStart, j - prevGroupStart);
+ prevGroupStart = j;
+ #endif
+ }
+ }
+
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ SetGroupSize(ind2 + prevGroupStart, j - prevGroupStart);
+ #endif
+ }
+ {
+ /* Write new Groups values and Check that there are groups */
+ UInt32 thereAreGroups = 0;
+ for (j = 0; j < groupSize; j++)
+ {
+ UInt32 group = groupOffset + j;
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ UInt32 subGroupSize = ((ind2[j] & ~0xC0000000) >> kNumBitsMax);
+ if ((ind2[j] & 0x40000000) != 0)
+ subGroupSize += ((ind2[j + 1] >> kNumBitsMax) << kNumExtra0Bits);
+ subGroupSize++;
+ for (;;)
+ {
+ UInt32 original = ind2[j];
+ UInt32 sp = original & kIndexMask;
+ if (sp < NumSortedBytes) sp += BlockSize; sp -= NumSortedBytes;
+ ind2[j] = sp | (original & ~kIndexMask);
+ Groups[sp] = group;
+ if (--subGroupSize == 0)
+ break;
+ j++;
+ thereAreGroups = 1;
+ }
+ #else
+ UInt32 *Flags = Groups + BlockSize;
+ for (;;)
+ {
+ UInt32 sp = ind2[j]; if (sp < NumSortedBytes) sp += BlockSize; sp -= NumSortedBytes;
+ ind2[j] = sp;
+ Groups[sp] = group;
+ if ((Flags[(groupOffset + j) >> kNumFlagsBits] & (1 << ((groupOffset + j) & kFlagsMask))) == 0)
+ break;
+ j++;
+ thereAreGroups = 1;
+ }
+ #endif
+ }
+ return thereAreGroups;
+ }
+ }
+ #endif
+}
+
+/* conditions: blockSize > 0 */
+UInt32 BlockSort(UInt32 *Indices, const Byte *data, UInt32 blockSize)
+{
+ UInt32 *counters = Indices + blockSize;
+ UInt32 i;
+ UInt32 *Groups;
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ UInt32 *Flags;
+ #endif
+
+ /* Radix-Sort for 2 bytes */
+ for (i = 0; i < kNumHashValues; i++)
+ counters[i] = 0;
+ for (i = 0; i < blockSize - 1; i++)
+ counters[((UInt32)data[i] << 8) | data[i + 1]]++;
+ counters[((UInt32)data[i] << 8) | data[0]]++;
+
+ Groups = counters + BS_TEMP_SIZE;
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ Flags = Groups + blockSize;
+ {
+ UInt32 numWords = (blockSize + kFlagsMask) >> kNumFlagsBits;
+ for (i = 0; i < numWords; i++)
+ Flags[i] = kAllFlags;
+ }
+ #endif
+
+ {
+ UInt32 sum = 0;
+ for (i = 0; i < kNumHashValues; i++)
+ {
+ UInt32 groupSize = counters[i];
+ if (groupSize > 0)
+ {
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+ UInt32 t = sum + groupSize - 1;
+ Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask));
+ #endif
+ sum += groupSize;
+ }
+ counters[i] = sum - groupSize;
+ }
+
+ for (i = 0; i < blockSize - 1; i++)
+ Groups[i] = counters[((UInt32)data[i] << 8) | data[i + 1]];
+ Groups[i] = counters[((UInt32)data[i] << 8) | data[0]];
+
+ for (i = 0; i < blockSize - 1; i++)
+ Indices[counters[((UInt32)data[i] << 8) | data[i + 1]]++] = i;
+ Indices[counters[((UInt32)data[i] << 8) | data[0]]++] = i;
+
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ {
+ UInt32 prev = 0;
+ for (i = 0; i < kNumHashValues; i++)
+ {
+ UInt32 prevGroupSize = counters[i] - prev;
+ if (prevGroupSize == 0)
+ continue;
+ SetGroupSize(Indices + prev, prevGroupSize);
+ prev = counters[i];
+ }
+ }
+ #endif
+ }
+
+ {
+ int NumRefBits;
+ UInt32 NumSortedBytes;
+ for (NumRefBits = 0; ((blockSize - 1) >> NumRefBits) != 0; NumRefBits++);
+ NumRefBits = 32 - NumRefBits;
+ if (NumRefBits > kNumRefBitsMax)
+ NumRefBits = kNumRefBitsMax;
+
+ for (NumSortedBytes = kNumHashBytes; ; NumSortedBytes <<= 1)
+ {
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ UInt32 finishedGroupSize = 0;
+ #endif
+ UInt32 newLimit = 0;
+ for (i = 0; i < blockSize;)
+ {
+ UInt32 groupSize;
+ #ifdef BLOCK_SORT_EXTERNAL_FLAGS
+
+ if ((Flags[i >> kNumFlagsBits] & (1 << (i & kFlagsMask))) == 0)
+ {
+ i++;
+ continue;
+ }
+ for (groupSize = 1;
+ (Flags[(i + groupSize) >> kNumFlagsBits] & (1 << ((i + groupSize) & kFlagsMask))) != 0;
+ groupSize++);
+
+ groupSize++;
+
+ #else
+
+ groupSize = ((Indices[i] & ~0xC0000000) >> kNumBitsMax);
+ {
+ Bool finishedGroup = ((Indices[i] & 0x80000000) == 0);
+ if ((Indices[i] & 0x40000000) != 0)
+ {
+ groupSize += ((Indices[i + 1] >> kNumBitsMax) << kNumExtra0Bits);
+ Indices[i + 1] &= kIndexMask;
+ }
+ Indices[i] &= kIndexMask;
+ groupSize++;
+ if (finishedGroup || groupSize == 1)
+ {
+ Indices[i - finishedGroupSize] &= kIndexMask;
+ if (finishedGroupSize > 1)
+ Indices[i - finishedGroupSize + 1] &= kIndexMask;
+ {
+ UInt32 newGroupSize = groupSize + finishedGroupSize;
+ SetFinishedGroupSize(Indices + i - finishedGroupSize, newGroupSize);
+ finishedGroupSize = newGroupSize;
+ }
+ i += groupSize;
+ continue;
+ }
+ finishedGroupSize = 0;
+ }
+
+ #endif
+
+ if (NumSortedBytes >= blockSize)
+ {
+ UInt32 j;
+ for (j = 0; j < groupSize; j++)
+ {
+ UInt32 t = (i + j);
+ /* Flags[t >> kNumFlagsBits] &= ~(1 << (t & kFlagsMask)); */
+ Groups[Indices[t]] = t;
+ }
+ }
+ else
+ if (SortGroup(blockSize, NumSortedBytes, i, groupSize, NumRefBits, Indices
+ #ifndef BLOCK_SORT_USE_HEAP_SORT
+ , 0, blockSize
+ #endif
+ ) != 0)
+ newLimit = i + groupSize;
+ i += groupSize;
+ }
+ if (newLimit == 0)
+ break;
+ }
+ }
+ #ifndef BLOCK_SORT_EXTERNAL_FLAGS
+ for (i = 0; i < blockSize;)
+ {
+ UInt32 groupSize = ((Indices[i] & ~0xC0000000) >> kNumBitsMax);
+ if ((Indices[i] & 0x40000000) != 0)
+ {
+ groupSize += ((Indices[i + 1] >> kNumBitsMax) << kNumExtra0Bits);
+ Indices[i + 1] &= kIndexMask;
+ }
+ Indices[i] &= kIndexMask;
+ groupSize++;
+ i += groupSize;
+ }
+ #endif
+ return Groups[0];
+}
+
diff --git a/src/libs/7zip/unix/C/BwtSort.h b/src/libs/7zip/unix/C/BwtSort.h
new file mode 100644
index 000000000..ce5598f0f
--- /dev/null
+++ b/src/libs/7zip/unix/C/BwtSort.h
@@ -0,0 +1,30 @@
+/* BwtSort.h -- BWT block sorting
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __BWT_SORT_H
+#define __BWT_SORT_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* use BLOCK_SORT_EXTERNAL_FLAGS if blockSize can be > 1M */
+/* #define BLOCK_SORT_EXTERNAL_FLAGS */
+
+#ifdef BLOCK_SORT_EXTERNAL_FLAGS
+#define BLOCK_SORT_EXTERNAL_SIZE(blockSize) ((((blockSize) + 31) >> 5))
+#else
+#define BLOCK_SORT_EXTERNAL_SIZE(blockSize) 0
+#endif
+
+#define BLOCK_SORT_BUF_SIZE(blockSize) ((blockSize) * 2 + BLOCK_SORT_EXTERNAL_SIZE(blockSize) + (1 << 16))
+
+UInt32 BlockSort(UInt32 *indices, const Byte *data, UInt32 blockSize);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
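
A sketch of the buffer contract above: the caller supplies BLOCK_SORT_BUF_SIZE(blockSize) UInt32 slots, BlockSort fills the first blockSize entries with the sorted indices, and the return value is used by the caller as the BWT origin index. The helper name and error convention here are illustrative.

#include <stdlib.h>
#include "BwtSort.h"

UInt32 SortOneBlock(const Byte *data, UInt32 blockSize)
{
  UInt32 origin = 0;
  UInt32 *buf = (UInt32 *)malloc((size_t)BLOCK_SORT_BUF_SIZE(blockSize) * sizeof(UInt32));
  if (buf != NULL && blockSize != 0)
    origin = BlockSort(buf, data, blockSize);  /* buf[0..blockSize-1] now holds the sorted indices */
  free(buf);
  return origin;
}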
diff --git a/src/libs/7zip/unix/C/CpuArch.c b/src/libs/7zip/unix/C/CpuArch.c
new file mode 100644
index 000000000..260cc1f45
--- /dev/null
+++ b/src/libs/7zip/unix/C/CpuArch.c
@@ -0,0 +1,168 @@
+/* CpuArch.c -- CPU specific code
+2010-10-26: Igor Pavlov : Public domain */
+
+#include "CpuArch.h"
+
+#ifdef MY_CPU_X86_OR_AMD64
+
+#if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
+#define USE_ASM
+#endif
+
+#if defined(USE_ASM) && !defined(MY_CPU_AMD64)
+static UInt32 CheckFlag(UInt32 flag)
+{
+ #ifdef _MSC_VER
+ __asm pushfd;
+ __asm pop EAX;
+ __asm mov EDX, EAX;
+ __asm xor EAX, flag;
+ __asm push EAX;
+ __asm popfd;
+ __asm pushfd;
+ __asm pop EAX;
+ __asm xor EAX, EDX;
+ __asm push EDX;
+ __asm popfd;
+ __asm and flag, EAX;
+ #else
+ __asm__ __volatile__ (
+ "pushf\n\t"
+ "pop %%EAX\n\t"
+ "movl %%EAX,%%EDX\n\t"
+ "xorl %0,%%EAX\n\t"
+ "push %%EAX\n\t"
+ "popf\n\t"
+ "pushf\n\t"
+ "pop %%EAX\n\t"
+ "xorl %%EDX,%%EAX\n\t"
+ "push %%EDX\n\t"
+ "popf\n\t"
+ "andl %%EAX, %0\n\t":
+ "=c" (flag) : "c" (flag));
+ #endif
+ return flag;
+}
+#define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
+#else
+#define CHECK_CPUID_IS_SUPPORTED
+#endif
+
+static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
+{
+ #ifdef USE_ASM
+
+ #ifdef _MSC_VER
+
+ UInt32 a2, b2, c2, d2;
+ __asm xor EBX, EBX;
+ __asm xor ECX, ECX;
+ __asm xor EDX, EDX;
+ __asm mov EAX, function;
+ __asm cpuid;
+ __asm mov a2, EAX;
+ __asm mov b2, EBX;
+ __asm mov c2, ECX;
+ __asm mov d2, EDX;
+
+ *a = a2;
+ *b = b2;
+ *c = c2;
+ *d = d2;
+
+ #else
+
+ __asm__ __volatile__ (
+ "cpuid"
+ : "=a" (*a) ,
+ "=b" (*b) ,
+ "=c" (*c) ,
+ "=d" (*d)
+ : "0" (function)) ;
+
+ #endif
+
+ #else
+
+ int CPUInfo[4];
+ __cpuid(CPUInfo, function);
+ *a = CPUInfo[0];
+ *b = CPUInfo[1];
+ *c = CPUInfo[2];
+ *d = CPUInfo[3];
+
+ #endif
+}
+
+Bool x86cpuid_CheckAndRead(Cx86cpuid *p)
+{
+ CHECK_CPUID_IS_SUPPORTED
+ MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
+ MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
+ return True;
+}
+
+static UInt32 kVendors[][3] =
+{
+ { 0x756E6547, 0x49656E69, 0x6C65746E},
+ { 0x68747541, 0x69746E65, 0x444D4163},
+ { 0x746E6543, 0x48727561, 0x736C7561}
+};
+
+int x86cpuid_GetFirm(const Cx86cpuid *p)
+{
+ unsigned i;
+ for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
+ {
+ const UInt32 *v = kVendors[i];
+ if (v[0] == p->vendor[0] &&
+ v[1] == p->vendor[1] &&
+ v[2] == p->vendor[2])
+ return (int)i;
+ }
+ return -1;
+}
+
+Bool CPU_Is_InOrder()
+{
+ Cx86cpuid p;
+ int firm;
+ UInt32 family, model;
+ if (!x86cpuid_CheckAndRead(&p))
+ return True;
+ family = x86cpuid_GetFamily(&p);
+ model = x86cpuid_GetModel(&p);
+ firm = x86cpuid_GetFirm(&p);
+ switch (firm)
+ {
+ case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && model == 0x100C));
+ case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
+ case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
+ }
+ return True;
+}
+
+#if !defined(MY_CPU_AMD64) && defined(_WIN32)
+static Bool CPU_Sys_Is_SSE_Supported()
+{
+ OSVERSIONINFO vi;
+ vi.dwOSVersionInfoSize = sizeof(vi);
+ if (!GetVersionEx(&vi))
+ return False;
+ return (vi.dwMajorVersion >= 5);
+}
+#define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
+#else
+#define CHECK_SYS_SSE_SUPPORT
+#endif
+
+Bool CPU_Is_Aes_Supported()
+{
+ Cx86cpuid p;
+ CHECK_SYS_SSE_SUPPORT
+ if (!x86cpuid_CheckAndRead(&p))
+ return False;
+ return (p.c >> 25) & 1;
+}
+
+#endif
diff --git a/src/libs/7zip/unix/C/CpuArch.h b/src/libs/7zip/unix/C/CpuArch.h
new file mode 100644
index 000000000..01930c7e6
--- /dev/null
+++ b/src/libs/7zip/unix/C/CpuArch.h
@@ -0,0 +1,155 @@
+/* CpuArch.h -- CPU specific code
+2010-10-26: Igor Pavlov : Public domain */
+
+#ifndef __CPU_ARCH_H
+#define __CPU_ARCH_H
+
+#include "Types.h"
+
+EXTERN_C_BEGIN
+
+/*
+MY_CPU_LE means that the CPU is LITTLE ENDIAN.
+If MY_CPU_LE is not defined, we do not know that property of the platform (it can still be LITTLE ENDIAN).
+
+MY_CPU_LE_UNALIGN means that the CPU is LITTLE ENDIAN and supports unaligned memory accesses.
+If MY_CPU_LE_UNALIGN is not defined, we do not know these properties of the platform.
+*/
+
+#if defined(_M_X64) || defined(_M_AMD64) || defined(__x86_64__)
+#define MY_CPU_AMD64
+#endif
+
+#if defined(MY_CPU_AMD64) || defined(_M_IA64)
+#define MY_CPU_64BIT
+#endif
+
+#if defined(_M_IX86) || defined(__i386__)
+#define MY_CPU_X86
+#endif
+
+#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
+#define MY_CPU_X86_OR_AMD64
+#endif
+
+#if defined(MY_CPU_X86) || defined(_M_ARM)
+#define MY_CPU_32BIT
+#endif
+
+#if defined(_WIN32) && defined(_M_ARM)
+#define MY_CPU_ARM_LE
+#endif
+
+#if defined(_WIN32) && defined(_M_IA64)
+#define MY_CPU_IA64_LE
+#endif
+
+#if defined(MY_CPU_X86_OR_AMD64)
+#define MY_CPU_LE_UNALIGN
+#endif
+
+#if defined(MY_CPU_X86_OR_AMD64) || defined(MY_CPU_ARM_LE) || defined(MY_CPU_IA64_LE) || defined(__ARMEL__) || defined(__MIPSEL__) || defined(__LITTLE_ENDIAN__)
+#define MY_CPU_LE
+#endif
+
+#if defined(__BIG_ENDIAN__)
+#define MY_CPU_BE
+#endif
+
+#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
+Stop_Compiling_Bad_Endian
+#endif
+
+#ifdef MY_CPU_LE_UNALIGN
+
+#define GetUi16(p) (*(const UInt16 *)(p))
+#define GetUi32(p) (*(const UInt32 *)(p))
+#define GetUi64(p) (*(const UInt64 *)(p))
+#define SetUi16(p, d) *(UInt16 *)(p) = (d);
+#define SetUi32(p, d) *(UInt32 *)(p) = (d);
+#define SetUi64(p, d) *(UInt64 *)(p) = (d);
+
+#else
+
+#define GetUi16(p) (((const Byte *)(p))[0] | ((UInt16)((const Byte *)(p))[1] << 8))
+
+#define GetUi32(p) ( \
+ ((const Byte *)(p))[0] | \
+ ((UInt32)((const Byte *)(p))[1] << 8) | \
+ ((UInt32)((const Byte *)(p))[2] << 16) | \
+ ((UInt32)((const Byte *)(p))[3] << 24))
+
+#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
+
+#define SetUi16(p, d) { UInt32 _x_ = (d); \
+ ((Byte *)(p))[0] = (Byte)_x_; \
+ ((Byte *)(p))[1] = (Byte)(_x_ >> 8); }
+
+#define SetUi32(p, d) { UInt32 _x_ = (d); \
+ ((Byte *)(p))[0] = (Byte)_x_; \
+ ((Byte *)(p))[1] = (Byte)(_x_ >> 8); \
+ ((Byte *)(p))[2] = (Byte)(_x_ >> 16); \
+ ((Byte *)(p))[3] = (Byte)(_x_ >> 24); }
+
+#define SetUi64(p, d) { UInt64 _x64_ = (d); \
+ SetUi32(p, (UInt32)_x64_); \
+ SetUi32(((Byte *)(p)) + 4, (UInt32)(_x64_ >> 32)); }
+
+#endif
+
+#if defined(MY_CPU_LE_UNALIGN) && defined(_WIN64) && (_MSC_VER >= 1300)
+
+#pragma intrinsic(_byteswap_ulong)
+#pragma intrinsic(_byteswap_uint64)
+#define GetBe32(p) _byteswap_ulong(*(const UInt32 *)(const Byte *)(p))
+#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const Byte *)(p))
+
+#else
+
+#define GetBe32(p) ( \
+ ((UInt32)((const Byte *)(p))[0] << 24) | \
+ ((UInt32)((const Byte *)(p))[1] << 16) | \
+ ((UInt32)((const Byte *)(p))[2] << 8) | \
+ ((const Byte *)(p))[3] )
+
+#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
+
+#endif
+
+#define GetBe16(p) (((UInt16)((const Byte *)(p))[0] << 8) | ((const Byte *)(p))[1])
+
+
+#ifdef MY_CPU_X86_OR_AMD64
+
+typedef struct
+{
+ UInt32 maxFunc;
+ UInt32 vendor[3];
+ UInt32 ver;
+ UInt32 b;
+ UInt32 c;
+ UInt32 d;
+} Cx86cpuid;
+
+enum
+{
+ CPU_FIRM_INTEL,
+ CPU_FIRM_AMD,
+ CPU_FIRM_VIA
+};
+
+Bool x86cpuid_CheckAndRead(Cx86cpuid *p);
+int x86cpuid_GetFirm(const Cx86cpuid *p);
+
+#define x86cpuid_GetFamily(p) (((p)->ver >> 8) & 0xFF00F)
+#define x86cpuid_GetModel(p) (((p)->ver >> 4) & 0xF00F)
+#define x86cpuid_GetStepping(p) ((p)->ver & 0xF)
+
+Bool CPU_Is_InOrder();
+Bool CPU_Is_Aes_Supported();
+
+#endif
+
+EXTERN_C_END
+
+#endif
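
As a quick illustration of the byte-access macros above, the snippet below (an illustrative sketch, not part of the archive) writes a 32-bit value with SetUi32 and reads the same bytes back in both byte orders; on targets with MY_CPU_LE_UNALIGN defined these expressions reduce to plain unaligned loads and stores, otherwise they expand to byte-wise shifts.

#include <stdio.h>

#include "CpuArch.h"

int main(void)
{
  Byte buf[8];

  SetUi32(buf, 0x11223344);   /* little-endian layout in memory: 44 33 22 11 */
  printf("GetUi32 -> %08X\n", (unsigned)GetUi32(buf));      /* 11223344 */
  printf("GetBe32 -> %08X\n", (unsigned)GetBe32(buf));      /* 44332211 (same bytes, big-endian view) */

  SetUi16(buf + 4, 0xABCD);
  printf("GetUi16 -> %04X\n", (unsigned)GetUi16(buf + 4));  /* ABCD */
  return 0;
}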
diff --git a/src/libs/7zip/unix/C/Delta.c b/src/libs/7zip/unix/C/Delta.c
new file mode 100644
index 000000000..2b327f15f
--- /dev/null
+++ b/src/libs/7zip/unix/C/Delta.c
@@ -0,0 +1,62 @@
+/* Delta.c -- Delta converter
+2009-05-26 : Igor Pavlov : Public domain */
+
+#include "Delta.h"
+
+void Delta_Init(Byte *state)
+{
+ unsigned i;
+ for (i = 0; i < DELTA_STATE_SIZE; i++)
+ state[i] = 0;
+}
+
+static void MyMemCpy(Byte *dest, const Byte *src, unsigned size)
+{
+ unsigned i;
+ for (i = 0; i < size; i++)
+ dest[i] = src[i];
+}
+
+void Delta_Encode(Byte *state, unsigned delta, Byte *data, SizeT size)
+{
+ Byte buf[DELTA_STATE_SIZE];
+ unsigned j = 0;
+ MyMemCpy(buf, state, delta);
+ {
+ SizeT i;
+ for (i = 0; i < size;)
+ {
+ for (j = 0; j < delta && i < size; i++, j++)
+ {
+ Byte b = data[i];
+ data[i] = (Byte)(b - buf[j]);
+ buf[j] = b;
+ }
+ }
+ }
+ if (j == delta)
+ j = 0;
+ MyMemCpy(state, buf + j, delta - j);
+ MyMemCpy(state + delta - j, buf, j);
+}
+
+void Delta_Decode(Byte *state, unsigned delta, Byte *data, SizeT size)
+{
+ Byte buf[DELTA_STATE_SIZE];
+ unsigned j = 0;
+ MyMemCpy(buf, state, delta);
+ {
+ SizeT i;
+ for (i = 0; i < size;)
+ {
+ for (j = 0; j < delta && i < size; i++, j++)
+ {
+ buf[j] = data[i] = (Byte)(buf[j] + data[i]);
+ }
+ }
+ }
+ if (j == delta)
+ j = 0;
+ MyMemCpy(state, buf + j, delta - j);
+ MyMemCpy(state + delta - j, buf, j);
+}
diff --git a/src/libs/7zip/unix/C/Delta.h b/src/libs/7zip/unix/C/Delta.h
new file mode 100644
index 000000000..0d4cd6274
--- /dev/null
+++ b/src/libs/7zip/unix/C/Delta.h
@@ -0,0 +1,23 @@
+/* Delta.h -- Delta converter
+2009-04-15 : Igor Pavlov : Public domain */
+
+#ifndef __DELTA_H
+#define __DELTA_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DELTA_STATE_SIZE 256
+
+void Delta_Init(Byte *state);
+void Delta_Encode(Byte *state, unsigned delta, Byte *data, SizeT size);
+void Delta_Decode(Byte *state, unsigned delta, Byte *data, SizeT size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
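
The delta filter is symmetric: Delta_Encode replaces every byte with its difference from the byte `delta` positions earlier, and Delta_Decode re-accumulates the differences. A minimal round trip (illustrative only; the sample bytes are made up) looks like this:

#include <stdio.h>

#include "Delta.h"

int main(void)
{
  Byte state[DELTA_STATE_SIZE];
  Byte data[8] = { 10, 20, 12, 22, 14, 24, 16, 26 };  /* two interleaved ramps */
  unsigned delta = 2;                                  /* distance between related bytes */
  SizeT i;

  Delta_Init(state);
  Delta_Encode(state, delta, data, 8);   /* data is filtered in place */

  Delta_Init(state);                     /* the decoder starts from the same zeroed state */
  Delta_Decode(state, delta, data, 8);   /* restores the original bytes */

  for (i = 0; i < 8; i++)
    printf("%u ", (unsigned)data[i]);
  printf("\n");                          /* prints: 10 20 12 22 14 24 16 26 */
  return 0;
}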
diff --git a/src/libs/7zip/unix/C/HuffEnc.c b/src/libs/7zip/unix/C/HuffEnc.c
new file mode 100644
index 000000000..561c7e5da
--- /dev/null
+++ b/src/libs/7zip/unix/C/HuffEnc.c
@@ -0,0 +1,146 @@
+/* HuffEnc.c -- functions for Huffman encoding
+2009-09-02 : Igor Pavlov : Public domain */
+
+#include "HuffEnc.h"
+#include "Sort.h"
+
+#define kMaxLen 16
+#define NUM_BITS 10
+#define MASK ((1 << NUM_BITS) - 1)
+
+#define NUM_COUNTERS 64
+
+#define HUFFMAN_SPEED_OPT
+
+void Huffman_Generate(const UInt32 *freqs, UInt32 *p, Byte *lens, UInt32 numSymbols, UInt32 maxLen)
+{
+ UInt32 num = 0;
+ /* if (maxLen > 10) maxLen = 10; */
+ {
+ UInt32 i;
+
+ #ifdef HUFFMAN_SPEED_OPT
+
+ UInt32 counters[NUM_COUNTERS];
+ for (i = 0; i < NUM_COUNTERS; i++)
+ counters[i] = 0;
+ for (i = 0; i < numSymbols; i++)
+ {
+ UInt32 freq = freqs[i];
+ counters[(freq < NUM_COUNTERS - 1) ? freq : NUM_COUNTERS - 1]++;
+ }
+
+ for (i = 1; i < NUM_COUNTERS; i++)
+ {
+ UInt32 temp = counters[i];
+ counters[i] = num;
+ num += temp;
+ }
+
+ for (i = 0; i < numSymbols; i++)
+ {
+ UInt32 freq = freqs[i];
+ if (freq == 0)
+ lens[i] = 0;
+ else
+ p[counters[((freq < NUM_COUNTERS - 1) ? freq : NUM_COUNTERS - 1)]++] = i | (freq << NUM_BITS);
+ }
+ counters[0] = 0;
+ HeapSort(p + counters[NUM_COUNTERS - 2], counters[NUM_COUNTERS - 1] - counters[NUM_COUNTERS - 2]);
+
+ #else
+
+ for (i = 0; i < numSymbols; i++)
+ {
+ UInt32 freq = freqs[i];
+ if (freq == 0)
+ lens[i] = 0;
+ else
+ p[num++] = i | (freq << NUM_BITS);
+ }
+ HeapSort(p, num);
+
+ #endif
+ }
+
+ if (num < 2)
+ {
+ unsigned minCode = 0;
+ unsigned maxCode = 1;
+ if (num == 1)
+ {
+ maxCode = (unsigned)p[0] & MASK;
+ if (maxCode == 0)
+ maxCode++;
+ }
+ p[minCode] = 0;
+ p[maxCode] = 1;
+ lens[minCode] = lens[maxCode] = 1;
+ return;
+ }
+
+ {
+ UInt32 b, e, i;
+
+ i = b = e = 0;
+ do
+ {
+ UInt32 n, m, freq;
+ n = (i != num && (b == e || (p[i] >> NUM_BITS) <= (p[b] >> NUM_BITS))) ? i++ : b++;
+ freq = (p[n] & ~MASK);
+ p[n] = (p[n] & MASK) | (e << NUM_BITS);
+ m = (i != num && (b == e || (p[i] >> NUM_BITS) <= (p[b] >> NUM_BITS))) ? i++ : b++;
+ freq += (p[m] & ~MASK);
+ p[m] = (p[m] & MASK) | (e << NUM_BITS);
+ p[e] = (p[e] & MASK) | freq;
+ e++;
+ }
+ while (num - e > 1);
+
+ {
+ UInt32 lenCounters[kMaxLen + 1];
+ for (i = 0; i <= kMaxLen; i++)
+ lenCounters[i] = 0;
+
+ p[--e] &= MASK;
+ lenCounters[1] = 2;
+ while (e > 0)
+ {
+ UInt32 len = (p[p[--e] >> NUM_BITS] >> NUM_BITS) + 1;
+ p[e] = (p[e] & MASK) | (len << NUM_BITS);
+ if (len >= maxLen)
+ for (len = maxLen - 1; lenCounters[len] == 0; len--);
+ lenCounters[len]--;
+ lenCounters[len + 1] += 2;
+ }
+
+ {
+ UInt32 len;
+ i = 0;
+ for (len = maxLen; len != 0; len--)
+ {
+ UInt32 num;
+ for (num = lenCounters[len]; num != 0; num--)
+ lens[p[i++] & MASK] = (Byte)len;
+ }
+ }
+
+ {
+ UInt32 nextCodes[kMaxLen + 1];
+ {
+ UInt32 code = 0;
+ UInt32 len;
+ for (len = 1; len <= kMaxLen; len++)
+ nextCodes[len] = code = (code + lenCounters[len - 1]) << 1;
+ }
+ /* if (code + lenCounters[kMaxLen] - 1 != (1 << kMaxLen) - 1) throw 1; */
+
+ {
+ UInt32 i;
+ for (i = 0; i < numSymbols; i++)
+ p[i] = nextCodes[lens[i]]++;
+ }
+ }
+ }
+ }
+}
diff --git a/src/libs/7zip/unix/C/HuffEnc.h b/src/libs/7zip/unix/C/HuffEnc.h
new file mode 100644
index 000000000..9cf4bfde8
--- /dev/null
+++ b/src/libs/7zip/unix/C/HuffEnc.h
@@ -0,0 +1,27 @@
+/* HuffEnc.h -- Huffman encoding
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __HUFF_ENC_H
+#define __HUFF_ENC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+Conditions:
+ num <= 1024 = 2 ^ NUM_BITS
+ Sum(freqs) < 4M = 2 ^ (32 - NUM_BITS)
+ maxLen <= 16 = kMaxLen
+ Num_Items(p) >= HUFFMAN_TEMP_SIZE(num)
+*/
+
+void Huffman_Generate(const UInt32 *freqs, UInt32 *p, Byte *lens, UInt32 num, UInt32 maxLen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
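
A small, hedged example of driving Huffman_Generate: the frequencies below are invented, the work array is deliberately oversized so that the HUFFMAN_TEMP_SIZE requirement quoted above is comfortably met, and maxLen stays at the kMaxLen limit of 16. After the call, lens[i] holds the code length of symbol i and p[i] its canonical code.

#include <stdio.h>

#include "HuffEnc.h"

#define NUM_SYMS 5

int main(void)
{
  const UInt32 freqs[NUM_SYMS] = { 45, 13, 12, 16, 9 };  /* made-up symbol counts */
  UInt32 p[NUM_SYMS * 4];  /* oversized scratch/code array */
  Byte lens[NUM_SYMS];
  UInt32 i;

  Huffman_Generate(freqs, p, lens, NUM_SYMS, 16);

  for (i = 0; i < NUM_SYMS; i++)
    printf("symbol %u: len = %u, code = 0x%X\n", (unsigned)i, (unsigned)lens[i], (unsigned)p[i]);
  return 0;
}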
diff --git a/src/libs/7zip/unix/C/LzFind.c b/src/libs/7zip/unix/C/LzFind.c
new file mode 100644
index 000000000..e3ecb0542
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzFind.c
@@ -0,0 +1,761 @@
+/* LzFind.c -- Match finder for LZ algorithms
+2009-04-22 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+#include "LzFind.h"
+#include "LzHash.h"
+
+#define kEmptyHashValue 0
+#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
+#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
+#define kNormalizeMask (~(kNormalizeStepMin - 1))
+#define kMaxHistorySize ((UInt32)3 << 30)
+
+#define kStartMaxLen 3
+
+static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)
+{
+ if (!p->directInput)
+ {
+ alloc->Free(alloc, p->bufferBase);
+ p->bufferBase = 0;
+ }
+}
+
+/* (keepSizeBefore + keepSizeAfter + keepSizeReserv) must be < 4G */
+
+static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)
+{
+ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
+ if (p->directInput)
+ {
+ p->blockSize = blockSize;
+ return 1;
+ }
+ if (p->bufferBase == 0 || p->blockSize != blockSize)
+ {
+ LzInWindow_Free(p, alloc);
+ p->blockSize = blockSize;
+ p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);
+ }
+ return (p->bufferBase != 0);
+}
+
+Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
+Byte MatchFinder_GetIndexByte(CMatchFinder *p, Int32 index) { return p->buffer[index]; }
+
+UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
+
+void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
+{
+ p->posLimit -= subValue;
+ p->pos -= subValue;
+ p->streamPos -= subValue;
+}
+
+static void MatchFinder_ReadBlock(CMatchFinder *p)
+{
+ if (p->streamEndWasReached || p->result != SZ_OK)
+ return;
+ if (p->directInput)
+ {
+ UInt32 curSize = 0xFFFFFFFF - p->streamPos;
+ if (curSize > p->directInputRem)
+ curSize = (UInt32)p->directInputRem;
+ p->directInputRem -= curSize;
+ p->streamPos += curSize;
+ if (p->directInputRem == 0)
+ p->streamEndWasReached = 1;
+ return;
+ }
+ for (;;)
+ {
+ Byte *dest = p->buffer + (p->streamPos - p->pos);
+ size_t size = (p->bufferBase + p->blockSize - dest);
+ if (size == 0)
+ return;
+ p->result = p->stream->Read(p->stream, dest, &size);
+ if (p->result != SZ_OK)
+ return;
+ if (size == 0)
+ {
+ p->streamEndWasReached = 1;
+ return;
+ }
+ p->streamPos += (UInt32)size;
+ if (p->streamPos - p->pos > p->keepSizeAfter)
+ return;
+ }
+}
+
+void MatchFinder_MoveBlock(CMatchFinder *p)
+{
+ memmove(p->bufferBase,
+ p->buffer - p->keepSizeBefore,
+ (size_t)(p->streamPos - p->pos + p->keepSizeBefore));
+ p->buffer = p->bufferBase + p->keepSizeBefore;
+}
+
+int MatchFinder_NeedMove(CMatchFinder *p)
+{
+ if (p->directInput)
+ return 0;
+ /* if (p->streamEndWasReached) return 0; */
+ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
+}
+
+void MatchFinder_ReadIfRequired(CMatchFinder *p)
+{
+ if (p->streamEndWasReached)
+ return;
+ if (p->keepSizeAfter >= p->streamPos - p->pos)
+ MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
+{
+ if (MatchFinder_NeedMove(p))
+ MatchFinder_MoveBlock(p);
+ MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
+{
+ p->cutValue = 32;
+ p->btMode = 1;
+ p->numHashBytes = 4;
+ p->bigHash = 0;
+}
+
+#define kCrcPoly 0xEDB88320
+
+void MatchFinder_Construct(CMatchFinder *p)
+{
+ UInt32 i;
+ p->bufferBase = 0;
+ p->directInput = 0;
+ p->hash = 0;
+ MatchFinder_SetDefaultSettings(p);
+
+ for (i = 0; i < 256; i++)
+ {
+ UInt32 r = i;
+ int j;
+ for (j = 0; j < 8; j++)
+ r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));
+ p->crc[i] = r;
+ }
+}
+
+static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->hash);
+ p->hash = 0;
+}
+
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)
+{
+ MatchFinder_FreeThisClassMemory(p, alloc);
+ LzInWindow_Free(p, alloc);
+}
+
+static CLzRef* AllocRefs(UInt32 num, ISzAlloc *alloc)
+{
+ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
+ if (sizeInBytes / sizeof(CLzRef) != num)
+ return 0;
+ return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);
+}
+
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+ ISzAlloc *alloc)
+{
+ UInt32 sizeReserv;
+ if (historySize > kMaxHistorySize)
+ {
+ MatchFinder_Free(p, alloc);
+ return 0;
+ }
+ sizeReserv = historySize >> 1;
+ if (historySize > ((UInt32)2 << 30))
+ sizeReserv = historySize >> 2;
+ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
+
+ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
+ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
+  /* we need one additional byte, since we use MoveBlock after pos++ and before the dictionary is used */
+ if (LzInWindow_Create(p, sizeReserv, alloc))
+ {
+ UInt32 newCyclicBufferSize = historySize + 1;
+ UInt32 hs;
+ p->matchMaxLen = matchMaxLen;
+ {
+ p->fixedHashSize = 0;
+ if (p->numHashBytes == 2)
+ hs = (1 << 16) - 1;
+ else
+ {
+ hs = historySize - 1;
+ hs |= (hs >> 1);
+ hs |= (hs >> 2);
+ hs |= (hs >> 4);
+ hs |= (hs >> 8);
+ hs >>= 1;
+ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
+ if (hs > (1 << 24))
+ {
+ if (p->numHashBytes == 3)
+ hs = (1 << 24) - 1;
+ else
+ hs >>= 1;
+ }
+ }
+ p->hashMask = hs;
+ hs++;
+ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
+ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
+ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
+ hs += p->fixedHashSize;
+ }
+
+ {
+ UInt32 prevSize = p->hashSizeSum + p->numSons;
+ UInt32 newSize;
+ p->historySize = historySize;
+ p->hashSizeSum = hs;
+ p->cyclicBufferSize = newCyclicBufferSize;
+ p->numSons = (p->btMode ? newCyclicBufferSize * 2 : newCyclicBufferSize);
+ newSize = p->hashSizeSum + p->numSons;
+ if (p->hash != 0 && prevSize == newSize)
+ return 1;
+ MatchFinder_FreeThisClassMemory(p, alloc);
+ p->hash = AllocRefs(newSize, alloc);
+ if (p->hash != 0)
+ {
+ p->son = p->hash + p->hashSizeSum;
+ return 1;
+ }
+ }
+ }
+ MatchFinder_Free(p, alloc);
+ return 0;
+}
+
+static void MatchFinder_SetLimits(CMatchFinder *p)
+{
+ UInt32 limit = kMaxValForNormalize - p->pos;
+ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
+ if (limit2 < limit)
+ limit = limit2;
+ limit2 = p->streamPos - p->pos;
+ if (limit2 <= p->keepSizeAfter)
+ {
+ if (limit2 > 0)
+ limit2 = 1;
+ }
+ else
+ limit2 -= p->keepSizeAfter;
+ if (limit2 < limit)
+ limit = limit2;
+ {
+ UInt32 lenLimit = p->streamPos - p->pos;
+ if (lenLimit > p->matchMaxLen)
+ lenLimit = p->matchMaxLen;
+ p->lenLimit = lenLimit;
+ }
+ p->posLimit = p->pos + limit;
+}
+
+void MatchFinder_Init(CMatchFinder *p)
+{
+ UInt32 i;
+ for (i = 0; i < p->hashSizeSum; i++)
+ p->hash[i] = kEmptyHashValue;
+ p->cyclicBufferPos = 0;
+ p->buffer = p->bufferBase;
+ p->pos = p->streamPos = p->cyclicBufferSize;
+ p->result = SZ_OK;
+ p->streamEndWasReached = 0;
+ MatchFinder_ReadBlock(p);
+ MatchFinder_SetLimits(p);
+}
+
+static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
+{
+ return (p->pos - p->historySize - 1) & kNormalizeMask;
+}
+
+void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems)
+{
+ UInt32 i;
+ for (i = 0; i < numItems; i++)
+ {
+ UInt32 value = items[i];
+ if (value <= subValue)
+ value = kEmptyHashValue;
+ else
+ value -= subValue;
+ items[i] = value;
+ }
+}
+
+static void MatchFinder_Normalize(CMatchFinder *p)
+{
+ UInt32 subValue = MatchFinder_GetSubValue(p);
+ MatchFinder_Normalize3(subValue, p->hash, p->hashSizeSum + p->numSons);
+ MatchFinder_ReduceOffsets(p, subValue);
+}
+
+static void MatchFinder_CheckLimits(CMatchFinder *p)
+{
+ if (p->pos == kMaxValForNormalize)
+ MatchFinder_Normalize(p);
+ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
+ MatchFinder_CheckAndMoveAndRead(p);
+ if (p->cyclicBufferPos == p->cyclicBufferSize)
+ p->cyclicBufferPos = 0;
+ MatchFinder_SetLimits(p);
+}
+
+static UInt32 * Hc_GetMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+ UInt32 *distances, UInt32 maxLen)
+{
+ son[_cyclicBufferPos] = curMatch;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ return distances;
+ {
+ const Byte *pb = cur - delta;
+ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
+ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
+ {
+ UInt32 len = 0;
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ if (maxLen < len)
+ {
+ *distances++ = maxLen = len;
+ *distances++ = delta - 1;
+ if (len == lenLimit)
+ return distances;
+ }
+ }
+ }
+ }
+}
+
+UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+ UInt32 *distances, UInt32 maxLen)
+{
+ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+ UInt32 len0 = 0, len1 = 0;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ {
+ *ptr0 = *ptr1 = kEmptyHashValue;
+ return distances;
+ }
+ {
+ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+ const Byte *pb = cur - delta;
+ UInt32 len = (len0 < len1 ? len0 : len1);
+ if (pb[len] == cur[len])
+ {
+ if (++len != lenLimit && pb[len] == cur[len])
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ if (maxLen < len)
+ {
+ *distances++ = maxLen = len;
+ *distances++ = delta - 1;
+ if (len == lenLimit)
+ {
+ *ptr1 = pair[0];
+ *ptr0 = pair[1];
+ return distances;
+ }
+ }
+ }
+ if (pb[len] < cur[len])
+ {
+ *ptr1 = curMatch;
+ ptr1 = pair + 1;
+ curMatch = *ptr1;
+ len1 = len;
+ }
+ else
+ {
+ *ptr0 = curMatch;
+ ptr0 = pair;
+ curMatch = *ptr0;
+ len0 = len;
+ }
+ }
+ }
+}
+
+static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
+{
+ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+ UInt32 len0 = 0, len1 = 0;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ {
+ *ptr0 = *ptr1 = kEmptyHashValue;
+ return;
+ }
+ {
+ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+ const Byte *pb = cur - delta;
+ UInt32 len = (len0 < len1 ? len0 : len1);
+ if (pb[len] == cur[len])
+ {
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ {
+ if (len == lenLimit)
+ {
+ *ptr1 = pair[0];
+ *ptr0 = pair[1];
+ return;
+ }
+ }
+ }
+ if (pb[len] < cur[len])
+ {
+ *ptr1 = curMatch;
+ ptr1 = pair + 1;
+ curMatch = *ptr1;
+ len1 = len;
+ }
+ else
+ {
+ *ptr0 = curMatch;
+ ptr0 = pair;
+ curMatch = *ptr0;
+ len0 = len;
+ }
+ }
+ }
+}
+
+#define MOVE_POS \
+ ++p->cyclicBufferPos; \
+ p->buffer++; \
+ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
+
+#define MOVE_POS_RET MOVE_POS return offset;
+
+static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
+
+#define GET_MATCHES_HEADER2(minLen, ret_op) \
+ UInt32 lenLimit; UInt32 hashValue; const Byte *cur; UInt32 curMatch; \
+ lenLimit = p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
+ cur = p->buffer;
+
+#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
+#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
+
+#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
+
+#define GET_MATCHES_FOOTER(offset, maxLen) \
+ offset = (UInt32)(GetMatchesSpec1(lenLimit, curMatch, MF_PARAMS(p), \
+ distances + offset, maxLen) - distances); MOVE_POS_RET;
+
+#define SKIP_FOOTER \
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
+
+static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 offset;
+ GET_MATCHES_HEADER(2)
+ HASH2_CALC;
+ curMatch = p->hash[hashValue];
+ p->hash[hashValue] = p->pos;
+ offset = 0;
+ GET_MATCHES_FOOTER(offset, 1)
+}
+
+UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 offset;
+ GET_MATCHES_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hashValue];
+ p->hash[hashValue] = p->pos;
+ offset = 0;
+ GET_MATCHES_FOOTER(offset, 2)
+}
+
+static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 hash2Value, delta2, maxLen, offset;
+ GET_MATCHES_HEADER(3)
+
+ HASH3_CALC;
+
+ delta2 = p->pos - p->hash[hash2Value];
+ curMatch = p->hash[kFix3HashSize + hashValue];
+
+ p->hash[hash2Value] =
+ p->hash[kFix3HashSize + hashValue] = p->pos;
+
+
+ maxLen = 2;
+ offset = 0;
+ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+ {
+ for (; maxLen != lenLimit; maxLen++)
+ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+ break;
+ distances[0] = maxLen;
+ distances[1] = delta2 - 1;
+ offset = 2;
+ if (maxLen == lenLimit)
+ {
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
+ MOVE_POS_RET;
+ }
+ }
+ GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
+ GET_MATCHES_HEADER(4)
+
+ HASH4_CALC;
+
+ delta2 = p->pos - p->hash[ hash2Value];
+ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
+ curMatch = p->hash[kFix4HashSize + hashValue];
+
+ p->hash[ hash2Value] =
+ p->hash[kFix3HashSize + hash3Value] =
+ p->hash[kFix4HashSize + hashValue] = p->pos;
+
+ maxLen = 1;
+ offset = 0;
+ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+ {
+ distances[0] = maxLen = 2;
+ distances[1] = delta2 - 1;
+ offset = 2;
+ }
+ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
+ {
+ maxLen = 3;
+ distances[offset + 1] = delta3 - 1;
+ offset += 2;
+ delta2 = delta3;
+ }
+ if (offset != 0)
+ {
+ for (; maxLen != lenLimit; maxLen++)
+ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+ break;
+ distances[offset - 2] = maxLen;
+ if (maxLen == lenLimit)
+ {
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
+ MOVE_POS_RET;
+ }
+ }
+ if (maxLen < 3)
+ maxLen = 3;
+ GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 hash2Value, hash3Value, delta2, delta3, maxLen, offset;
+ GET_MATCHES_HEADER(4)
+
+ HASH4_CALC;
+
+ delta2 = p->pos - p->hash[ hash2Value];
+ delta3 = p->pos - p->hash[kFix3HashSize + hash3Value];
+ curMatch = p->hash[kFix4HashSize + hashValue];
+
+ p->hash[ hash2Value] =
+ p->hash[kFix3HashSize + hash3Value] =
+ p->hash[kFix4HashSize + hashValue] = p->pos;
+
+ maxLen = 1;
+ offset = 0;
+ if (delta2 < p->cyclicBufferSize && *(cur - delta2) == *cur)
+ {
+ distances[0] = maxLen = 2;
+ distances[1] = delta2 - 1;
+ offset = 2;
+ }
+ if (delta2 != delta3 && delta3 < p->cyclicBufferSize && *(cur - delta3) == *cur)
+ {
+ maxLen = 3;
+ distances[offset + 1] = delta3 - 1;
+ offset += 2;
+ delta2 = delta3;
+ }
+ if (offset != 0)
+ {
+ for (; maxLen != lenLimit; maxLen++)
+ if (cur[(ptrdiff_t)maxLen - delta2] != cur[maxLen])
+ break;
+ distances[offset - 2] = maxLen;
+ if (maxLen == lenLimit)
+ {
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS_RET;
+ }
+ }
+ if (maxLen < 3)
+ maxLen = 3;
+ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+ distances + offset, maxLen) - (distances));
+ MOVE_POS_RET
+}
+
+UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 offset;
+ GET_MATCHES_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hashValue];
+ p->hash[hashValue] = p->pos;
+ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+ distances, 2) - (distances));
+ MOVE_POS_RET
+}
+
+static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ SKIP_HEADER(2)
+ HASH2_CALC;
+ curMatch = p->hash[hashValue];
+ p->hash[hashValue] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ SKIP_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hashValue];
+ p->hash[hashValue] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 hash2Value;
+ SKIP_HEADER(3)
+ HASH3_CALC;
+ curMatch = p->hash[kFix3HashSize + hashValue];
+ p->hash[hash2Value] =
+ p->hash[kFix3HashSize + hashValue] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 hash2Value, hash3Value;
+ SKIP_HEADER(4)
+ HASH4_CALC;
+ curMatch = p->hash[kFix4HashSize + hashValue];
+ p->hash[ hash2Value] =
+ p->hash[kFix3HashSize + hash3Value] = p->pos;
+ p->hash[kFix4HashSize + hashValue] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 hash2Value, hash3Value;
+ SKIP_HEADER(4)
+ HASH4_CALC;
+ curMatch = p->hash[kFix4HashSize + hashValue];
+ p->hash[ hash2Value] =
+ p->hash[kFix3HashSize + hash3Value] =
+ p->hash[kFix4HashSize + hashValue] = p->pos;
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS
+ }
+ while (--num != 0);
+}
+
+void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ SKIP_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hashValue];
+ p->hash[hashValue] = p->pos;
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS
+ }
+ while (--num != 0);
+}
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
+{
+ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
+ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinder_GetIndexByte;
+ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
+ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
+ if (!p->btMode)
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
+ }
+ else if (p->numHashBytes == 2)
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
+ }
+ else if (p->numHashBytes == 3)
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
+ }
+ else
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
+ }
+}
diff --git a/src/libs/7zip/unix/C/LzFind.h b/src/libs/7zip/unix/C/LzFind.h
new file mode 100644
index 000000000..010c4b92b
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzFind.h
@@ -0,0 +1,115 @@
+/* LzFind.h -- Match finder for LZ algorithms
+2009-04-22 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_FIND_H
+#define __LZ_FIND_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef UInt32 CLzRef;
+
+typedef struct _CMatchFinder
+{
+ Byte *buffer;
+ UInt32 pos;
+ UInt32 posLimit;
+ UInt32 streamPos;
+ UInt32 lenLimit;
+
+ UInt32 cyclicBufferPos;
+ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
+
+ UInt32 matchMaxLen;
+ CLzRef *hash;
+ CLzRef *son;
+ UInt32 hashMask;
+ UInt32 cutValue;
+
+ Byte *bufferBase;
+ ISeqInStream *stream;
+ int streamEndWasReached;
+
+ UInt32 blockSize;
+ UInt32 keepSizeBefore;
+ UInt32 keepSizeAfter;
+
+ UInt32 numHashBytes;
+ int directInput;
+ size_t directInputRem;
+ int btMode;
+ int bigHash;
+ UInt32 historySize;
+ UInt32 fixedHashSize;
+ UInt32 hashSizeSum;
+ UInt32 numSons;
+ SRes result;
+ UInt32 crc[256];
+} CMatchFinder;
+
+#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
+#define Inline_MatchFinder_GetIndexByte(p, index) ((p)->buffer[(Int32)(index)])
+
+#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
+
+int MatchFinder_NeedMove(CMatchFinder *p);
+Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
+void MatchFinder_MoveBlock(CMatchFinder *p);
+void MatchFinder_ReadIfRequired(CMatchFinder *p);
+
+void MatchFinder_Construct(CMatchFinder *p);
+
+/* Conditions:
+ historySize <= 3 GB
+ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
+*/
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+ ISzAlloc *alloc);
+void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);
+void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, UInt32 numItems);
+void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
+
+UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
+ UInt32 *distances, UInt32 maxLen);
+
+/*
+Conditions:
+ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
+ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
+*/
+
+typedef void (*Mf_Init_Func)(void *object);
+typedef Byte (*Mf_GetIndexByte_Func)(void *object, Int32 index);
+typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
+typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
+typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
+typedef void (*Mf_Skip_Func)(void *object, UInt32);
+
+typedef struct _IMatchFinder
+{
+ Mf_Init_Func Init;
+ Mf_GetIndexByte_Func GetIndexByte;
+ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
+ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
+ Mf_GetMatches_Func GetMatches;
+ Mf_Skip_Func Skip;
+} IMatchFinder;
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
+
+void MatchFinder_Init(CMatchFinder *p);
+UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
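
The match finder is normally driven through the IMatchFinder function-pointer table rather than by calling the Bt*/Hc* routines directly. The sketch below shows one plausible wiring (illustrative only: CBufInStream, BufInStream_Read and the malloc-based allocator are ad-hoc helpers written for this example, and error handling is minimal). GetMatches fills `distances` with (length, distance - 1) pairs and returns the number of UInt32 values written.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "LzFind.h"

/* Minimal in-memory ISeqInStream used to feed the match finder. */
typedef struct
{
  ISeqInStream funcTable;
  const Byte *data;
  size_t rem;
} CBufInStream;

static SRes BufInStream_Read(void *pp, void *buf, size_t *size)
{
  CBufInStream *p = (CBufInStream *)pp;
  if (*size > p->rem)
    *size = p->rem;
  memcpy(buf, p->data, *size);
  p->data += *size;
  p->rem -= *size;
  return SZ_OK;
}

/* malloc-based ISzAlloc callbacks for this example */
static void *AllocFunc(void *p, size_t size) { (void)p; return malloc(size); }
static void FreeFunc(void *p, void *address) { (void)p; free(address); }

int main(void)
{
  static const Byte text[] = "abcabcabcabcabcabc";
  CBufInStream inStream;
  ISzAlloc allocImp;
  CMatchFinder mf;
  IMatchFinder vt;
  UInt32 distances[64];

  allocImp.Alloc = AllocFunc;
  allocImp.Free = FreeFunc;

  inStream.funcTable.Read = BufInStream_Read;
  inStream.data = text;
  inStream.rem = sizeof(text) - 1;

  MatchFinder_Construct(&mf);          /* defaults: bt4, cutValue 32 */
  mf.stream = &inStream.funcTable;
  if (!MatchFinder_Create(&mf, 1 << 16, 0, 16, 0, &allocImp))
    return 1;

  MatchFinder_CreateVTable(&mf, &vt);
  vt.Init(&mf);

  while (vt.GetNumAvailableBytes(&mf) > 3)
  {
    UInt32 num = vt.GetMatches(&mf, distances);
    UInt32 i;
    for (i = 0; i < num; i += 2)
      printf("len = %u, dist = %u\n", (unsigned)distances[i], (unsigned)(distances[i + 1] + 1));
  }

  MatchFinder_Free(&mf, &allocImp);
  return 0;
}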
diff --git a/src/libs/7zip/unix/C/LzFindMt.c b/src/libs/7zip/unix/C/LzFindMt.c
new file mode 100644
index 000000000..aa41ed98a
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzFindMt.c
@@ -0,0 +1,793 @@
+/* LzFindMt.c -- multithreaded Match finder for LZ algorithms
+2009-09-20 : Igor Pavlov : Public domain */
+
+#include "LzHash.h"
+
+#include "LzFindMt.h"
+
+void MtSync_Construct(CMtSync *p)
+{
+ p->wasCreated = False;
+ p->csWasInitialized = False;
+ p->csWasEntered = False;
+ Thread_Construct(&p->thread);
+ Event_Construct(&p->canStart);
+ Event_Construct(&p->wasStarted);
+ Event_Construct(&p->wasStopped);
+ Semaphore_Construct(&p->freeSemaphore);
+ Semaphore_Construct(&p->filledSemaphore);
+}
+
+void MtSync_GetNextBlock(CMtSync *p)
+{
+ if (p->needStart)
+ {
+ p->numProcessedBlocks = 1;
+ p->needStart = False;
+ p->stopWriting = False;
+ p->exit = False;
+ Event_Reset(&p->wasStarted);
+ Event_Reset(&p->wasStopped);
+
+ Event_Set(&p->canStart);
+ Event_Wait(&p->wasStarted);
+ }
+ else
+ {
+ CriticalSection_Leave(&p->cs);
+ p->csWasEntered = False;
+ p->numProcessedBlocks++;
+ Semaphore_Release1(&p->freeSemaphore);
+ }
+ Semaphore_Wait(&p->filledSemaphore);
+ CriticalSection_Enter(&p->cs);
+ p->csWasEntered = True;
+}
+
+/* MtSync_StopWriting must be called if Writing was started */
+
+void MtSync_StopWriting(CMtSync *p)
+{
+ UInt32 myNumBlocks = p->numProcessedBlocks;
+ if (!Thread_WasCreated(&p->thread) || p->needStart)
+ return;
+ p->stopWriting = True;
+ if (p->csWasEntered)
+ {
+ CriticalSection_Leave(&p->cs);
+ p->csWasEntered = False;
+ }
+ Semaphore_Release1(&p->freeSemaphore);
+
+ Event_Wait(&p->wasStopped);
+
+ while (myNumBlocks++ != p->numProcessedBlocks)
+ {
+ Semaphore_Wait(&p->filledSemaphore);
+ Semaphore_Release1(&p->freeSemaphore);
+ }
+ p->needStart = True;
+}
+
+void MtSync_Destruct(CMtSync *p)
+{
+ if (Thread_WasCreated(&p->thread))
+ {
+ MtSync_StopWriting(p);
+ p->exit = True;
+ if (p->needStart)
+ Event_Set(&p->canStart);
+ Thread_Wait(&p->thread);
+ Thread_Close(&p->thread);
+ }
+ if (p->csWasInitialized)
+ {
+ CriticalSection_Delete(&p->cs);
+ p->csWasInitialized = False;
+ }
+
+ Event_Close(&p->canStart);
+ Event_Close(&p->wasStarted);
+ Event_Close(&p->wasStopped);
+ Semaphore_Close(&p->freeSemaphore);
+ Semaphore_Close(&p->filledSemaphore);
+
+ p->wasCreated = False;
+}
+
+#define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; }
+
+static SRes MtSync_Create2(CMtSync *p, unsigned (MY_STD_CALL *startAddress)(void *), void *obj, UInt32 numBlocks)
+{
+ if (p->wasCreated)
+ return SZ_OK;
+
+ RINOK_THREAD(CriticalSection_Init(&p->cs));
+ p->csWasInitialized = True;
+
+ RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canStart));
+ RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStarted));
+ RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStopped));
+
+ RINOK_THREAD(Semaphore_Create(&p->freeSemaphore, numBlocks, numBlocks));
+ RINOK_THREAD(Semaphore_Create(&p->filledSemaphore, 0, numBlocks));
+
+ p->needStart = True;
+
+ RINOK_THREAD(Thread_Create(&p->thread, startAddress, obj));
+ p->wasCreated = True;
+ return SZ_OK;
+}
+
+static SRes MtSync_Create(CMtSync *p, unsigned (MY_STD_CALL *startAddress)(void *), void *obj, UInt32 numBlocks)
+{
+ SRes res = MtSync_Create2(p, startAddress, obj, numBlocks);
+ if (res != SZ_OK)
+ MtSync_Destruct(p);
+ return res;
+}
+
+void MtSync_Init(CMtSync *p) { p->needStart = True; }
+
+#define kMtMaxValForNormalize 0xFFFFFFFF
+
+#define DEF_GetHeads2(name, v, action) \
+static void GetHeads ## name(const Byte *p, UInt32 pos, \
+UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc) \
+{ action; for (; numHeads != 0; numHeads--) { \
+const UInt32 value = (v); p++; *heads++ = pos - hash[value]; hash[value] = pos++; } }
+
+#define DEF_GetHeads(name, v) DEF_GetHeads2(name, v, ;)
+
+DEF_GetHeads2(2, (p[0] | ((UInt32)p[1] << 8)), hashMask = hashMask; crc = crc; ) /* the 2-byte hash ignores hashMask and crc; the self-assignments only silence unused-parameter warnings */
+DEF_GetHeads(3, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8)) & hashMask)
+DEF_GetHeads(4, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8) ^ (crc[p[3]] << 5)) & hashMask)
+DEF_GetHeads(4b, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8) ^ ((UInt32)p[3] << 16)) & hashMask)
+/* DEF_GetHeads(5, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8) ^ (crc[p[3]] << 5) ^ (crc[p[4]] << 3)) & hashMask) */
+
+void HashThreadFunc(CMatchFinderMt *mt)
+{
+ CMtSync *p = &mt->hashSync;
+ for (;;)
+ {
+ UInt32 numProcessedBlocks = 0;
+ Event_Wait(&p->canStart);
+ Event_Set(&p->wasStarted);
+ for (;;)
+ {
+ if (p->exit)
+ return;
+ if (p->stopWriting)
+ {
+ p->numProcessedBlocks = numProcessedBlocks;
+ Event_Set(&p->wasStopped);
+ break;
+ }
+
+ {
+ CMatchFinder *mf = mt->MatchFinder;
+ if (MatchFinder_NeedMove(mf))
+ {
+ CriticalSection_Enter(&mt->btSync.cs);
+ CriticalSection_Enter(&mt->hashSync.cs);
+ {
+ const Byte *beforePtr = MatchFinder_GetPointerToCurrentPos(mf);
+ const Byte *afterPtr;
+ MatchFinder_MoveBlock(mf);
+ afterPtr = MatchFinder_GetPointerToCurrentPos(mf);
+ mt->pointerToCurPos -= beforePtr - afterPtr;
+ mt->buffer -= beforePtr - afterPtr;
+ }
+ CriticalSection_Leave(&mt->btSync.cs);
+ CriticalSection_Leave(&mt->hashSync.cs);
+ continue;
+ }
+
+ Semaphore_Wait(&p->freeSemaphore);
+
+ MatchFinder_ReadIfRequired(mf);
+ if (mf->pos > (kMtMaxValForNormalize - kMtHashBlockSize))
+ {
+ UInt32 subValue = (mf->pos - mf->historySize - 1);
+ MatchFinder_ReduceOffsets(mf, subValue);
+ MatchFinder_Normalize3(subValue, mf->hash + mf->fixedHashSize, mf->hashMask + 1);
+ }
+ {
+ UInt32 *heads = mt->hashBuf + ((numProcessedBlocks++) & kMtHashNumBlocksMask) * kMtHashBlockSize;
+ UInt32 num = mf->streamPos - mf->pos;
+ heads[0] = 2;
+ heads[1] = num;
+ if (num >= mf->numHashBytes)
+ {
+ num = num - mf->numHashBytes + 1;
+ if (num > kMtHashBlockSize - 2)
+ num = kMtHashBlockSize - 2;
+ mt->GetHeadsFunc(mf->buffer, mf->pos, mf->hash + mf->fixedHashSize, mf->hashMask, heads + 2, num, mf->crc);
+ heads[0] += num;
+ }
+ mf->pos += num;
+ mf->buffer += num;
+ }
+ }
+
+ Semaphore_Release1(&p->filledSemaphore);
+ }
+ }
+}
+
+void MatchFinderMt_GetNextBlock_Hash(CMatchFinderMt *p)
+{
+ MtSync_GetNextBlock(&p->hashSync);
+ p->hashBufPosLimit = p->hashBufPos = ((p->hashSync.numProcessedBlocks - 1) & kMtHashNumBlocksMask) * kMtHashBlockSize;
+ p->hashBufPosLimit += p->hashBuf[p->hashBufPos++];
+ p->hashNumAvail = p->hashBuf[p->hashBufPos++];
+}
+
+#define kEmptyHashValue 0
+
+/* #define MFMT_GM_INLINE */
+
+#ifdef MFMT_GM_INLINE
+
+#define NO_INLINE MY_FAST_CALL
+
+Int32 NO_INLINE GetMatchesSpecN(UInt32 lenLimit, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
+ UInt32 *_distances, UInt32 _maxLen, const UInt32 *hash, Int32 limit, UInt32 size, UInt32 *posRes)
+{
+ do
+ {
+ UInt32 *distances = _distances + 1;
+ UInt32 curMatch = pos - *hash++;
+
+ CLzRef *ptr0 = son + (_cyclicBufferPos << 1) + 1;
+ CLzRef *ptr1 = son + (_cyclicBufferPos << 1);
+ UInt32 len0 = 0, len1 = 0;
+ UInt32 cutValue = _cutValue;
+ UInt32 maxLen = _maxLen;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ {
+ *ptr0 = *ptr1 = kEmptyHashValue;
+ break;
+ }
+ {
+ CLzRef *pair = son + ((_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+ const Byte *pb = cur - delta;
+ UInt32 len = (len0 < len1 ? len0 : len1);
+ if (pb[len] == cur[len])
+ {
+ if (++len != lenLimit && pb[len] == cur[len])
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ if (maxLen < len)
+ {
+ *distances++ = maxLen = len;
+ *distances++ = delta - 1;
+ if (len == lenLimit)
+ {
+ *ptr1 = pair[0];
+ *ptr0 = pair[1];
+ break;
+ }
+ }
+ }
+ if (pb[len] < cur[len])
+ {
+ *ptr1 = curMatch;
+ ptr1 = pair + 1;
+ curMatch = *ptr1;
+ len1 = len;
+ }
+ else
+ {
+ *ptr0 = curMatch;
+ ptr0 = pair;
+ curMatch = *ptr0;
+ len0 = len;
+ }
+ }
+ }
+ pos++;
+ _cyclicBufferPos++;
+ cur++;
+ {
+ UInt32 num = (UInt32)(distances - _distances);
+ *_distances = num - 1;
+ _distances += num;
+ limit -= num;
+ }
+ }
+ while (limit > 0 && --size != 0);
+ *posRes = pos;
+ return limit;
+}
+
+#endif
+
+void BtGetMatches(CMatchFinderMt *p, UInt32 *distances)
+{
+ UInt32 numProcessed = 0;
+ UInt32 curPos = 2;
+ UInt32 limit = kMtBtBlockSize - (p->matchMaxLen * 2);
+ distances[1] = p->hashNumAvail;
+ while (curPos < limit)
+ {
+ if (p->hashBufPos == p->hashBufPosLimit)
+ {
+ MatchFinderMt_GetNextBlock_Hash(p);
+ distances[1] = numProcessed + p->hashNumAvail;
+ if (p->hashNumAvail >= p->numHashBytes)
+ continue;
+ for (; p->hashNumAvail != 0; p->hashNumAvail--)
+ distances[curPos++] = 0;
+ break;
+ }
+ {
+ UInt32 size = p->hashBufPosLimit - p->hashBufPos;
+ UInt32 lenLimit = p->matchMaxLen;
+ UInt32 pos = p->pos;
+ UInt32 cyclicBufferPos = p->cyclicBufferPos;
+ if (lenLimit >= p->hashNumAvail)
+ lenLimit = p->hashNumAvail;
+ {
+ UInt32 size2 = p->hashNumAvail - lenLimit + 1;
+ if (size2 < size)
+ size = size2;
+ size2 = p->cyclicBufferSize - cyclicBufferPos;
+ if (size2 < size)
+ size = size2;
+ }
+ #ifndef MFMT_GM_INLINE
+ while (curPos < limit && size-- != 0)
+ {
+ UInt32 *startDistances = distances + curPos;
+ UInt32 num = (UInt32)(GetMatchesSpec1(lenLimit, pos - p->hashBuf[p->hashBufPos++],
+ pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue,
+ startDistances + 1, p->numHashBytes - 1) - startDistances);
+ *startDistances = num - 1;
+ curPos += num;
+ cyclicBufferPos++;
+ pos++;
+ p->buffer++;
+ }
+ #else
+ {
+ UInt32 posRes;
+ curPos = limit - GetMatchesSpecN(lenLimit, pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue,
+ distances + curPos, p->numHashBytes - 1, p->hashBuf + p->hashBufPos, (Int32)(limit - curPos) , size, &posRes);
+ p->hashBufPos += posRes - pos;
+ cyclicBufferPos += posRes - pos;
+ p->buffer += posRes - pos;
+ pos = posRes;
+ }
+ #endif
+
+ numProcessed += pos - p->pos;
+ p->hashNumAvail -= pos - p->pos;
+ p->pos = pos;
+ if (cyclicBufferPos == p->cyclicBufferSize)
+ cyclicBufferPos = 0;
+ p->cyclicBufferPos = cyclicBufferPos;
+ }
+ }
+ distances[0] = curPos;
+}
+
+void BtFillBlock(CMatchFinderMt *p, UInt32 globalBlockIndex)
+{
+ CMtSync *sync = &p->hashSync;
+ if (!sync->needStart)
+ {
+ CriticalSection_Enter(&sync->cs);
+ sync->csWasEntered = True;
+ }
+
+ BtGetMatches(p, p->btBuf + (globalBlockIndex & kMtBtNumBlocksMask) * kMtBtBlockSize);
+
+ if (p->pos > kMtMaxValForNormalize - kMtBtBlockSize)
+ {
+ UInt32 subValue = p->pos - p->cyclicBufferSize;
+ MatchFinder_Normalize3(subValue, p->son, p->cyclicBufferSize * 2);
+ p->pos -= subValue;
+ }
+
+ if (!sync->needStart)
+ {
+ CriticalSection_Leave(&sync->cs);
+ sync->csWasEntered = False;
+ }
+}
+
+void BtThreadFunc(CMatchFinderMt *mt)
+{
+ CMtSync *p = &mt->btSync;
+ for (;;)
+ {
+ UInt32 blockIndex = 0;
+ Event_Wait(&p->canStart);
+ Event_Set(&p->wasStarted);
+ for (;;)
+ {
+ if (p->exit)
+ return;
+ if (p->stopWriting)
+ {
+ p->numProcessedBlocks = blockIndex;
+ MtSync_StopWriting(&mt->hashSync);
+ Event_Set(&p->wasStopped);
+ break;
+ }
+ Semaphore_Wait(&p->freeSemaphore);
+ BtFillBlock(mt, blockIndex++);
+ Semaphore_Release1(&p->filledSemaphore);
+ }
+ }
+}
+
+void MatchFinderMt_Construct(CMatchFinderMt *p)
+{
+ p->hashBuf = 0;
+ MtSync_Construct(&p->hashSync);
+ MtSync_Construct(&p->btSync);
+}
+
+void MatchFinderMt_FreeMem(CMatchFinderMt *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->hashBuf);
+ p->hashBuf = 0;
+}
+
+void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAlloc *alloc)
+{
+ MtSync_Destruct(&p->hashSync);
+ MtSync_Destruct(&p->btSync);
+ MatchFinderMt_FreeMem(p, alloc);
+}
+
+#define kHashBufferSize (kMtHashBlockSize * kMtHashNumBlocks)
+#define kBtBufferSize (kMtBtBlockSize * kMtBtNumBlocks)
+
+static unsigned MY_STD_CALL HashThreadFunc2(void *p) { HashThreadFunc((CMatchFinderMt *)p); return 0; }
+static unsigned MY_STD_CALL BtThreadFunc2(void *p)
+{
+ Byte allocaDummy[0x180];
+ int i = 0;
+ for (i = 0; i < 16; i++)
+ allocaDummy[i] = (Byte)i;
+ BtThreadFunc((CMatchFinderMt *)p);
+ return 0;
+}
+
+SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
+ UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAlloc *alloc)
+{
+ CMatchFinder *mf = p->MatchFinder;
+ p->historySize = historySize;
+ if (kMtBtBlockSize <= matchMaxLen * 4)
+ return SZ_ERROR_PARAM;
+ if (p->hashBuf == 0)
+ {
+ p->hashBuf = (UInt32 *)alloc->Alloc(alloc, (kHashBufferSize + kBtBufferSize) * sizeof(UInt32));
+ if (p->hashBuf == 0)
+ return SZ_ERROR_MEM;
+ p->btBuf = p->hashBuf + kHashBufferSize;
+ }
+ keepAddBufferBefore += (kHashBufferSize + kBtBufferSize);
+ keepAddBufferAfter += kMtHashBlockSize;
+ if (!MatchFinder_Create(mf, historySize, keepAddBufferBefore, matchMaxLen, keepAddBufferAfter, alloc))
+ return SZ_ERROR_MEM;
+
+ RINOK(MtSync_Create(&p->hashSync, HashThreadFunc2, p, kMtHashNumBlocks));
+ RINOK(MtSync_Create(&p->btSync, BtThreadFunc2, p, kMtBtNumBlocks));
+ return SZ_OK;
+}
+
+/* Call it after ReleaseStream / SetStream */
+void MatchFinderMt_Init(CMatchFinderMt *p)
+{
+ CMatchFinder *mf = p->MatchFinder;
+ p->btBufPos = p->btBufPosLimit = 0;
+ p->hashBufPos = p->hashBufPosLimit = 0;
+ MatchFinder_Init(mf);
+ p->pointerToCurPos = MatchFinder_GetPointerToCurrentPos(mf);
+ p->btNumAvailBytes = 0;
+ p->lzPos = p->historySize + 1;
+
+ p->hash = mf->hash;
+ p->fixedHashSize = mf->fixedHashSize;
+ p->crc = mf->crc;
+
+ p->son = mf->son;
+ p->matchMaxLen = mf->matchMaxLen;
+ p->numHashBytes = mf->numHashBytes;
+ p->pos = mf->pos;
+ p->buffer = mf->buffer;
+ p->cyclicBufferPos = mf->cyclicBufferPos;
+ p->cyclicBufferSize = mf->cyclicBufferSize;
+ p->cutValue = mf->cutValue;
+}
+
+/* ReleaseStream is required to finish multithreading */
+void MatchFinderMt_ReleaseStream(CMatchFinderMt *p)
+{
+ MtSync_StopWriting(&p->btSync);
+ /* p->MatchFinder->ReleaseStream(); */
+}
+
+void MatchFinderMt_Normalize(CMatchFinderMt *p)
+{
+ MatchFinder_Normalize3(p->lzPos - p->historySize - 1, p->hash, p->fixedHashSize);
+ p->lzPos = p->historySize + 1;
+}
+
+void MatchFinderMt_GetNextBlock_Bt(CMatchFinderMt *p)
+{
+ UInt32 blockIndex;
+ MtSync_GetNextBlock(&p->btSync);
+ blockIndex = ((p->btSync.numProcessedBlocks - 1) & kMtBtNumBlocksMask);
+ p->btBufPosLimit = p->btBufPos = blockIndex * kMtBtBlockSize;
+ p->btBufPosLimit += p->btBuf[p->btBufPos++];
+ p->btNumAvailBytes = p->btBuf[p->btBufPos++];
+ if (p->lzPos >= kMtMaxValForNormalize - kMtBtBlockSize)
+ MatchFinderMt_Normalize(p);
+}
+
+const Byte * MatchFinderMt_GetPointerToCurrentPos(CMatchFinderMt *p)
+{
+ return p->pointerToCurPos;
+}
+
+#define GET_NEXT_BLOCK_IF_REQUIRED if (p->btBufPos == p->btBufPosLimit) MatchFinderMt_GetNextBlock_Bt(p);
+
+UInt32 MatchFinderMt_GetNumAvailableBytes(CMatchFinderMt *p)
+{
+ GET_NEXT_BLOCK_IF_REQUIRED;
+ return p->btNumAvailBytes;
+}
+
+Byte MatchFinderMt_GetIndexByte(CMatchFinderMt *p, Int32 index)
+{
+ return p->pointerToCurPos[index];
+}
+
+UInt32 * MixMatches2(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *distances)
+{
+ UInt32 hash2Value, curMatch2;
+ UInt32 *hash = p->hash;
+ const Byte *cur = p->pointerToCurPos;
+ UInt32 lzPos = p->lzPos;
+ MT_HASH2_CALC
+
+ curMatch2 = hash[hash2Value];
+ hash[hash2Value] = lzPos;
+
+ if (curMatch2 >= matchMinPos)
+ if (cur[(ptrdiff_t)curMatch2 - lzPos] == cur[0])
+ {
+ *distances++ = 2;
+ *distances++ = lzPos - curMatch2 - 1;
+ }
+ return distances;
+}
+
+UInt32 * MixMatches3(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *distances)
+{
+ UInt32 hash2Value, hash3Value, curMatch2, curMatch3;
+ UInt32 *hash = p->hash;
+ const Byte *cur = p->pointerToCurPos;
+ UInt32 lzPos = p->lzPos;
+ MT_HASH3_CALC
+
+ curMatch2 = hash[ hash2Value];
+ curMatch3 = hash[kFix3HashSize + hash3Value];
+
+ hash[ hash2Value] =
+ hash[kFix3HashSize + hash3Value] =
+ lzPos;
+
+ if (curMatch2 >= matchMinPos && cur[(ptrdiff_t)curMatch2 - lzPos] == cur[0])
+ {
+ distances[1] = lzPos - curMatch2 - 1;
+ if (cur[(ptrdiff_t)curMatch2 - lzPos + 2] == cur[2])
+ {
+ distances[0] = 3;
+ return distances + 2;
+ }
+ distances[0] = 2;
+ distances += 2;
+ }
+ if (curMatch3 >= matchMinPos && cur[(ptrdiff_t)curMatch3 - lzPos] == cur[0])
+ {
+ *distances++ = 3;
+ *distances++ = lzPos - curMatch3 - 1;
+ }
+ return distances;
+}
+
+/*
+UInt32 *MixMatches4(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *distances)
+{
+ UInt32 hash2Value, hash3Value, hash4Value, curMatch2, curMatch3, curMatch4;
+ UInt32 *hash = p->hash;
+ const Byte *cur = p->pointerToCurPos;
+ UInt32 lzPos = p->lzPos;
+ MT_HASH4_CALC
+
+ curMatch2 = hash[ hash2Value];
+ curMatch3 = hash[kFix3HashSize + hash3Value];
+ curMatch4 = hash[kFix4HashSize + hash4Value];
+
+ hash[ hash2Value] =
+ hash[kFix3HashSize + hash3Value] =
+ hash[kFix4HashSize + hash4Value] =
+ lzPos;
+
+ if (curMatch2 >= matchMinPos && cur[(ptrdiff_t)curMatch2 - lzPos] == cur[0])
+ {
+ distances[1] = lzPos - curMatch2 - 1;
+ if (cur[(ptrdiff_t)curMatch2 - lzPos + 2] == cur[2])
+ {
+ distances[0] = (cur[(ptrdiff_t)curMatch2 - lzPos + 3] == cur[3]) ? 4 : 3;
+ return distances + 2;
+ }
+ distances[0] = 2;
+ distances += 2;
+ }
+ if (curMatch3 >= matchMinPos && cur[(ptrdiff_t)curMatch3 - lzPos] == cur[0])
+ {
+ distances[1] = lzPos - curMatch3 - 1;
+ if (cur[(ptrdiff_t)curMatch3 - lzPos + 3] == cur[3])
+ {
+ distances[0] = 4;
+ return distances + 2;
+ }
+ distances[0] = 3;
+ distances += 2;
+ }
+
+ if (curMatch4 >= matchMinPos)
+ if (
+ cur[(ptrdiff_t)curMatch4 - lzPos] == cur[0] &&
+ cur[(ptrdiff_t)curMatch4 - lzPos + 3] == cur[3]
+ )
+ {
+ *distances++ = 4;
+ *distances++ = lzPos - curMatch4 - 1;
+ }
+ return distances;
+}
+*/
+
+#define INCREASE_LZ_POS p->lzPos++; p->pointerToCurPos++;
+
+UInt32 MatchFinderMt2_GetMatches(CMatchFinderMt *p, UInt32 *distances)
+{
+ const UInt32 *btBuf = p->btBuf + p->btBufPos;
+ UInt32 len = *btBuf++;
+ p->btBufPos += 1 + len;
+ p->btNumAvailBytes--;
+ {
+ UInt32 i;
+ for (i = 0; i < len; i += 2)
+ {
+ *distances++ = *btBuf++;
+ *distances++ = *btBuf++;
+ }
+ }
+ INCREASE_LZ_POS
+ return len;
+}
+
+UInt32 MatchFinderMt_GetMatches(CMatchFinderMt *p, UInt32 *distances)
+{
+ const UInt32 *btBuf = p->btBuf + p->btBufPos;
+ UInt32 len = *btBuf++;
+ p->btBufPos += 1 + len;
+
+ if (len == 0)
+ {
+ if (p->btNumAvailBytes-- >= 4)
+ len = (UInt32)(p->MixMatchesFunc(p, p->lzPos - p->historySize, distances) - (distances));
+ }
+ else
+ {
+ /* Condition: there are matches in btBuf with length < p->numHashBytes */
+ UInt32 *distances2;
+ p->btNumAvailBytes--;
+ distances2 = p->MixMatchesFunc(p, p->lzPos - btBuf[1], distances);
+ do
+ {
+ *distances2++ = *btBuf++;
+ *distances2++ = *btBuf++;
+ }
+ while ((len -= 2) != 0);
+ len = (UInt32)(distances2 - (distances));
+ }
+ INCREASE_LZ_POS
+ return len;
+}
+
+#define SKIP_HEADER2_MT do { GET_NEXT_BLOCK_IF_REQUIRED
+#define SKIP_HEADER_MT(n) SKIP_HEADER2_MT if (p->btNumAvailBytes-- >= (n)) { const Byte *cur = p->pointerToCurPos; UInt32 *hash = p->hash;
+#define SKIP_FOOTER_MT } INCREASE_LZ_POS p->btBufPos += p->btBuf[p->btBufPos] + 1; } while (--num != 0);
+
+void MatchFinderMt0_Skip(CMatchFinderMt *p, UInt32 num)
+{
+ SKIP_HEADER2_MT { p->btNumAvailBytes--;
+ SKIP_FOOTER_MT
+}
+
+void MatchFinderMt2_Skip(CMatchFinderMt *p, UInt32 num)
+{
+ SKIP_HEADER_MT(2)
+ UInt32 hash2Value;
+ MT_HASH2_CALC
+ hash[hash2Value] = p->lzPos;
+ SKIP_FOOTER_MT
+}
+
+void MatchFinderMt3_Skip(CMatchFinderMt *p, UInt32 num)
+{
+ SKIP_HEADER_MT(3)
+ UInt32 hash2Value, hash3Value;
+ MT_HASH3_CALC
+ hash[kFix3HashSize + hash3Value] =
+ hash[ hash2Value] =
+ p->lzPos;
+ SKIP_FOOTER_MT
+}
+
+/*
+void MatchFinderMt4_Skip(CMatchFinderMt *p, UInt32 num)
+{
+ SKIP_HEADER_MT(4)
+ UInt32 hash2Value, hash3Value, hash4Value;
+ MT_HASH4_CALC
+ hash[kFix4HashSize + hash4Value] =
+ hash[kFix3HashSize + hash3Value] =
+ hash[ hash2Value] =
+ p->lzPos;
+ SKIP_FOOTER_MT
+}
+*/
+
+void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder *vTable)
+{
+ vTable->Init = (Mf_Init_Func)MatchFinderMt_Init;
+ vTable->GetIndexByte = (Mf_GetIndexByte_Func)MatchFinderMt_GetIndexByte;
+ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinderMt_GetNumAvailableBytes;
+ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinderMt_GetPointerToCurrentPos;
+ vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt_GetMatches;
+ switch(p->MatchFinder->numHashBytes)
+ {
+ case 2:
+ p->GetHeadsFunc = GetHeads2;
+ p->MixMatchesFunc = (Mf_Mix_Matches)0;
+ vTable->Skip = (Mf_Skip_Func)MatchFinderMt0_Skip;
+ vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt2_GetMatches;
+ break;
+ case 3:
+ p->GetHeadsFunc = GetHeads3;
+ p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches2;
+ vTable->Skip = (Mf_Skip_Func)MatchFinderMt2_Skip;
+ break;
+ default:
+ /* case 4: */
+ p->GetHeadsFunc = p->MatchFinder->bigHash ? GetHeads4b : GetHeads4;
+ /* p->GetHeadsFunc = GetHeads4; */
+ p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches3;
+ vTable->Skip = (Mf_Skip_Func)MatchFinderMt3_Skip;
+ break;
+ /*
+ default:
+ p->GetHeadsFunc = GetHeads5;
+ p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches4;
+ vTable->Skip = (Mf_Skip_Func)MatchFinderMt4_Skip;
+ break;
+ */
+ }
+}
diff --git a/src/libs/7zip/unix/C/LzFindMt.h b/src/libs/7zip/unix/C/LzFindMt.h
new file mode 100644
index 000000000..b985af5fe
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzFindMt.h
@@ -0,0 +1,105 @@
+/* LzFindMt.h -- multithreaded Match finder for LZ algorithms
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_FIND_MT_H
+#define __LZ_FIND_MT_H
+
+#include "LzFind.h"
+#include "Threads.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define kMtHashBlockSize (1 << 13)
+#define kMtHashNumBlocks (1 << 3)
+#define kMtHashNumBlocksMask (kMtHashNumBlocks - 1)
+
+#define kMtBtBlockSize (1 << 14)
+#define kMtBtNumBlocks (1 << 6)
+#define kMtBtNumBlocksMask (kMtBtNumBlocks - 1)
+
+typedef struct _CMtSync
+{
+ Bool wasCreated;
+ Bool needStart;
+ Bool exit;
+ Bool stopWriting;
+
+ CThread thread;
+ CAutoResetEvent canStart;
+ CAutoResetEvent wasStarted;
+ CAutoResetEvent wasStopped;
+ CSemaphore freeSemaphore;
+ CSemaphore filledSemaphore;
+ Bool csWasInitialized;
+ Bool csWasEntered;
+ CCriticalSection cs;
+ UInt32 numProcessedBlocks;
+} CMtSync;
+
+typedef UInt32 * (*Mf_Mix_Matches)(void *p, UInt32 matchMinPos, UInt32 *distances);
+
+/* kMtCacheLineDummy must be >= size_of_CPU_cache_line */
+#define kMtCacheLineDummy 128
+
+typedef void (*Mf_GetHeads)(const Byte *buffer, UInt32 pos,
+ UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc);
+
+typedef struct _CMatchFinderMt
+{
+ /* LZ */
+ const Byte *pointerToCurPos;
+ UInt32 *btBuf;
+ UInt32 btBufPos;
+ UInt32 btBufPosLimit;
+ UInt32 lzPos;
+ UInt32 btNumAvailBytes;
+
+ UInt32 *hash;
+ UInt32 fixedHashSize;
+ UInt32 historySize;
+ const UInt32 *crc;
+
+ Mf_Mix_Matches MixMatchesFunc;
+
+ /* LZ + BT */
+ CMtSync btSync;
+ Byte btDummy[kMtCacheLineDummy];
+
+ /* BT */
+ UInt32 *hashBuf;
+ UInt32 hashBufPos;
+ UInt32 hashBufPosLimit;
+ UInt32 hashNumAvail;
+
+ CLzRef *son;
+ UInt32 matchMaxLen;
+ UInt32 numHashBytes;
+ UInt32 pos;
+ Byte *buffer;
+ UInt32 cyclicBufferPos;
+ UInt32 cyclicBufferSize; /* it must be historySize + 1 */
+ UInt32 cutValue;
+
+ /* BT + Hash */
+ CMtSync hashSync;
+ /* Byte hashDummy[kMtCacheLineDummy]; */
+
+ /* Hash */
+ Mf_GetHeads GetHeadsFunc;
+ CMatchFinder *MatchFinder;
+} CMatchFinderMt;
+
+void MatchFinderMt_Construct(CMatchFinderMt *p);
+void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAlloc *alloc);
+SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore,
+ UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAlloc *alloc);
+void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder *vTable);
+void MatchFinderMt_ReleaseStream(CMatchFinderMt *p);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
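
For completeness, a hedged outline of how the multithreaded wrapper is wired up (this roughly mirrors what the LZMA encoder does; the ISeqInStream and ISzAlloc are assumed to be set up as in the single-threaded sketch after LzFind.h, and error handling is omitted):

#include "LzFindMt.h"

static int UseMtMatchFinder(ISeqInStream *stream, ISzAlloc *alloc)
{
  CMatchFinder base;
  CMatchFinderMt mt;
  IMatchFinder vt;

  MatchFinder_Construct(&base);
  MatchFinderMt_Construct(&mt);
  mt.MatchFinder = &base;            /* the MT wrapper drives the plain match finder */
  base.stream = stream;

  if (MatchFinderMt_Create(&mt, 1 << 16, 0, 16, 0, alloc) != SZ_OK)
    return 1;

  MatchFinderMt_CreateVTable(&mt, &vt);
  vt.Init(&mt);                      /* worker threads start lazily on the first block request */

  /* ... call vt.GetNumAvailableBytes / vt.GetMatches / vt.Skip exactly as in the
     single-threaded case; hashing and tree updates now run on the worker threads ... */

  MatchFinderMt_ReleaseStream(&mt);  /* stop the writers before tearing anything down */
  MatchFinderMt_Destruct(&mt, alloc);
  MatchFinder_Free(&base, alloc);    /* the wrapper does not free the underlying finder */
  return 0;
}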
diff --git a/src/libs/7zip/unix/C/LzHash.h b/src/libs/7zip/unix/C/LzHash.h
new file mode 100644
index 000000000..f3e89966c
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzHash.h
@@ -0,0 +1,54 @@
+/* LzHash.h -- HASH functions for LZ algorithms
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_HASH_H
+#define __LZ_HASH_H
+
+#define kHash2Size (1 << 10)
+#define kHash3Size (1 << 16)
+#define kHash4Size (1 << 20)
+
+#define kFix3HashSize (kHash2Size)
+#define kFix4HashSize (kHash2Size + kHash3Size)
+#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
+
+#define HASH2_CALC hashValue = cur[0] | ((UInt32)cur[1] << 8);
+
+#define HASH3_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ hash2Value = temp & (kHash2Size - 1); \
+ hashValue = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
+
+#define HASH4_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ hash2Value = temp & (kHash2Size - 1); \
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+ hashValue = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
+
+#define HASH5_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ hash2Value = temp & (kHash2Size - 1); \
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)); \
+ hashValue = (hash4Value ^ (p->crc[cur[4]] << 3)) & p->hashMask; \
+ hash4Value &= (kHash4Size - 1); }
+
+/* #define HASH_ZIP_CALC hashValue = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
+#define HASH_ZIP_CALC hashValue = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
+
+
+#define MT_HASH2_CALC \
+ hash2Value = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
+
+#define MT_HASH3_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ hash2Value = temp & (kHash2Size - 1); \
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
+
+#define MT_HASH4_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ hash2Value = temp & (kHash2Size - 1); \
+ hash3Value = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); \
+ hash4Value = (temp ^ ((UInt32)cur[2] << 8) ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
+
+#endif
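
The HASH*_CALC macros above are meant to be expanded inside the match finder, which supplies `cur` (the current input position), a 256-entry CRC byte table in `p->crc`, the `p->hashMask` field, and the hash*Value locals. The standalone sketch below mirrors HASH4_CALC with explicit parameters; the names crcTable and hashMask are stand-ins assumed for this example only and are not part of LzHash.h.

#include "LzHash.h"

/* Sketch only: the same byte mixing as HASH4_CALC, written as a function.
   crcTable is the CRC byte table the match finder builds at init time;
   hashMask limits the main hash value to the allocated hash-table size. */
static void Hash4Calc(const unsigned char *cur, const unsigned *crcTable,
    unsigned hashMask, unsigned *hash2Value, unsigned *hash3Value, unsigned *hashValue)
{
  unsigned temp = crcTable[cur[0]] ^ cur[1];
  *hash2Value = temp & (kHash2Size - 1);
  *hash3Value = (temp ^ ((unsigned)cur[2] << 8)) & (kHash3Size - 1);
  *hashValue  = (temp ^ ((unsigned)cur[2] << 8) ^ (crcTable[cur[3]] << 5)) & hashMask;
}
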
diff --git a/src/libs/7zip/unix/C/Lzma2Dec.c b/src/libs/7zip/unix/C/Lzma2Dec.c
new file mode 100644
index 000000000..7ea1cc953
--- /dev/null
+++ b/src/libs/7zip/unix/C/Lzma2Dec.c
@@ -0,0 +1,356 @@
+/* Lzma2Dec.c -- LZMA2 Decoder
+2009-05-03 : Igor Pavlov : Public domain */
+
+/* #define SHOW_DEBUG_INFO */
+
+#ifdef SHOW_DEBUG_INFO
+#include <stdio.h>
+#endif
+
+#include <string.h>
+
+#include "Lzma2Dec.h"
+
+/*
+00000000 - EOS
+00000001 U U - Uncompressed Reset Dic
+00000010 U U - Uncompressed No Reset
+100uuuuu U U P P - LZMA no reset
+101uuuuu U U P P - LZMA reset state
+110uuuuu U U P P S - LZMA reset state + new prop
+111uuuuu U U P P S - LZMA reset state + new prop + reset dic
+
+ u, U - Unpack Size
+ P - Pack Size
+ S - Props
+*/
+
+#define LZMA2_CONTROL_LZMA (1 << 7)
+#define LZMA2_CONTROL_COPY_NO_RESET 2
+#define LZMA2_CONTROL_COPY_RESET_DIC 1
+#define LZMA2_CONTROL_EOF 0
+
+#define LZMA2_IS_UNCOMPRESSED_STATE(p) (((p)->control & LZMA2_CONTROL_LZMA) == 0)
+
+#define LZMA2_GET_LZMA_MODE(p) (((p)->control >> 5) & 3)
+#define LZMA2_IS_THERE_PROP(mode) ((mode) >= 2)
+
+#define LZMA2_LCLP_MAX 4
+#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11))
+
+#ifdef SHOW_DEBUG_INFO
+#define PRF(x) x
+#else
+#define PRF(x)
+#endif
+
+typedef enum
+{
+ LZMA2_STATE_CONTROL,
+ LZMA2_STATE_UNPACK0,
+ LZMA2_STATE_UNPACK1,
+ LZMA2_STATE_PACK0,
+ LZMA2_STATE_PACK1,
+ LZMA2_STATE_PROP,
+ LZMA2_STATE_DATA,
+ LZMA2_STATE_DATA_CONT,
+ LZMA2_STATE_FINISHED,
+ LZMA2_STATE_ERROR
+} ELzma2State;
+
+static SRes Lzma2Dec_GetOldProps(Byte prop, Byte *props)
+{
+ UInt32 dicSize;
+ if (prop > 40)
+ return SZ_ERROR_UNSUPPORTED;
+ dicSize = (prop == 40) ? 0xFFFFFFFF : LZMA2_DIC_SIZE_FROM_PROP(prop);
+ props[0] = (Byte)LZMA2_LCLP_MAX;
+ props[1] = (Byte)(dicSize);
+ props[2] = (Byte)(dicSize >> 8);
+ props[3] = (Byte)(dicSize >> 16);
+ props[4] = (Byte)(dicSize >> 24);
+ return SZ_OK;
+}
+
+SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAlloc *alloc)
+{
+ Byte props[LZMA_PROPS_SIZE];
+ RINOK(Lzma2Dec_GetOldProps(prop, props));
+ return LzmaDec_AllocateProbs(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
+}
+
+SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAlloc *alloc)
+{
+ Byte props[LZMA_PROPS_SIZE];
+ RINOK(Lzma2Dec_GetOldProps(prop, props));
+ return LzmaDec_Allocate(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
+}
+
+void Lzma2Dec_Init(CLzma2Dec *p)
+{
+ p->state = LZMA2_STATE_CONTROL;
+ p->needInitDic = True;
+ p->needInitState = True;
+ p->needInitProp = True;
+ LzmaDec_Init(&p->decoder);
+}
+
+static ELzma2State Lzma2Dec_UpdateState(CLzma2Dec *p, Byte b)
+{
+ switch(p->state)
+ {
+ case LZMA2_STATE_CONTROL:
+ p->control = b;
+ PRF(printf("\n %4X ", p->decoder.dicPos));
+ PRF(printf(" %2X", b));
+ if (p->control == 0)
+ return LZMA2_STATE_FINISHED;
+ if (LZMA2_IS_UNCOMPRESSED_STATE(p))
+ {
+ if ((p->control & 0x7F) > 2)
+ return LZMA2_STATE_ERROR;
+ p->unpackSize = 0;
+ }
+ else
+ p->unpackSize = (UInt32)(p->control & 0x1F) << 16;
+ return LZMA2_STATE_UNPACK0;
+
+ case LZMA2_STATE_UNPACK0:
+ p->unpackSize |= (UInt32)b << 8;
+ return LZMA2_STATE_UNPACK1;
+
+ case LZMA2_STATE_UNPACK1:
+ p->unpackSize |= (UInt32)b;
+ p->unpackSize++;
+ PRF(printf(" %8d", p->unpackSize));
+ return (LZMA2_IS_UNCOMPRESSED_STATE(p)) ? LZMA2_STATE_DATA : LZMA2_STATE_PACK0;
+
+ case LZMA2_STATE_PACK0:
+ p->packSize = (UInt32)b << 8;
+ return LZMA2_STATE_PACK1;
+
+ case LZMA2_STATE_PACK1:
+ p->packSize |= (UInt32)b;
+ p->packSize++;
+ PRF(printf(" %8d", p->packSize));
+ return LZMA2_IS_THERE_PROP(LZMA2_GET_LZMA_MODE(p)) ? LZMA2_STATE_PROP:
+ (p->needInitProp ? LZMA2_STATE_ERROR : LZMA2_STATE_DATA);
+
+ case LZMA2_STATE_PROP:
+ {
+ int lc, lp;
+ if (b >= (9 * 5 * 5))
+ return LZMA2_STATE_ERROR;
+ lc = b % 9;
+ b /= 9;
+ p->decoder.prop.pb = b / 5;
+ lp = b % 5;
+ if (lc + lp > LZMA2_LCLP_MAX)
+ return LZMA2_STATE_ERROR;
+ p->decoder.prop.lc = lc;
+ p->decoder.prop.lp = lp;
+ p->needInitProp = False;
+ return LZMA2_STATE_DATA;
+ }
+ }
+ return LZMA2_STATE_ERROR;
+}
+
+static void LzmaDec_UpdateWithUncompressed(CLzmaDec *p, const Byte *src, SizeT size)
+{
+ memcpy(p->dic + p->dicPos, src, size);
+ p->dicPos += size;
+ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= size)
+ p->checkDicSize = p->prop.dicSize;
+ p->processedPos += (UInt32)size;
+}
+
+void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState);
+
+SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT inSize = *srcLen;
+ *srcLen = 0;
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+
+ while (p->state != LZMA2_STATE_FINISHED)
+ {
+ SizeT dicPos = p->decoder.dicPos;
+ if (p->state == LZMA2_STATE_ERROR)
+ return SZ_ERROR_DATA;
+ if (dicPos == dicLimit && finishMode == LZMA_FINISH_ANY)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_OK;
+ }
+ if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT)
+ {
+ if (*srcLen == inSize)
+ {
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ (*srcLen)++;
+ p->state = Lzma2Dec_UpdateState(p, *src++);
+ continue;
+ }
+ {
+ SizeT destSizeCur = dicLimit - dicPos;
+ SizeT srcSizeCur = inSize - *srcLen;
+ ELzmaFinishMode curFinishMode = LZMA_FINISH_ANY;
+
+ if (p->unpackSize <= destSizeCur)
+ {
+ destSizeCur = (SizeT)p->unpackSize;
+ curFinishMode = LZMA_FINISH_END;
+ }
+
+ if (LZMA2_IS_UNCOMPRESSED_STATE(p))
+ {
+ if (*srcLen == inSize)
+ {
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+
+ if (p->state == LZMA2_STATE_DATA)
+ {
+ Bool initDic = (p->control == LZMA2_CONTROL_COPY_RESET_DIC);
+ if (initDic)
+ p->needInitProp = p->needInitState = True;
+ else if (p->needInitDic)
+ return SZ_ERROR_DATA;
+ p->needInitDic = False;
+ LzmaDec_InitDicAndState(&p->decoder, initDic, False);
+ }
+
+ if (srcSizeCur > destSizeCur)
+ srcSizeCur = destSizeCur;
+
+ if (srcSizeCur == 0)
+ return SZ_ERROR_DATA;
+
+ LzmaDec_UpdateWithUncompressed(&p->decoder, src, srcSizeCur);
+
+ src += srcSizeCur;
+ *srcLen += srcSizeCur;
+ p->unpackSize -= (UInt32)srcSizeCur;
+ p->state = (p->unpackSize == 0) ? LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT;
+ }
+ else
+ {
+ SizeT outSizeProcessed;
+ SRes res;
+
+ if (p->state == LZMA2_STATE_DATA)
+ {
+ int mode = LZMA2_GET_LZMA_MODE(p);
+ Bool initDic = (mode == 3);
+ Bool initState = (mode > 0);
+ if ((!initDic && p->needInitDic) || (!initState && p->needInitState))
+ return SZ_ERROR_DATA;
+
+ LzmaDec_InitDicAndState(&p->decoder, initDic, initState);
+ p->needInitDic = False;
+ p->needInitState = False;
+ p->state = LZMA2_STATE_DATA_CONT;
+ }
+ if (srcSizeCur > p->packSize)
+ srcSizeCur = (SizeT)p->packSize;
+
+ res = LzmaDec_DecodeToDic(&p->decoder, dicPos + destSizeCur, src, &srcSizeCur, curFinishMode, status);
+
+ src += srcSizeCur;
+ *srcLen += srcSizeCur;
+ p->packSize -= (UInt32)srcSizeCur;
+
+ outSizeProcessed = p->decoder.dicPos - dicPos;
+ p->unpackSize -= (UInt32)outSizeProcessed;
+
+ RINOK(res);
+ if (*status == LZMA_STATUS_NEEDS_MORE_INPUT)
+ return res;
+
+ if (srcSizeCur == 0 && outSizeProcessed == 0)
+ {
+ if (*status != LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK ||
+ p->unpackSize != 0 || p->packSize != 0)
+ return SZ_ERROR_DATA;
+ p->state = LZMA2_STATE_CONTROL;
+ }
+ if (*status == LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK)
+ *status = LZMA_STATUS_NOT_FINISHED;
+ }
+ }
+ }
+ *status = LZMA_STATUS_FINISHED_WITH_MARK;
+ return SZ_OK;
+}
+
+SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT outSize = *destLen, inSize = *srcLen;
+ *srcLen = *destLen = 0;
+ for (;;)
+ {
+ SizeT srcSizeCur = inSize, outSizeCur, dicPos;
+ ELzmaFinishMode curFinishMode;
+ SRes res;
+ if (p->decoder.dicPos == p->decoder.dicBufSize)
+ p->decoder.dicPos = 0;
+ dicPos = p->decoder.dicPos;
+ if (outSize > p->decoder.dicBufSize - dicPos)
+ {
+ outSizeCur = p->decoder.dicBufSize;
+ curFinishMode = LZMA_FINISH_ANY;
+ }
+ else
+ {
+ outSizeCur = dicPos + outSize;
+ curFinishMode = finishMode;
+ }
+
+ res = Lzma2Dec_DecodeToDic(p, outSizeCur, src, &srcSizeCur, curFinishMode, status);
+ src += srcSizeCur;
+ inSize -= srcSizeCur;
+ *srcLen += srcSizeCur;
+ outSizeCur = p->decoder.dicPos - dicPos;
+ memcpy(dest, p->decoder.dic + dicPos, outSizeCur);
+ dest += outSizeCur;
+ outSize -= outSizeCur;
+ *destLen += outSizeCur;
+ if (res != 0)
+ return res;
+ if (outSizeCur == 0 || outSize == 0)
+ return SZ_OK;
+ }
+}
+
+SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAlloc *alloc)
+{
+ CLzma2Dec decoder;
+ SRes res;
+ SizeT outSize = *destLen, inSize = *srcLen;
+ Byte props[LZMA_PROPS_SIZE];
+
+ Lzma2Dec_Construct(&decoder);
+
+ *destLen = *srcLen = 0;
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+ decoder.decoder.dic = dest;
+ decoder.decoder.dicBufSize = outSize;
+
+ RINOK(Lzma2Dec_GetOldProps(prop, props));
+ RINOK(LzmaDec_AllocateProbs(&decoder.decoder, props, LZMA_PROPS_SIZE, alloc));
+
+ *srcLen = inSize;
+ res = Lzma2Dec_DecodeToDic(&decoder, outSize, src, srcLen, finishMode, status);
+ *destLen = decoder.decoder.dicPos;
+ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
+ res = SZ_ERROR_INPUT_EOF;
+
+ LzmaDec_FreeProbs(&decoder.decoder, alloc);
+ return res;
+}
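
As a worked example of the chunk-format table at the top of this file (a sketch, not part of the decoder): the header bytes E0 07 FF 01 FF 5D describe an LZMA chunk with mode 3 (reset state, new props, reset dictionary), an unpacked size of 0x800 = 2048 bytes, a packed size of 0x200 = 512 bytes, and a props byte giving lc=3, lp=0, pb=2. The snippet below decodes just those fixed header fields, following the same arithmetic as Lzma2Dec_UpdateState.

#include <stdio.h>

/* Decodes only the fixed header fields of one LZMA2 chunk. The props byte
   is present only for modes 2 and 3; error handling is omitted here. */
int main(void)
{
  const unsigned char hdr[6] = { 0xE0, 0x07, 0xFF, 0x01, 0xFF, 0x5D };
  unsigned control = hdr[0];
  unsigned mode = (control >> 5) & 3;                          /* LZMA2_GET_LZMA_MODE */
  unsigned unpackSize = (((control & 0x1F) << 16) | (hdr[1] << 8) | hdr[2]) + 1;
  unsigned packSize = ((hdr[3] << 8) | hdr[4]) + 1;
  unsigned props = hdr[5];
  unsigned lc = props % 9, lp = (props / 9) % 5, pb = (props / 9) / 5;
  printf("mode=%u unpack=%u pack=%u lc=%u lp=%u pb=%u\n",
      mode, unpackSize, packSize, lc, lp, pb);
  /* prints: mode=3 unpack=2048 pack=512 lc=3 lp=0 pb=2 */
  return 0;
}
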
diff --git a/src/libs/7zip/unix/C/Lzma2Dec.h b/src/libs/7zip/unix/C/Lzma2Dec.h
new file mode 100644
index 000000000..6bc07bbc1
--- /dev/null
+++ b/src/libs/7zip/unix/C/Lzma2Dec.h
@@ -0,0 +1,84 @@
+/* Lzma2Dec.h -- LZMA2 Decoder
+2009-05-03 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA2_DEC_H
+#define __LZMA2_DEC_H
+
+#include "LzmaDec.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---------- State Interface ---------- */
+
+typedef struct
+{
+ CLzmaDec decoder;
+ UInt32 packSize;
+ UInt32 unpackSize;
+ int state;
+ Byte control;
+ Bool needInitDic;
+ Bool needInitState;
+ Bool needInitProp;
+} CLzma2Dec;
+
+#define Lzma2Dec_Construct(p) LzmaDec_Construct(&(p)->decoder)
+#define Lzma2Dec_FreeProbs(p, alloc) LzmaDec_FreeProbs(&(p)->decoder, alloc);
+#define Lzma2Dec_Free(p, alloc) LzmaDec_Free(&(p)->decoder, alloc);
+
+SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAlloc *alloc);
+SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAlloc *alloc);
+void Lzma2Dec_Init(CLzma2Dec *p);
+
+
+/*
+finishMode:
+ It has meaning only if the decoding reaches output limit (*destLen or dicLimit).
+ LZMA_FINISH_ANY - use smallest number of input bytes
+ LZMA_FINISH_END - read EndOfStream marker after decoding
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ LZMA_STATUS_NEEDS_MORE_INPUT
+ SZ_ERROR_DATA - Data error
+*/
+
+SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+
+SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+
+
+/* ---------- One Call Interface ---------- */
+
+/*
+finishMode:
+ It has meaning only if the decoding reaches output limit (*destLen).
+ LZMA_FINISH_ANY - use smallest number of input bytes
+ LZMA_FINISH_END - read EndOfStream marker after decoding
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAlloc *alloc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
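
A minimal sketch of the one-call interface declared above, assuming the caller has the whole LZMA2 stream in memory together with the dictionary-size property byte (dicProp) that the container stores next to it; the helper names and the malloc-based allocator are assumptions of this example, not part of the header.

#include <stdlib.h>
#include "Lzma2Dec.h"

static void *SzAlloc_(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree_(void *p, void *address) { (void)p; free(address); }
static ISzAlloc g_Alloc_ = { SzAlloc_, SzFree_ };

/* outLen must hold the expected uncompressed size on entry; on return it
   holds the number of bytes actually produced. inLen works the same way
   for the compressed input. */
SRes DecodeLzma2Buffer(Byte *outBuf, SizeT *outLen,
    const Byte *inBuf, SizeT *inLen, Byte dicProp)
{
  ELzmaStatus status;
  return Lzma2Decode(outBuf, outLen, inBuf, inLen,
      dicProp, LZMA_FINISH_END, &status, &g_Alloc_);
}
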
diff --git a/src/libs/7zip/unix/C/Lzma2Enc.c b/src/libs/7zip/unix/C/Lzma2Enc.c
new file mode 100644
index 000000000..e97597f63
--- /dev/null
+++ b/src/libs/7zip/unix/C/Lzma2Enc.c
@@ -0,0 +1,477 @@
+/* Lzma2Enc.c -- LZMA2 Encoder
+2010-09-24 : Igor Pavlov : Public domain */
+
+/* #include <stdio.h> */
+#include <string.h>
+
+/* #define _7ZIP_ST */
+
+#include "Lzma2Enc.h"
+
+#ifndef _7ZIP_ST
+#include "MtCoder.h"
+#else
+#define NUM_MT_CODER_THREADS_MAX 1
+#endif
+
+#define LZMA2_CONTROL_LZMA (1 << 7)
+#define LZMA2_CONTROL_COPY_NO_RESET 2
+#define LZMA2_CONTROL_COPY_RESET_DIC 1
+#define LZMA2_CONTROL_EOF 0
+
+#define LZMA2_LCLP_MAX 4
+
+#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11))
+
+#define LZMA2_PACK_SIZE_MAX (1 << 16)
+#define LZMA2_COPY_CHUNK_SIZE LZMA2_PACK_SIZE_MAX
+#define LZMA2_UNPACK_SIZE_MAX (1 << 21)
+#define LZMA2_KEEP_WINDOW_SIZE LZMA2_UNPACK_SIZE_MAX
+
+#define LZMA2_CHUNK_SIZE_COMPRESSED_MAX ((1 << 16) + 16)
+
+
+#define PRF(x) /* x */
+
+/* ---------- CLzma2EncInt ---------- */
+
+typedef struct
+{
+ CLzmaEncHandle enc;
+ UInt64 srcPos;
+ Byte props;
+ Bool needInitState;
+ Bool needInitProp;
+} CLzma2EncInt;
+
+static SRes Lzma2EncInt_Init(CLzma2EncInt *p, const CLzma2EncProps *props)
+{
+ Byte propsEncoded[LZMA_PROPS_SIZE];
+ SizeT propsSize = LZMA_PROPS_SIZE;
+ RINOK(LzmaEnc_SetProps(p->enc, &props->lzmaProps));
+ RINOK(LzmaEnc_WriteProperties(p->enc, propsEncoded, &propsSize));
+ p->srcPos = 0;
+ p->props = propsEncoded[0];
+ p->needInitState = True;
+ p->needInitProp = True;
+ return SZ_OK;
+}
+
+SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ISeqInStream *inStream, UInt32 keepWindowSize,
+ ISzAlloc *alloc, ISzAlloc *allocBig);
+SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
+ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig);
+SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
+const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp);
+void LzmaEnc_Finish(CLzmaEncHandle pp);
+void LzmaEnc_SaveState(CLzmaEncHandle pp);
+void LzmaEnc_RestoreState(CLzmaEncHandle pp);
+
+
+static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
+ size_t *packSizeRes, ISeqOutStream *outStream)
+{
+ size_t packSizeLimit = *packSizeRes;
+ size_t packSize = packSizeLimit;
+ UInt32 unpackSize = LZMA2_UNPACK_SIZE_MAX;
+ unsigned lzHeaderSize = 5 + (p->needInitProp ? 1 : 0);
+ Bool useCopyBlock;
+ SRes res;
+
+ *packSizeRes = 0;
+ if (packSize < lzHeaderSize)
+ return SZ_ERROR_OUTPUT_EOF;
+ packSize -= lzHeaderSize;
+
+ LzmaEnc_SaveState(p->enc);
+ res = LzmaEnc_CodeOneMemBlock(p->enc, p->needInitState,
+ outBuf + lzHeaderSize, &packSize, LZMA2_PACK_SIZE_MAX, &unpackSize);
+
+ PRF(printf("\npackSize = %7d unpackSize = %7d ", packSize, unpackSize));
+
+ if (unpackSize == 0)
+ return res;
+
+ if (res == SZ_OK)
+ useCopyBlock = (packSize + 2 >= unpackSize || packSize > (1 << 16));
+ else
+ {
+ if (res != SZ_ERROR_OUTPUT_EOF)
+ return res;
+ res = SZ_OK;
+ useCopyBlock = True;
+ }
+
+ if (useCopyBlock)
+ {
+ size_t destPos = 0;
+ PRF(printf("################# COPY "));
+ while (unpackSize > 0)
+ {
+ UInt32 u = (unpackSize < LZMA2_COPY_CHUNK_SIZE) ? unpackSize : LZMA2_COPY_CHUNK_SIZE;
+ if (packSizeLimit - destPos < u + 3)
+ return SZ_ERROR_OUTPUT_EOF;
+ outBuf[destPos++] = (Byte)(p->srcPos == 0 ? LZMA2_CONTROL_COPY_RESET_DIC : LZMA2_CONTROL_COPY_NO_RESET);
+ outBuf[destPos++] = (Byte)((u - 1) >> 8);
+ outBuf[destPos++] = (Byte)(u - 1);
+ memcpy(outBuf + destPos, LzmaEnc_GetCurBuf(p->enc) - unpackSize, u);
+ unpackSize -= u;
+ destPos += u;
+ p->srcPos += u;
+ if (outStream)
+ {
+ *packSizeRes += destPos;
+ if (outStream->Write(outStream, outBuf, destPos) != destPos)
+ return SZ_ERROR_WRITE;
+ destPos = 0;
+ }
+ else
+ *packSizeRes = destPos;
+ /* needInitState = True; */
+ }
+ LzmaEnc_RestoreState(p->enc);
+ return SZ_OK;
+ }
+ {
+ size_t destPos = 0;
+ UInt32 u = unpackSize - 1;
+ UInt32 pm = (UInt32)(packSize - 1);
+ unsigned mode = (p->srcPos == 0) ? 3 : (p->needInitState ? (p->needInitProp ? 2 : 1) : 0);
+
+ PRF(printf(" "));
+
+ outBuf[destPos++] = (Byte)(LZMA2_CONTROL_LZMA | (mode << 5) | ((u >> 16) & 0x1F));
+ outBuf[destPos++] = (Byte)(u >> 8);
+ outBuf[destPos++] = (Byte)u;
+ outBuf[destPos++] = (Byte)(pm >> 8);
+ outBuf[destPos++] = (Byte)pm;
+
+ if (p->needInitProp)
+ outBuf[destPos++] = p->props;
+
+ p->needInitProp = False;
+ p->needInitState = False;
+ destPos += packSize;
+ p->srcPos += unpackSize;
+
+ if (outStream)
+ if (outStream->Write(outStream, outBuf, destPos) != destPos)
+ return SZ_ERROR_WRITE;
+ *packSizeRes = destPos;
+ return SZ_OK;
+ }
+}
+
+/* ---------- Lzma2 Props ---------- */
+
+void Lzma2EncProps_Init(CLzma2EncProps *p)
+{
+ LzmaEncProps_Init(&p->lzmaProps);
+ p->numTotalThreads = -1;
+ p->numBlockThreads = -1;
+ p->blockSize = 0;
+}
+
+void Lzma2EncProps_Normalize(CLzma2EncProps *p)
+{
+ int t1, t1n, t2, t3;
+ {
+ CLzmaEncProps lzmaProps = p->lzmaProps;
+ LzmaEncProps_Normalize(&lzmaProps);
+ t1n = lzmaProps.numThreads;
+ }
+
+ t1 = p->lzmaProps.numThreads;
+ t2 = p->numBlockThreads;
+ t3 = p->numTotalThreads;
+
+ if (t2 > NUM_MT_CODER_THREADS_MAX)
+ t2 = NUM_MT_CODER_THREADS_MAX;
+
+ if (t3 <= 0)
+ {
+ if (t2 <= 0)
+ t2 = 1;
+ t3 = t1n * t2;
+ }
+ else if (t2 <= 0)
+ {
+ t2 = t3 / t1n;
+ if (t2 == 0)
+ {
+ t1 = 1;
+ t2 = t3;
+ }
+ if (t2 > NUM_MT_CODER_THREADS_MAX)
+ t2 = NUM_MT_CODER_THREADS_MAX;
+ }
+ else if (t1 <= 0)
+ {
+ t1 = t3 / t2;
+ if (t1 == 0)
+ t1 = 1;
+ }
+ else
+ t3 = t1n * t2;
+
+ p->lzmaProps.numThreads = t1;
+ p->numBlockThreads = t2;
+ p->numTotalThreads = t3;
+ LzmaEncProps_Normalize(&p->lzmaProps);
+
+ if (p->blockSize == 0)
+ {
+ UInt32 dictSize = p->lzmaProps.dictSize;
+ UInt64 blockSize = (UInt64)dictSize << 2;
+ const UInt32 kMinSize = (UInt32)1 << 20;
+ const UInt32 kMaxSize = (UInt32)1 << 28;
+ if (blockSize < kMinSize) blockSize = kMinSize;
+ if (blockSize > kMaxSize) blockSize = kMaxSize;
+ if (blockSize < dictSize) blockSize = dictSize;
+ p->blockSize = (size_t)blockSize;
+ }
+}
+
+static SRes Progress(ICompressProgress *p, UInt64 inSize, UInt64 outSize)
+{
+ return (p && p->Progress(p, inSize, outSize) != SZ_OK) ? SZ_ERROR_PROGRESS : SZ_OK;
+}
+
+/* ---------- Lzma2 ---------- */
+
+typedef struct
+{
+ Byte propEncoded;
+ CLzma2EncProps props;
+
+ Byte *outBuf;
+
+ ISzAlloc *alloc;
+ ISzAlloc *allocBig;
+
+ CLzma2EncInt coders[NUM_MT_CODER_THREADS_MAX];
+
+ #ifndef _7ZIP_ST
+ CMtCoder mtCoder;
+ #endif
+
+} CLzma2Enc;
+
+
+/* ---------- Lzma2EncThread ---------- */
+
+static SRes Lzma2Enc_EncodeMt1(CLzma2EncInt *p, CLzma2Enc *mainEncoder,
+ ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress)
+{
+ UInt64 packTotal = 0;
+ SRes res = SZ_OK;
+
+ if (mainEncoder->outBuf == 0)
+ {
+ mainEncoder->outBuf = (Byte *)IAlloc_Alloc(mainEncoder->alloc, LZMA2_CHUNK_SIZE_COMPRESSED_MAX);
+ if (mainEncoder->outBuf == 0)
+ return SZ_ERROR_MEM;
+ }
+ RINOK(Lzma2EncInt_Init(p, &mainEncoder->props));
+ RINOK(LzmaEnc_PrepareForLzma2(p->enc, inStream, LZMA2_KEEP_WINDOW_SIZE,
+ mainEncoder->alloc, mainEncoder->allocBig));
+ for (;;)
+ {
+ size_t packSize = LZMA2_CHUNK_SIZE_COMPRESSED_MAX;
+ res = Lzma2EncInt_EncodeSubblock(p, mainEncoder->outBuf, &packSize, outStream);
+ if (res != SZ_OK)
+ break;
+ packTotal += packSize;
+ res = Progress(progress, p->srcPos, packTotal);
+ if (res != SZ_OK)
+ break;
+ if (packSize == 0)
+ break;
+ }
+ LzmaEnc_Finish(p->enc);
+ if (res == SZ_OK)
+ {
+ Byte b = 0;
+ if (outStream->Write(outStream, &b, 1) != 1)
+ return SZ_ERROR_WRITE;
+ }
+ return res;
+}
+
+#ifndef _7ZIP_ST
+
+typedef struct
+{
+ IMtCoderCallback funcTable;
+ CLzma2Enc *lzma2Enc;
+} CMtCallbackImp;
+
+static SRes MtCallbackImp_Code(void *pp, unsigned index, Byte *dest, size_t *destSize,
+ const Byte *src, size_t srcSize, int finished)
+{
+ CMtCallbackImp *imp = (CMtCallbackImp *)pp;
+ CLzma2Enc *mainEncoder = imp->lzma2Enc;
+ CLzma2EncInt *p = &mainEncoder->coders[index];
+
+ SRes res = SZ_OK;
+ {
+ size_t destLim = *destSize;
+ *destSize = 0;
+
+ if (srcSize != 0)
+ {
+ RINOK(Lzma2EncInt_Init(p, &mainEncoder->props));
+
+ RINOK(LzmaEnc_MemPrepare(p->enc, src, srcSize, LZMA2_KEEP_WINDOW_SIZE,
+ mainEncoder->alloc, mainEncoder->allocBig));
+
+ while (p->srcPos < srcSize)
+ {
+ size_t packSize = destLim - *destSize;
+ res = Lzma2EncInt_EncodeSubblock(p, dest + *destSize, &packSize, NULL);
+ if (res != SZ_OK)
+ break;
+ *destSize += packSize;
+
+ if (packSize == 0)
+ {
+ res = SZ_ERROR_FAIL;
+ break;
+ }
+
+ if (MtProgress_Set(&mainEncoder->mtCoder.mtProgress, index, p->srcPos, *destSize) != SZ_OK)
+ {
+ res = SZ_ERROR_PROGRESS;
+ break;
+ }
+ }
+ LzmaEnc_Finish(p->enc);
+ if (res != SZ_OK)
+ return res;
+ }
+ if (finished)
+ {
+ if (*destSize == destLim)
+ return SZ_ERROR_OUTPUT_EOF;
+ dest[(*destSize)++] = 0;
+ }
+ }
+ return res;
+}
+
+#endif
+
+/* ---------- Lzma2Enc ---------- */
+
+CLzma2EncHandle Lzma2Enc_Create(ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ CLzma2Enc *p = (CLzma2Enc *)alloc->Alloc(alloc, sizeof(CLzma2Enc));
+ if (p == 0)
+ return NULL;
+ Lzma2EncProps_Init(&p->props);
+ Lzma2EncProps_Normalize(&p->props);
+ p->outBuf = 0;
+ p->alloc = alloc;
+ p->allocBig = allocBig;
+ {
+ unsigned i;
+ for (i = 0; i < NUM_MT_CODER_THREADS_MAX; i++)
+ p->coders[i].enc = 0;
+ }
+ #ifndef _7ZIP_ST
+ MtCoder_Construct(&p->mtCoder);
+ #endif
+
+ return p;
+}
+
+void Lzma2Enc_Destroy(CLzma2EncHandle pp)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ unsigned i;
+ for (i = 0; i < NUM_MT_CODER_THREADS_MAX; i++)
+ {
+ CLzma2EncInt *t = &p->coders[i];
+ if (t->enc)
+ {
+ LzmaEnc_Destroy(t->enc, p->alloc, p->allocBig);
+ t->enc = 0;
+ }
+ }
+
+ #ifndef _7ZIP_ST
+ MtCoder_Destruct(&p->mtCoder);
+ #endif
+
+ IAlloc_Free(p->alloc, p->outBuf);
+ IAlloc_Free(p->alloc, pp);
+}
+
+SRes Lzma2Enc_SetProps(CLzma2EncHandle pp, const CLzma2EncProps *props)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ CLzmaEncProps lzmaProps = props->lzmaProps;
+ LzmaEncProps_Normalize(&lzmaProps);
+ if (lzmaProps.lc + lzmaProps.lp > LZMA2_LCLP_MAX)
+ return SZ_ERROR_PARAM;
+ p->props = *props;
+ Lzma2EncProps_Normalize(&p->props);
+ return SZ_OK;
+}
+
+Byte Lzma2Enc_WriteProperties(CLzma2EncHandle pp)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ unsigned i;
+ UInt32 dicSize = LzmaEncProps_GetDictSize(&p->props.lzmaProps);
+ for (i = 0; i < 40; i++)
+ if (dicSize <= LZMA2_DIC_SIZE_FROM_PROP(i))
+ break;
+ return (Byte)i;
+}
+
+SRes Lzma2Enc_Encode(CLzma2EncHandle pp,
+ ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ int i;
+
+ for (i = 0; i < p->props.numBlockThreads; i++)
+ {
+ CLzma2EncInt *t = &p->coders[i];
+ if (t->enc == NULL)
+ {
+ t->enc = LzmaEnc_Create(p->alloc);
+ if (t->enc == NULL)
+ return SZ_ERROR_MEM;
+ }
+ }
+
+ #ifndef _7ZIP_ST
+ if (p->props.numBlockThreads <= 1)
+ #endif
+ return Lzma2Enc_EncodeMt1(&p->coders[0], p, outStream, inStream, progress);
+
+ #ifndef _7ZIP_ST
+
+ {
+ CMtCallbackImp mtCallback;
+
+ mtCallback.funcTable.Code = MtCallbackImp_Code;
+ mtCallback.lzma2Enc = p;
+
+ p->mtCoder.progress = progress;
+ p->mtCoder.inStream = inStream;
+ p->mtCoder.outStream = outStream;
+ p->mtCoder.alloc = p->alloc;
+ p->mtCoder.mtCallback = &mtCallback.funcTable;
+
+ p->mtCoder.blockSize = p->props.blockSize;
+ p->mtCoder.destBlockSize = p->props.blockSize + (p->props.blockSize >> 10) + 16;
+ p->mtCoder.numThreads = p->props.numBlockThreads;
+
+ return MtCoder_Code(&p->mtCoder);
+ }
+ #endif
+}
diff --git a/src/libs/7zip/unix/C/Lzma2Enc.h b/src/libs/7zip/unix/C/Lzma2Enc.h
new file mode 100644
index 000000000..283525581
--- /dev/null
+++ b/src/libs/7zip/unix/C/Lzma2Enc.h
@@ -0,0 +1,66 @@
+/* Lzma2Enc.h -- LZMA2 Encoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA2_ENC_H
+#define __LZMA2_ENC_H
+
+#include "LzmaEnc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct
+{
+ CLzmaEncProps lzmaProps;
+ size_t blockSize;
+ int numBlockThreads;
+ int numTotalThreads;
+} CLzma2EncProps;
+
+void Lzma2EncProps_Init(CLzma2EncProps *p);
+void Lzma2EncProps_Normalize(CLzma2EncProps *p);
+
+/* ---------- CLzmaEnc2Handle Interface ---------- */
+
+/* Lzma2Enc_* functions can return the following exit codes:
+Returns:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_PARAM - Incorrect parameter in props
+  SZ_ERROR_WRITE - Write callback error
+  SZ_ERROR_PROGRESS - Break was requested by the progress callback
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+typedef void * CLzma2EncHandle;
+
+CLzma2EncHandle Lzma2Enc_Create(ISzAlloc *alloc, ISzAlloc *allocBig);
+void Lzma2Enc_Destroy(CLzma2EncHandle p);
+SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props);
+Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p);
+SRes Lzma2Enc_Encode(CLzma2EncHandle p,
+ ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress);
+
+/* ---------- One Call Interface ---------- */
+
+/* Lzma2Encode
+Return code:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_PARAM - Incorrect parameter
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+/*
+SRes Lzma2Encode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, int writeEndMark,
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
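
A hedged sketch of the handle interface above: the caller supplies ready-made ISeqInStream/ISeqOutStream objects and an ISzAlloc pair, and propByte receives the dictionary-size property that a container format would store next to the encoded data. The chosen level and thread count are illustrative assumptions, not recommendations from the header.

#include "Lzma2Enc.h"

SRes EncodeLzma2Stream(ISeqOutStream *out, ISeqInStream *in,
    ISzAlloc *alloc, ISzAlloc *allocBig, Byte *propByte)
{
  SRes res;
  CLzma2EncProps props;
  CLzma2EncHandle enc = Lzma2Enc_Create(alloc, allocBig);
  if (enc == NULL)
    return SZ_ERROR_MEM;
  Lzma2EncProps_Init(&props);
  props.lzmaProps.level = 9;        /* let Lzma2EncProps_Normalize derive dictSize etc. */
  props.numTotalThreads = 2;
  res = Lzma2Enc_SetProps(enc, &props);
  if (res == SZ_OK)
  {
    *propByte = Lzma2Enc_WriteProperties(enc);
    res = Lzma2Enc_Encode(enc, out, in, NULL /* no progress callback */);
  }
  Lzma2Enc_Destroy(enc);
  return res;
}
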
diff --git a/src/libs/7zip/unix/C/LzmaDec.c b/src/libs/7zip/unix/C/LzmaDec.c
new file mode 100644
index 000000000..2036761bf
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaDec.c
@@ -0,0 +1,999 @@
+/* LzmaDec.c -- LZMA Decoder
+2009-09-20 : Igor Pavlov : Public domain */
+
+#include "LzmaDec.h"
+
+#include <string.h>
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_INIT_SIZE 5
+
+#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
+#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
+#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
+#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
+ { UPDATE_0(p); i = (i + i); A0; } else \
+ { UPDATE_1(p); i = (i + i) + 1; A1; }
+#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)
+
+#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }
+#define TREE_DECODE(probs, limit, i) \
+ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
+
+/* #define _LZMA_SIZE_OPT */
+
+#ifdef _LZMA_SIZE_OPT
+#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
+#else
+#define TREE_6_DECODE(probs, i) \
+ { i = 1; \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ i -= 0x40; }
+#endif
+
+#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * ttt; if (code < bound)
+#define UPDATE_0_CHECK range = bound;
+#define UPDATE_1_CHECK range -= bound; code -= bound;
+#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
+ { UPDATE_0_CHECK; i = (i + i); A0; } else \
+ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
+#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
+#define TREE_DECODE_CHECK(probs, limit, i) \
+ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LzmaProps_GetNumProbs(p) ((UInt32)LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+#define LZMA_DIC_MIN (1 << 12)
+
+/* First LZMA-symbol is always decoded.
+And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization
+Out:
+ Result:
+ SZ_OK - OK
+ SZ_ERROR_DATA - Error
+ p->remainLen:
+ < kMatchSpecLenStart : normal remain
+ = kMatchSpecLenStart : finished
+ = kMatchSpecLenStart + 1 : Flush marker
+ = kMatchSpecLenStart + 2 : State Init Marker
+*/
+
+static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+ CLzmaProb *probs = p->probs;
+
+ unsigned state = p->state;
+ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
+ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
+ unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;
+ unsigned lc = p->prop.lc;
+
+ Byte *dic = p->dic;
+ SizeT dicBufSize = p->dicBufSize;
+ SizeT dicPos = p->dicPos;
+
+ UInt32 processedPos = p->processedPos;
+ UInt32 checkDicSize = p->checkDicSize;
+ unsigned len = 0;
+
+ const Byte *buf = p->buf;
+ UInt32 range = p->range;
+ UInt32 code = p->code;
+
+ do
+ {
+ CLzmaProb *prob;
+ UInt32 bound;
+ unsigned ttt;
+ unsigned posState = processedPos & pbMask;
+
+ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
+ IF_BIT_0(prob)
+ {
+ unsigned symbol;
+ UPDATE_0(prob);
+ prob = probs + Literal;
+ if (checkDicSize != 0 || processedPos != 0)
+ prob += (LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +
+ (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));
+
+ if (state < kNumLitStates)
+ {
+ state -= (state < 4) ? state : 3;
+ symbol = 1;
+ do { GET_BIT(prob + symbol, symbol) } while (symbol < 0x100);
+ }
+ else
+ {
+ unsigned matchByte = p->dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+ unsigned offs = 0x100;
+ state -= (state < 10) ? 3 : 6;
+ symbol = 1;
+ do
+ {
+ unsigned bit;
+ CLzmaProb *probLit;
+ matchByte <<= 1;
+ bit = (matchByte & offs);
+ probLit = prob + offs + bit + symbol;
+ GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)
+ }
+ while (symbol < 0x100);
+ }
+ dic[dicPos++] = (Byte)symbol;
+ processedPos++;
+ continue;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ prob = probs + IsRep + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ state += kNumStates;
+ prob = probs + LenCoder;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ if (checkDicSize == 0 && processedPos == 0)
+ return SZ_ERROR_DATA;
+ prob = probs + IsRepG0 + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+ dicPos++;
+ processedPos++;
+ state = state < kNumLitStates ? 9 : 11;
+ continue;
+ }
+ UPDATE_1(prob);
+ }
+ else
+ {
+ UInt32 distance;
+ UPDATE_1(prob);
+ prob = probs + IsRepG1 + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ distance = rep1;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ prob = probs + IsRepG2 + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ distance = rep2;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ distance = rep3;
+ rep3 = rep2;
+ }
+ rep2 = rep1;
+ }
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ state = state < kNumLitStates ? 8 : 11;
+ prob = probs + RepLenCoder;
+ }
+ {
+ unsigned limit, offset;
+ CLzmaProb *probLen = prob + LenChoice;
+ IF_BIT_0(probLen)
+ {
+ UPDATE_0(probLen);
+ probLen = prob + LenLow + (posState << kLenNumLowBits);
+ offset = 0;
+ limit = (1 << kLenNumLowBits);
+ }
+ else
+ {
+ UPDATE_1(probLen);
+ probLen = prob + LenChoice2;
+ IF_BIT_0(probLen)
+ {
+ UPDATE_0(probLen);
+ probLen = prob + LenMid + (posState << kLenNumMidBits);
+ offset = kLenNumLowSymbols;
+ limit = (1 << kLenNumMidBits);
+ }
+ else
+ {
+ UPDATE_1(probLen);
+ probLen = prob + LenHigh;
+ offset = kLenNumLowSymbols + kLenNumMidSymbols;
+ limit = (1 << kLenNumHighBits);
+ }
+ }
+ TREE_DECODE(probLen, limit, len);
+ len += offset;
+ }
+
+ if (state >= kNumStates)
+ {
+ UInt32 distance;
+ prob = probs + PosSlot +
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
+ TREE_6_DECODE(prob, distance);
+ if (distance >= kStartPosModelIndex)
+ {
+ unsigned posSlot = (unsigned)distance;
+ int numDirectBits = (int)(((distance >> 1) - 1));
+ distance = (2 | (distance & 1));
+ if (posSlot < kEndPosModelIndex)
+ {
+ distance <<= numDirectBits;
+ prob = probs + SpecPos + distance - posSlot - 1;
+ {
+ UInt32 mask = 1;
+ unsigned i = 1;
+ do
+ {
+ GET_BIT2(prob + i, i, ; , distance |= mask);
+ mask <<= 1;
+ }
+ while (--numDirectBits != 0);
+ }
+ }
+ else
+ {
+ numDirectBits -= kNumAlignBits;
+ do
+ {
+ NORMALIZE
+ range >>= 1;
+
+ {
+ UInt32 t;
+ code -= range;
+ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
+ distance = (distance << 1) + (t + 1);
+ code += range & t;
+ }
+ /*
+ distance <<= 1;
+ if (code >= range)
+ {
+ code -= range;
+ distance |= 1;
+ }
+ */
+ }
+ while (--numDirectBits != 0);
+ prob = probs + Align;
+ distance <<= kNumAlignBits;
+ {
+ unsigned i = 1;
+ GET_BIT2(prob + i, i, ; , distance |= 1);
+ GET_BIT2(prob + i, i, ; , distance |= 2);
+ GET_BIT2(prob + i, i, ; , distance |= 4);
+ GET_BIT2(prob + i, i, ; , distance |= 8);
+ }
+ if (distance == (UInt32)0xFFFFFFFF)
+ {
+ len += kMatchSpecLenStart;
+ state -= kNumStates;
+ break;
+ }
+ }
+ }
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ rep0 = distance + 1;
+ if (checkDicSize == 0)
+ {
+ if (distance >= processedPos)
+ return SZ_ERROR_DATA;
+ }
+ else if (distance >= checkDicSize)
+ return SZ_ERROR_DATA;
+ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
+ }
+
+ len += kMatchMinLen;
+
+ if (limit == dicPos)
+ return SZ_ERROR_DATA;
+ {
+ SizeT rem = limit - dicPos;
+ unsigned curLen = ((rem < len) ? (unsigned)rem : len);
+ SizeT pos = (dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0);
+
+ processedPos += curLen;
+
+ len -= curLen;
+ if (pos + curLen <= dicBufSize)
+ {
+ Byte *dest = dic + dicPos;
+ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
+ const Byte *lim = dest + curLen;
+ dicPos += curLen;
+ do
+ *(dest) = (Byte)*(dest + src);
+ while (++dest != lim);
+ }
+ else
+ {
+ do
+ {
+ dic[dicPos++] = dic[pos];
+ if (++pos == dicBufSize)
+ pos = 0;
+ }
+ while (--curLen != 0);
+ }
+ }
+ }
+ }
+ while (dicPos < limit && buf < bufLimit);
+ NORMALIZE;
+ p->buf = buf;
+ p->range = range;
+ p->code = code;
+ p->remainLen = len;
+ p->dicPos = dicPos;
+ p->processedPos = processedPos;
+ p->reps[0] = rep0;
+ p->reps[1] = rep1;
+ p->reps[2] = rep2;
+ p->reps[3] = rep3;
+ p->state = state;
+
+ return SZ_OK;
+}
+
+static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
+{
+ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
+ {
+ Byte *dic = p->dic;
+ SizeT dicPos = p->dicPos;
+ SizeT dicBufSize = p->dicBufSize;
+ unsigned len = p->remainLen;
+ UInt32 rep0 = p->reps[0];
+ if (limit - dicPos < len)
+ len = (unsigned)(limit - dicPos);
+
+ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
+ p->checkDicSize = p->prop.dicSize;
+
+ p->processedPos += len;
+ p->remainLen -= len;
+ while (len-- != 0)
+ {
+ dic[dicPos] = dic[(dicPos - rep0) + ((dicPos < rep0) ? dicBufSize : 0)];
+ dicPos++;
+ }
+ p->dicPos = dicPos;
+ }
+}
+
+static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+ do
+ {
+ SizeT limit2 = limit;
+ if (p->checkDicSize == 0)
+ {
+ UInt32 rem = p->prop.dicSize - p->processedPos;
+ if (limit - p->dicPos > rem)
+ limit2 = p->dicPos + rem;
+ }
+ RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));
+ if (p->processedPos >= p->prop.dicSize)
+ p->checkDicSize = p->prop.dicSize;
+ LzmaDec_WriteRem(p, limit);
+ }
+ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
+
+ if (p->remainLen > kMatchSpecLenStart)
+ {
+ p->remainLen = kMatchSpecLenStart;
+ }
+ return 0;
+}
+
+typedef enum
+{
+ DUMMY_ERROR, /* unexpected end of input stream */
+ DUMMY_LIT,
+ DUMMY_MATCH,
+ DUMMY_REP
+} ELzmaDummy;
+
+static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
+{
+ UInt32 range = p->range;
+ UInt32 code = p->code;
+ const Byte *bufLimit = buf + inSize;
+ CLzmaProb *probs = p->probs;
+ unsigned state = p->state;
+ ELzmaDummy res;
+
+ {
+ CLzmaProb *prob;
+ UInt32 bound;
+ unsigned ttt;
+ unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);
+
+ prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK
+
+ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
+
+ prob = probs + Literal;
+ if (p->checkDicSize != 0 || p->processedPos != 0)
+ prob += (LZMA_LIT_SIZE *
+ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
+ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
+
+ if (state < kNumLitStates)
+ {
+ unsigned symbol = 1;
+ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
+ }
+ else
+ {
+ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
+ ((p->dicPos < p->reps[0]) ? p->dicBufSize : 0)];
+ unsigned offs = 0x100;
+ unsigned symbol = 1;
+ do
+ {
+ unsigned bit;
+ CLzmaProb *probLit;
+ matchByte <<= 1;
+ bit = (matchByte & offs);
+ probLit = prob + offs + bit + symbol;
+ GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)
+ }
+ while (symbol < 0x100);
+ }
+ res = DUMMY_LIT;
+ }
+ else
+ {
+ unsigned len;
+ UPDATE_1_CHECK;
+
+ prob = probs + IsRep + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ state = 0;
+ prob = probs + LenCoder;
+ res = DUMMY_MATCH;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ res = DUMMY_REP;
+ prob = probs + IsRepG0 + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ NORMALIZE_CHECK;
+ return DUMMY_REP;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ }
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ prob = probs + IsRepG1 + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ prob = probs + IsRepG2 + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ }
+ }
+ }
+ state = kNumStates;
+ prob = probs + RepLenCoder;
+ }
+ {
+ unsigned limit, offset;
+ CLzmaProb *probLen = prob + LenChoice;
+ IF_BIT_0_CHECK(probLen)
+ {
+ UPDATE_0_CHECK;
+ probLen = prob + LenLow + (posState << kLenNumLowBits);
+ offset = 0;
+ limit = 1 << kLenNumLowBits;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ probLen = prob + LenChoice2;
+ IF_BIT_0_CHECK(probLen)
+ {
+ UPDATE_0_CHECK;
+ probLen = prob + LenMid + (posState << kLenNumMidBits);
+ offset = kLenNumLowSymbols;
+ limit = 1 << kLenNumMidBits;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ probLen = prob + LenHigh;
+ offset = kLenNumLowSymbols + kLenNumMidSymbols;
+ limit = 1 << kLenNumHighBits;
+ }
+ }
+ TREE_DECODE_CHECK(probLen, limit, len);
+ len += offset;
+ }
+
+ if (state < 4)
+ {
+ unsigned posSlot;
+ prob = probs + PosSlot +
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
+ kNumPosSlotBits);
+ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
+ if (posSlot >= kStartPosModelIndex)
+ {
+ int numDirectBits = ((posSlot >> 1) - 1);
+
+ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
+
+ if (posSlot < kEndPosModelIndex)
+ {
+ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;
+ }
+ else
+ {
+ numDirectBits -= kNumAlignBits;
+ do
+ {
+ NORMALIZE_CHECK
+ range >>= 1;
+ code -= range & (((code - range) >> 31) - 1);
+ /* if (code >= range) code -= range; */
+ }
+ while (--numDirectBits != 0);
+ prob = probs + Align;
+ numDirectBits = kNumAlignBits;
+ }
+ {
+ unsigned i = 1;
+ do
+ {
+ GET_BIT_CHECK(prob + i, i);
+ }
+ while (--numDirectBits != 0);
+ }
+ }
+ }
+ }
+ }
+ NORMALIZE_CHECK;
+ return res;
+}
+
+
+static void LzmaDec_InitRc(CLzmaDec *p, const Byte *data)
+{
+ p->code = ((UInt32)data[1] << 24) | ((UInt32)data[2] << 16) | ((UInt32)data[3] << 8) | ((UInt32)data[4]);
+ p->range = 0xFFFFFFFF;
+ p->needFlush = 0;
+}
+
+void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)
+{
+ p->needFlush = 1;
+ p->remainLen = 0;
+ p->tempBufSize = 0;
+
+ if (initDic)
+ {
+ p->processedPos = 0;
+ p->checkDicSize = 0;
+ p->needInitState = 1;
+ }
+ if (initState)
+ p->needInitState = 1;
+}
+
+void LzmaDec_Init(CLzmaDec *p)
+{
+ p->dicPos = 0;
+ LzmaDec_InitDicAndState(p, True, True);
+}
+
+static void LzmaDec_InitStateReal(CLzmaDec *p)
+{
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (p->prop.lc + p->prop.lp));
+ UInt32 i;
+ CLzmaProb *probs = p->probs;
+ for (i = 0; i < numProbs; i++)
+ probs[i] = kBitModelTotal >> 1;
+ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
+ p->state = 0;
+ p->needInitState = 0;
+}
+
+SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
+ ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT inSize = *srcLen;
+ (*srcLen) = 0;
+ LzmaDec_WriteRem(p, dicLimit);
+
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+
+ while (p->remainLen != kMatchSpecLenStart)
+ {
+ int checkEndMarkNow;
+
+ if (p->needFlush != 0)
+ {
+ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
+ p->tempBuf[p->tempBufSize++] = *src++;
+ if (p->tempBufSize < RC_INIT_SIZE)
+ {
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ if (p->tempBuf[0] != 0)
+ return SZ_ERROR_DATA;
+
+ LzmaDec_InitRc(p, p->tempBuf);
+ p->tempBufSize = 0;
+ }
+
+ checkEndMarkNow = 0;
+ if (p->dicPos >= dicLimit)
+ {
+ if (p->remainLen == 0 && p->code == 0)
+ {
+ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
+ return SZ_OK;
+ }
+ if (finishMode == LZMA_FINISH_ANY)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_OK;
+ }
+ if (p->remainLen != 0)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_ERROR_DATA;
+ }
+ checkEndMarkNow = 1;
+ }
+
+ if (p->needInitState)
+ LzmaDec_InitStateReal(p);
+
+ if (p->tempBufSize == 0)
+ {
+ SizeT processed;
+ const Byte *bufLimit;
+ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+ {
+ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
+ if (dummyRes == DUMMY_ERROR)
+ {
+ memcpy(p->tempBuf, src, inSize);
+ p->tempBufSize = (unsigned)inSize;
+ (*srcLen) += inSize;
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_ERROR_DATA;
+ }
+ bufLimit = src;
+ }
+ else
+ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
+ p->buf = src;
+ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
+ return SZ_ERROR_DATA;
+ processed = (SizeT)(p->buf - src);
+ (*srcLen) += processed;
+ src += processed;
+ inSize -= processed;
+ }
+ else
+ {
+ unsigned rem = p->tempBufSize, lookAhead = 0;
+ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
+ p->tempBuf[rem++] = src[lookAhead++];
+ p->tempBufSize = rem;
+ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+ {
+ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, rem);
+ if (dummyRes == DUMMY_ERROR)
+ {
+ (*srcLen) += lookAhead;
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_ERROR_DATA;
+ }
+ }
+ p->buf = p->tempBuf;
+ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
+ return SZ_ERROR_DATA;
+ lookAhead -= (rem - (unsigned)(p->buf - p->tempBuf));
+ (*srcLen) += lookAhead;
+ src += lookAhead;
+ inSize -= lookAhead;
+ p->tempBufSize = 0;
+ }
+ }
+ if (p->code == 0)
+ *status = LZMA_STATUS_FINISHED_WITH_MARK;
+ return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;
+}
+
+SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT outSize = *destLen;
+ SizeT inSize = *srcLen;
+ *srcLen = *destLen = 0;
+ for (;;)
+ {
+ SizeT inSizeCur = inSize, outSizeCur, dicPos;
+ ELzmaFinishMode curFinishMode;
+ SRes res;
+ if (p->dicPos == p->dicBufSize)
+ p->dicPos = 0;
+ dicPos = p->dicPos;
+ if (outSize > p->dicBufSize - dicPos)
+ {
+ outSizeCur = p->dicBufSize;
+ curFinishMode = LZMA_FINISH_ANY;
+ }
+ else
+ {
+ outSizeCur = dicPos + outSize;
+ curFinishMode = finishMode;
+ }
+
+ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
+ src += inSizeCur;
+ inSize -= inSizeCur;
+ *srcLen += inSizeCur;
+ outSizeCur = p->dicPos - dicPos;
+ memcpy(dest, p->dic + dicPos, outSizeCur);
+ dest += outSizeCur;
+ outSize -= outSizeCur;
+ *destLen += outSizeCur;
+ if (res != 0)
+ return res;
+ if (outSizeCur == 0 || outSize == 0)
+ return SZ_OK;
+ }
+}
+
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->probs);
+ p->probs = 0;
+}
+
+static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->dic);
+ p->dic = 0;
+}
+
+void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)
+{
+ LzmaDec_FreeProbs(p, alloc);
+ LzmaDec_FreeDict(p, alloc);
+}
+
+SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
+{
+ UInt32 dicSize;
+ Byte d;
+
+ if (size < LZMA_PROPS_SIZE)
+ return SZ_ERROR_UNSUPPORTED;
+ else
+ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
+
+ if (dicSize < LZMA_DIC_MIN)
+ dicSize = LZMA_DIC_MIN;
+ p->dicSize = dicSize;
+
+ d = data[0];
+ if (d >= (9 * 5 * 5))
+ return SZ_ERROR_UNSUPPORTED;
+
+ p->lc = d % 9;
+ d /= 9;
+ p->pb = d / 5;
+ p->lp = d % 5;
+
+ return SZ_OK;
+}
+
+static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)
+{
+ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
+ if (p->probs == 0 || numProbs != p->numProbs)
+ {
+ LzmaDec_FreeProbs(p, alloc);
+ p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));
+ p->numProbs = numProbs;
+ if (p->probs == 0)
+ return SZ_ERROR_MEM;
+ }
+ return SZ_OK;
+}
+
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+{
+ CLzmaProps propNew;
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+ p->prop = propNew;
+ return SZ_OK;
+}
+
+SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)
+{
+ CLzmaProps propNew;
+ SizeT dicBufSize;
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+ dicBufSize = propNew.dicSize;
+ if (p->dic == 0 || dicBufSize != p->dicBufSize)
+ {
+ LzmaDec_FreeDict(p, alloc);
+ p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);
+ if (p->dic == 0)
+ {
+ LzmaDec_FreeProbs(p, alloc);
+ return SZ_ERROR_MEM;
+ }
+ }
+ p->dicBufSize = dicBufSize;
+ p->prop = propNew;
+ return SZ_OK;
+}
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+ ELzmaStatus *status, ISzAlloc *alloc)
+{
+ CLzmaDec p;
+ SRes res;
+ SizeT inSize = *srcLen;
+ SizeT outSize = *destLen;
+ *srcLen = *destLen = 0;
+ if (inSize < RC_INIT_SIZE)
+ return SZ_ERROR_INPUT_EOF;
+
+ LzmaDec_Construct(&p);
+ res = LzmaDec_AllocateProbs(&p, propData, propSize, alloc);
+ if (res != 0)
+ return res;
+ p.dic = dest;
+ p.dicBufSize = outSize;
+
+ LzmaDec_Init(&p);
+
+ *srcLen = inSize;
+ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
+
+ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
+ res = SZ_ERROR_INPUT_EOF;
+
+ (*destLen) = p.dicPos;
+ LzmaDec_FreeProbs(&p, alloc);
+ return res;
+}
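
A small self-check (not part of the SDK) for LzmaProps_Decode above, using the common .lzma property bytes 5D 00 00 10 00, which decode to lc=3, lp=0, pb=2 and a 1 MiB dictionary.

#include <assert.h>
#include "LzmaDec.h"

/* Verifies the lc/lp/pb decomposition and the little-endian dictionary size. */
void CheckLzmaProps(void)
{
  static const Byte props[LZMA_PROPS_SIZE] = { 0x5D, 0x00, 0x00, 0x10, 0x00 };
  CLzmaProps p;
  SRes res = LzmaProps_Decode(&p, props, LZMA_PROPS_SIZE);
  assert(res == SZ_OK);
  assert(p.lc == 3 && p.lp == 0 && p.pb == 2);
  assert(p.dicSize == ((UInt32)1 << 20));
}
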
diff --git a/src/libs/7zip/unix/C/LzmaDec.h b/src/libs/7zip/unix/C/LzmaDec.h
new file mode 100644
index 000000000..bf7f084ba
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaDec.h
@@ -0,0 +1,231 @@
+/* LzmaDec.h -- LZMA Decoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_DEC_H
+#define __LZMA_DEC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* #define _LZMA_PROB32 */
+/* _LZMA_PROB32 can increase the speed on some CPUs,
+ but memory usage for CLzmaDec::probs will be doubled in that case */
+
+#ifdef _LZMA_PROB32
+#define CLzmaProb UInt32
+#else
+#define CLzmaProb UInt16
+#endif
+
+
+/* ---------- LZMA Properties ---------- */
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaProps
+{
+ unsigned lc, lp, pb;
+ UInt32 dicSize;
+} CLzmaProps;
+
+/* LzmaProps_Decode - decodes properties
+Returns:
+ SZ_OK
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+*/
+
+SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
+
+
+/* ---------- LZMA Decoder state ---------- */
+
+/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case.
+ Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */
+
+#define LZMA_REQUIRED_INPUT_MAX 20
+
+typedef struct
+{
+ CLzmaProps prop;
+ CLzmaProb *probs;
+ Byte *dic;
+ const Byte *buf;
+ UInt32 range, code;
+ SizeT dicPos;
+ SizeT dicBufSize;
+ UInt32 processedPos;
+ UInt32 checkDicSize;
+ unsigned state;
+ UInt32 reps[4];
+ unsigned remainLen;
+ int needFlush;
+ int needInitState;
+ UInt32 numProbs;
+ unsigned tempBufSize;
+ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
+} CLzmaDec;
+
+#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }
+
+void LzmaDec_Init(CLzmaDec *p);
+
+/* There are two types of LZMA streams:
+ 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.
+   1) Stream without end mark. You must know the exact uncompressed size to decompress such a stream. */
+
+typedef enum
+{
+ LZMA_FINISH_ANY, /* finish at any point */
+ LZMA_FINISH_END /* block must be finished at the end */
+} ELzmaFinishMode;
+
+/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!!
+
+ You must use LZMA_FINISH_END, when you know that current output buffer
+ covers last bytes of block. In other cases you must use LZMA_FINISH_ANY.
+
+ If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK,
+ and output value of destLen will be less than output buffer size limit.
+ You can check status result also.
+
+ You can use multiple checks to test data integrity after full decompression:
+ 1) Check Result and "status" variable.
+ 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize.
+ 3) Check that output(srcLen) = compressedSize, if you know real compressedSize.
+ You must use correct finish mode in that case. */
+
+typedef enum
+{
+ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
+ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
+ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
+ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
+} ELzmaStatus;
+
+/* ELzmaStatus is used only as output value for function call */
+
+
+/* ---------- Interfaces ---------- */
+
+/* There are 3 levels of interfaces:
+ 1) Dictionary Interface
+ 2) Buffer Interface
+ 3) One Call Interface
+ You can select any of these interfaces, but don't mix functions from different
+ groups for same object. */
+
+
+/* There are two variants to allocate state for Dictionary Interface:
+ 1) LzmaDec_Allocate / LzmaDec_Free
+ 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
+ You can use variant 2, if you set dictionary buffer manually.
+ For Buffer Interface you must always use variant 1.
+
+LzmaDec_Allocate* can return:
+ SZ_OK
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+*/
+
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);
+
+SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);
+void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);
+
+/* ---------- Dictionary Interface ---------- */
+
+/* You can use it, if you want to eliminate the overhead for data copying from
+ dictionary to some other external buffer.
+ You must work with CLzmaDec variables directly in this interface.
+
+ STEPS:
+    LzmaDec_Construct()
+ LzmaDec_Allocate()
+ for (each new stream)
+ {
+ LzmaDec_Init()
+ while (it needs more decompression)
+ {
+ LzmaDec_DecodeToDic()
+ use data from CLzmaDec::dic and update CLzmaDec::dicPos
+ }
+ }
+ LzmaDec_Free()
+*/
+
+/* LzmaDec_DecodeToDic
+
+ The decoding to internal dictionary buffer (CLzmaDec::dic).
+ You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!!
+
+finishMode:
+ It has meaning only if the decoding reaches output limit (dicLimit).
+ LZMA_FINISH_ANY - Decode just dicLimit bytes.
+ LZMA_FINISH_END - Stream must be finished after dicLimit.
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ LZMA_STATUS_NEEDS_MORE_INPUT
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+ SZ_ERROR_DATA - Data error
+*/
+
+SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+
+
+/* ---------- Buffer Interface ---------- */
+
+/* It's a zlib-like interface.
+ See LzmaDec_DecodeToDic description for information about STEPS and return results,
+ but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need
+ to work with CLzmaDec variables manually.
+
+finishMode:
+ It has meaning only if the decoding reaches output limit (*destLen).
+ LZMA_FINISH_ANY - Decode just destLen bytes.
+ LZMA_FINISH_END - Stream must be finished after (*destLen).
+*/
+
+SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+
+
+/* ---------- One Call Interface ---------- */
+
+/* LzmaDecode
+
+finishMode:
+ It has meaning only if the decoding reaches output limit (*destLen).
+ LZMA_FINISH_ANY - Decode just destLen bytes.
+ LZMA_FINISH_END - Stream must be finished after (*destLen).
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+ ELzmaStatus *status, ISzAlloc *alloc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
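
A sketch of the Dictionary Interface loop from the STEPS comment above, assuming the whole compressed stream is already in memory and that ConsumeOutput is a caller-provided sink for decompressed bytes; both assumptions belong to this example, not to the header.

#include "LzmaDec.h"

SRes DecodeWithDic(const Byte *props, ISzAlloc *alloc,
    const Byte *src, SizeT srcLen,
    void (*ConsumeOutput)(const Byte *data, SizeT size))
{
  CLzmaDec dec;
  SRes res;
  LzmaDec_Construct(&dec);
  res = LzmaDec_Allocate(&dec, props, LZMA_PROPS_SIZE, alloc);
  if (res != SZ_OK)
    return res;
  LzmaDec_Init(&dec);
  for (;;)
  {
    ELzmaStatus status;
    SizeT inProcessed = srcLen;
    SizeT dicPosBefore;
    if (dec.dicPos == dec.dicBufSize)
      dec.dicPos = 0;                        /* wrap dicPos, as required above */
    dicPosBefore = dec.dicPos;
    res = LzmaDec_DecodeToDic(&dec, dec.dicBufSize, src, &inProcessed,
        LZMA_FINISH_ANY, &status);
    ConsumeOutput(dec.dic + dicPosBefore, dec.dicPos - dicPosBefore);
    src += inProcessed;
    srcLen -= inProcessed;
    if (res != SZ_OK || status == LZMA_STATUS_FINISHED_WITH_MARK)
      break;
    if (status == LZMA_STATUS_NEEDS_MORE_INPUT)
    {
      res = SZ_ERROR_INPUT_EOF;              /* the full stream was given in src */
      break;
    }
    if (status == LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK && srcLen == 0)
      break;                                 /* stream without end marker */
  }
  LzmaDec_Free(&dec, alloc);
  return res;
}
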
diff --git a/src/libs/7zip/unix/C/LzmaEnc.c b/src/libs/7zip/unix/C/LzmaEnc.c
new file mode 100644
index 000000000..cf131388a
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaEnc.c
@@ -0,0 +1,2268 @@
+/* LzmaEnc.c -- LZMA Encoder
+2010-04-16 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+/* #define SHOW_STAT */
+/* #define SHOW_STAT2 */
+
+#if defined(SHOW_STAT) || defined(SHOW_STAT2)
+#include <stdio.h>
+#endif
+
+#include "LzmaEnc.h"
+
+#include "LzFind.h"
+#ifndef _7ZIP_ST
+#include "LzFindMt.h"
+#endif
+
+#ifdef SHOW_STAT
+static int ttt = 0;
+#endif
+
+#define kBlockSizeMax ((1 << LZMA_NUM_BLOCK_SIZE_BITS) - 1)
+
+#define kBlockSize (9 << 10)
+#define kUnpackBlockSize (1 << 18)
+#define kMatchArraySize (1 << 21)
+#define kMatchRecordMaxSize ((LZMA_MATCH_LEN_MAX * 2 + 3) * LZMA_MATCH_LEN_MAX)
+
+#define kNumMaxDirectBits (31)
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+#define kProbInitValue (kBitModelTotal >> 1)
+
+#define kNumMoveReducingBits 4
+#define kNumBitPriceShiftBits 4
+#define kBitPrice (1 << kNumBitPriceShiftBits)
+
+void LzmaEncProps_Init(CLzmaEncProps *p)
+{
+ p->level = 5;
+ p->dictSize = p->mc = 0;
+ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
+ p->writeEndMark = 0;
+}
+
+void LzmaEncProps_Normalize(CLzmaEncProps *p)
+{
+ int level = p->level;
+ if (level < 0) level = 5;
+ p->level = level;
+ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level == 6 ? (1 << 25) : (1 << 26)));
+ if (p->lc < 0) p->lc = 3;
+ if (p->lp < 0) p->lp = 0;
+ if (p->pb < 0) p->pb = 2;
+ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
+ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
+ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
+ if (p->numHashBytes < 0) p->numHashBytes = 4;
+ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
+ if (p->numThreads < 0)
+ p->numThreads =
+ #ifndef _7ZIP_ST
+ ((p->btMode && p->algo) ? 2 : 1);
+ #else
+ 1;
+ #endif
+}
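+
+/* For example, the default level 5 normalizes to dictSize = 1 << 24 (16 MB),
+   algo = 1, fb = 32, btMode = 1, numHashBytes = 4 and mc = 32, with
+   numThreads = 2 in the multithreaded build; these values follow directly
+   from the code above. */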
+
+UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
+{
+ CLzmaEncProps props = *props2;
+ LzmaEncProps_Normalize(&props);
+ return props.dictSize;
+}
+
+/* #define LZMA_LOG_BSR */
+/* Define it for Intel's CPU */
+
+
+#ifdef LZMA_LOG_BSR
+
+#define kDicLogSizeMaxCompress 30
+
+#define BSR2_RET(pos, res) { unsigned long i; _BitScanReverse(&i, (pos)); res = (i + i) + ((pos >> (i - 1)) & 1); }
+
+UInt32 GetPosSlot1(UInt32 pos)
+{
+ UInt32 res;
+ BSR2_RET(pos, res);
+ return res;
+}
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
+
+#else
+
+#define kNumLogBits (9 + (int)sizeof(size_t) / 2)
+#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
+
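+/* g_FastPos[dist] is the LZMA position slot for a small distance: slot s
+   (for s >= 2) covers the 1 << ((s >> 1) - 1) distances starting at
+   (2 | (s & 1)) << ((s >> 1) - 1), i.e. a slot records the two top bits of the
+   distance together with its bit length. BSR2_RET below extends the lookup to
+   larger distances by shifting the distance down first. */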
+void LzmaEnc_FastPosInit(Byte *g_FastPos)
+{
+ int c = 2, slotFast;
+ g_FastPos[0] = 0;
+ g_FastPos[1] = 1;
+
+ for (slotFast = 2; slotFast < kNumLogBits * 2; slotFast++)
+ {
+ UInt32 k = (1 << ((slotFast >> 1) - 1));
+ UInt32 j;
+ for (j = 0; j < k; j++, c++)
+ g_FastPos[c] = (Byte)slotFast;
+ }
+}
+
+#define BSR2_RET(pos, res) { UInt32 i = 6 + ((kNumLogBits - 1) & \
+ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
+ res = p->g_FastPos[pos >> i] + (i * 2); }
+/*
+#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
+ p->g_FastPos[pos >> 6] + 12 : \
+ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
+*/
+
+#define GetPosSlot1(pos) p->g_FastPos[pos]
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos]; else BSR2_RET(pos, res); }
+
+#endif
+
+
+#define LZMA_NUM_REPS 4
+
+typedef unsigned CState;
+
+typedef struct
+{
+ UInt32 price;
+
+ CState state;
+ int prev1IsChar;
+ int prev2;
+
+ UInt32 posPrev2;
+ UInt32 backPrev2;
+
+ UInt32 posPrev;
+ UInt32 backPrev;
+ UInt32 backs[LZMA_NUM_REPS];
+} COptimal;
+
+#define kNumOpts (1 << 12)
+
+#define kNumLenToPosStates 4
+#define kNumPosSlotBits 6
+#define kDicLogSizeMin 0
+#define kDicLogSizeMax 32
+#define kDistTableSizeMax (kDicLogSizeMax * 2)
+
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+#define kAlignMask (kAlignTableSize - 1)
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumPosModels (kEndPosModelIndex - kStartPosModelIndex)
+
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#ifdef _LZMA_PROB32
+#define CLzmaProb UInt32
+#else
+#define CLzmaProb UInt16
+#endif
+
+#define LZMA_PB_MAX 4
+#define LZMA_LC_MAX 8
+#define LZMA_LP_MAX 4
+
+#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
+
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define kLenNumSymbolsTotal (kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)
+
+#define LZMA_MATCH_LEN_MIN 2
+#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
+
+#define kNumStates 12
+
+typedef struct
+{
+ CLzmaProb choice;
+ CLzmaProb choice2;
+ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << kLenNumLowBits];
+ CLzmaProb mid[LZMA_NUM_PB_STATES_MAX << kLenNumMidBits];
+ CLzmaProb high[kLenNumHighSymbols];
+} CLenEnc;
+
+typedef struct
+{
+ CLenEnc p;
+ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
+ UInt32 tableSize;
+ UInt32 counters[LZMA_NUM_PB_STATES_MAX];
+} CLenPriceEnc;
+
+typedef struct
+{
+ UInt32 range;
+ Byte cache;
+ UInt64 low;
+ UInt64 cacheSize;
+ Byte *buf;
+ Byte *bufLim;
+ Byte *bufBase;
+ ISeqOutStream *outStream;
+ UInt64 processed;
+ SRes res;
+} CRangeEnc;
+
+typedef struct
+{
+ CLzmaProb *litProbs;
+
+ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+ CLzmaProb isRep[kNumStates];
+ CLzmaProb isRepG0[kNumStates];
+ CLzmaProb isRepG1[kNumStates];
+ CLzmaProb isRepG2[kNumStates];
+ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
+ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+
+ CLenPriceEnc lenEnc;
+ CLenPriceEnc repLenEnc;
+
+ UInt32 reps[LZMA_NUM_REPS];
+ UInt32 state;
+} CSaveState;
+
+typedef struct
+{
+ IMatchFinder matchFinder;
+ void *matchFinderObj;
+
+ #ifndef _7ZIP_ST
+ Bool mtMode;
+ CMatchFinderMt matchFinderMt;
+ #endif
+
+ CMatchFinder matchFinderBase;
+
+ #ifndef _7ZIP_ST
+ Byte pad[128];
+ #endif
+
+ UInt32 optimumEndIndex;
+ UInt32 optimumCurrentIndex;
+
+ UInt32 longestMatchLength;
+ UInt32 numPairs;
+ UInt32 numAvail;
+ COptimal opt[kNumOpts];
+
+ #ifndef LZMA_LOG_BSR
+ Byte g_FastPos[1 << kNumLogBits];
+ #endif
+
+ UInt32 ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
+ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
+ UInt32 numFastBytes;
+ UInt32 additionalOffset;
+ UInt32 reps[LZMA_NUM_REPS];
+ UInt32 state;
+
+ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
+ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
+ UInt32 alignPrices[kAlignTableSize];
+ UInt32 alignPriceCount;
+
+ UInt32 distTableSize;
+
+ unsigned lc, lp, pb;
+ unsigned lpMask, pbMask;
+
+ CLzmaProb *litProbs;
+
+ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+ CLzmaProb isRep[kNumStates];
+ CLzmaProb isRepG0[kNumStates];
+ CLzmaProb isRepG1[kNumStates];
+ CLzmaProb isRepG2[kNumStates];
+ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+ CLzmaProb posEncoders[kNumFullDistances - kEndPosModelIndex];
+ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+
+ CLenPriceEnc lenEnc;
+ CLenPriceEnc repLenEnc;
+
+ unsigned lclp;
+
+ Bool fastMode;
+
+ CRangeEnc rc;
+
+ Bool writeEndMark;
+ UInt64 nowPos64;
+ UInt32 matchPriceCount;
+ Bool finished;
+ Bool multiThread;
+
+ SRes result;
+ UInt32 dictSize;
+ UInt32 matchFinderCycles;
+
+ int needInit;
+
+ CSaveState saveState;
+} CLzmaEnc;
+
+void LzmaEnc_SaveState(CLzmaEncHandle pp)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ CSaveState *dest = &p->saveState;
+ int i;
+ dest->lenEnc = p->lenEnc;
+ dest->repLenEnc = p->repLenEnc;
+ dest->state = p->state;
+
+ for (i = 0; i < kNumStates; i++)
+ {
+ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
+ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
+ }
+ for (i = 0; i < kNumLenToPosStates; i++)
+ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
+ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
+ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
+ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
+ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
+ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
+ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
+ memcpy(dest->reps, p->reps, sizeof(p->reps));
+ memcpy(dest->litProbs, p->litProbs, (0x300 << p->lclp) * sizeof(CLzmaProb));
+}
+
+void LzmaEnc_RestoreState(CLzmaEncHandle pp)
+{
+ CLzmaEnc *dest = (CLzmaEnc *)pp;
+ const CSaveState *p = &dest->saveState;
+ int i;
+ dest->lenEnc = p->lenEnc;
+ dest->repLenEnc = p->repLenEnc;
+ dest->state = p->state;
+
+ for (i = 0; i < kNumStates; i++)
+ {
+ memcpy(dest->isMatch[i], p->isMatch[i], sizeof(p->isMatch[i]));
+ memcpy(dest->isRep0Long[i], p->isRep0Long[i], sizeof(p->isRep0Long[i]));
+ }
+ for (i = 0; i < kNumLenToPosStates; i++)
+ memcpy(dest->posSlotEncoder[i], p->posSlotEncoder[i], sizeof(p->posSlotEncoder[i]));
+ memcpy(dest->isRep, p->isRep, sizeof(p->isRep));
+ memcpy(dest->isRepG0, p->isRepG0, sizeof(p->isRepG0));
+ memcpy(dest->isRepG1, p->isRepG1, sizeof(p->isRepG1));
+ memcpy(dest->isRepG2, p->isRepG2, sizeof(p->isRepG2));
+ memcpy(dest->posEncoders, p->posEncoders, sizeof(p->posEncoders));
+ memcpy(dest->posAlignEncoder, p->posAlignEncoder, sizeof(p->posAlignEncoder));
+ memcpy(dest->reps, p->reps, sizeof(p->reps));
+ memcpy(dest->litProbs, p->litProbs, (0x300 << dest->lclp) * sizeof(CLzmaProb));
+}
+
+SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ CLzmaEncProps props = *props2;
+ LzmaEncProps_Normalize(&props);
+
+ if (props.lc > LZMA_LC_MAX || props.lp > LZMA_LP_MAX || props.pb > LZMA_PB_MAX ||
+ props.dictSize > ((UInt32)1 << kDicLogSizeMaxCompress) || props.dictSize > ((UInt32)1 << 30))
+ return SZ_ERROR_PARAM;
+ p->dictSize = props.dictSize;
+ p->matchFinderCycles = props.mc;
+ {
+ unsigned fb = props.fb;
+ if (fb < 5)
+ fb = 5;
+ if (fb > LZMA_MATCH_LEN_MAX)
+ fb = LZMA_MATCH_LEN_MAX;
+ p->numFastBytes = fb;
+ }
+ p->lc = props.lc;
+ p->lp = props.lp;
+ p->pb = props.pb;
+ p->fastMode = (props.algo == 0);
+ p->matchFinderBase.btMode = props.btMode;
+ {
+ UInt32 numHashBytes = 4;
+ if (props.btMode)
+ {
+ if (props.numHashBytes < 2)
+ numHashBytes = 2;
+ else if (props.numHashBytes < 4)
+ numHashBytes = props.numHashBytes;
+ }
+ p->matchFinderBase.numHashBytes = numHashBytes;
+ }
+
+ p->matchFinderBase.cutValue = props.mc;
+
+ p->writeEndMark = props.writeEndMark;
+
+ #ifndef _7ZIP_ST
+ /*
+ if (newMultiThread != _multiThread)
+ {
+ ReleaseMatchFinder();
+ _multiThread = newMultiThread;
+ }
+ */
+ p->multiThread = (props.numThreads > 1);
+ #endif
+
+ return SZ_OK;
+}
+
+static const int kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
+static const int kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
+static const int kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
+static const int kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
+
+#define IsCharState(s) ((s) < 7)
+
+#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
+
+#define kInfinityPrice (1 << 30)
+
+static void RangeEnc_Construct(CRangeEnc *p)
+{
+ p->outStream = 0;
+ p->bufBase = 0;
+}
+
+#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
+
+#define RC_BUF_SIZE (1 << 16)
+static int RangeEnc_Alloc(CRangeEnc *p, ISzAlloc *alloc)
+{
+ if (p->bufBase == 0)
+ {
+ p->bufBase = (Byte *)alloc->Alloc(alloc, RC_BUF_SIZE);
+ if (p->bufBase == 0)
+ return 0;
+ p->bufLim = p->bufBase + RC_BUF_SIZE;
+ }
+ return 1;
+}
+
+static void RangeEnc_Free(CRangeEnc *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->bufBase);
+ p->bufBase = 0;
+}
+
+static void RangeEnc_Init(CRangeEnc *p)
+{
+ /* Stream.Init(); */
+ p->low = 0;
+ p->range = 0xFFFFFFFF;
+ p->cacheSize = 1;
+ p->cache = 0;
+
+ p->buf = p->bufBase;
+
+ p->processed = 0;
+ p->res = SZ_OK;
+}
+
+static void RangeEnc_FlushStream(CRangeEnc *p)
+{
+ size_t num;
+ if (p->res != SZ_OK)
+ return;
+ num = p->buf - p->bufBase;
+ if (num != p->outStream->Write(p->outStream, p->bufBase, num))
+ p->res = SZ_ERROR_WRITE;
+ p->processed += num;
+ p->buf = p->bufBase;
+}
+
+static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
+{
+ if ((UInt32)p->low < (UInt32)0xFF000000 || (int)(p->low >> 32) != 0)
+ {
+ Byte temp = p->cache;
+ do
+ {
+ Byte *buf = p->buf;
+ *buf++ = (Byte)(temp + (Byte)(p->low >> 32));
+ p->buf = buf;
+ if (buf == p->bufLim)
+ RangeEnc_FlushStream(p);
+ temp = 0xFF;
+ }
+ while (--p->cacheSize != 0);
+ p->cache = (Byte)((UInt32)p->low >> 24);
+ }
+ p->cacheSize++;
+ p->low = (UInt32)p->low << 8;
+}
+
+static void RangeEnc_FlushData(CRangeEnc *p)
+{
+ int i;
+ for (i = 0; i < 5; i++)
+ RangeEnc_ShiftLow(p);
+}
+
+static void RangeEnc_EncodeDirectBits(CRangeEnc *p, UInt32 value, int numBits)
+{
+ do
+ {
+ p->range >>= 1;
+ p->low += p->range & (0 - ((value >> --numBits) & 1));
+ if (p->range < kTopValue)
+ {
+ p->range <<= 8;
+ RangeEnc_ShiftLow(p);
+ }
+ }
+ while (numBits != 0);
+}
+
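+/* Binary range-coder step: the current range is split in proportion to the
+   11-bit probability of a 0 bit; the lower part codes a 0 and the upper part
+   a 1, the probability is moved towards the coded bit by 1/32 of the remaining
+   gap (kNumMoveBits = 5), and the range is renormalized byte by byte whenever
+   it drops below kTopValue. */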
+static void RangeEnc_EncodeBit(CRangeEnc *p, CLzmaProb *prob, UInt32 symbol)
+{
+ UInt32 ttt = *prob;
+ UInt32 newBound = (p->range >> kNumBitModelTotalBits) * ttt;
+ if (symbol == 0)
+ {
+ p->range = newBound;
+ ttt += (kBitModelTotal - ttt) >> kNumMoveBits;
+ }
+ else
+ {
+ p->low += newBound;
+ p->range -= newBound;
+ ttt -= ttt >> kNumMoveBits;
+ }
+ *prob = (CLzmaProb)ttt;
+ if (p->range < kTopValue)
+ {
+ p->range <<= 8;
+ RangeEnc_ShiftLow(p);
+ }
+}
+
+static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol)
+{
+ symbol |= 0x100;
+ do
+ {
+ RangeEnc_EncodeBit(p, probs + (symbol >> 8), (symbol >> 7) & 1);
+ symbol <<= 1;
+ }
+ while (symbol < 0x10000);
+}
+
+static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 symbol, UInt32 matchByte)
+{
+ UInt32 offs = 0x100;
+ symbol |= 0x100;
+ do
+ {
+ matchByte <<= 1;
+ RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (symbol >> 8)), (symbol >> 7) & 1);
+ symbol <<= 1;
+ offs &= ~(matchByte ^ symbol);
+ }
+ while (symbol < 0x10000);
+}
+
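+/* ProbPrices[prob >> kNumMoveReducingBits] approximates the cost of coding a
+   bit whose probability is "prob": roughly -log2(prob / kBitModelTotal) in
+   1/16-bit units (kBitPrice = 16), computed below by repeated squaring and
+   read through the GET_PRICE* macros. */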
+void LzmaEnc_InitPriceTables(UInt32 *ProbPrices)
+{
+ UInt32 i;
+ for (i = (1 << kNumMoveReducingBits) / 2; i < kBitModelTotal; i += (1 << kNumMoveReducingBits))
+ {
+ const int kCyclesBits = kNumBitPriceShiftBits;
+ UInt32 w = i;
+ UInt32 bitCount = 0;
+ int j;
+ for (j = 0; j < kCyclesBits; j++)
+ {
+ w = w * w;
+ bitCount <<= 1;
+ while (w >= ((UInt32)1 << 16))
+ {
+ w >>= 1;
+ bitCount++;
+ }
+ }
+ ProbPrices[i >> kNumMoveReducingBits] = ((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
+ }
+}
+
+
+#define GET_PRICE(prob, symbol) \
+ p->ProbPrices[((prob) ^ (((-(int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICEa(prob, symbol) \
+ ProbPrices[((prob) ^ ((-((int)(symbol))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+#define GET_PRICE_0a(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1a(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 symbol, UInt32 *ProbPrices)
+{
+ UInt32 price = 0;
+ symbol |= 0x100;
+ do
+ {
+ price += GET_PRICEa(probs[symbol >> 8], (symbol >> 7) & 1);
+ symbol <<= 1;
+ }
+ while (symbol < 0x10000);
+ return price;
+}
+
+static UInt32 LitEnc_GetPriceMatched(const CLzmaProb *probs, UInt32 symbol, UInt32 matchByte, UInt32 *ProbPrices)
+{
+ UInt32 price = 0;
+ UInt32 offs = 0x100;
+ symbol |= 0x100;
+ do
+ {
+ matchByte <<= 1;
+ price += GET_PRICEa(probs[offs + (matchByte & offs) + (symbol >> 8)], (symbol >> 7) & 1);
+ symbol <<= 1;
+ offs &= ~(matchByte ^ symbol);
+ }
+ while (symbol < 0x10000);
+ return price;
+}
+
+
+static void RcTree_Encode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
+{
+ UInt32 m = 1;
+ int i;
+ for (i = numBitLevels; i != 0;)
+ {
+ UInt32 bit;
+ i--;
+ bit = (symbol >> i) & 1;
+ RangeEnc_EncodeBit(rc, probs + m, bit);
+ m = (m << 1) | bit;
+ }
+}
+
+static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, int numBitLevels, UInt32 symbol)
+{
+ UInt32 m = 1;
+ int i;
+ for (i = 0; i < numBitLevels; i++)
+ {
+ UInt32 bit = symbol & 1;
+ RangeEnc_EncodeBit(rc, probs + m, bit);
+ m = (m << 1) | bit;
+ symbol >>= 1;
+ }
+}
+
+static UInt32 RcTree_GetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
+{
+ UInt32 price = 0;
+ symbol |= (1 << numBitLevels);
+ while (symbol != 1)
+ {
+ price += GET_PRICEa(probs[symbol >> 1], symbol & 1);
+ symbol >>= 1;
+ }
+ return price;
+}
+
+static UInt32 RcTree_ReverseGetPrice(const CLzmaProb *probs, int numBitLevels, UInt32 symbol, UInt32 *ProbPrices)
+{
+ UInt32 price = 0;
+ UInt32 m = 1;
+ int i;
+ for (i = numBitLevels; i != 0; i--)
+ {
+ UInt32 bit = symbol & 1;
+ symbol >>= 1;
+ price += GET_PRICEa(probs[m], bit);
+ m = (m << 1) | bit;
+ }
+ return price;
+}
+
+
+static void LenEnc_Init(CLenEnc *p)
+{
+ unsigned i;
+ p->choice = p->choice2 = kProbInitValue;
+ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumLowBits); i++)
+ p->low[i] = kProbInitValue;
+ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << kLenNumMidBits); i++)
+ p->mid[i] = kProbInitValue;
+ for (i = 0; i < kLenNumHighSymbols; i++)
+ p->high[i] = kProbInitValue;
+}
+
+static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState)
+{
+ if (symbol < kLenNumLowSymbols)
+ {
+ RangeEnc_EncodeBit(rc, &p->choice, 0);
+ RcTree_Encode(rc, p->low + (posState << kLenNumLowBits), kLenNumLowBits, symbol);
+ }
+ else
+ {
+ RangeEnc_EncodeBit(rc, &p->choice, 1);
+ if (symbol < kLenNumLowSymbols + kLenNumMidSymbols)
+ {
+ RangeEnc_EncodeBit(rc, &p->choice2, 0);
+ RcTree_Encode(rc, p->mid + (posState << kLenNumMidBits), kLenNumMidBits, symbol - kLenNumLowSymbols);
+ }
+ else
+ {
+ RangeEnc_EncodeBit(rc, &p->choice2, 1);
+ RcTree_Encode(rc, p->high, kLenNumHighBits, symbol - kLenNumLowSymbols - kLenNumMidSymbols);
+ }
+ }
+}
+
+static void LenEnc_SetPrices(CLenEnc *p, UInt32 posState, UInt32 numSymbols, UInt32 *prices, UInt32 *ProbPrices)
+{
+ UInt32 a0 = GET_PRICE_0a(p->choice);
+ UInt32 a1 = GET_PRICE_1a(p->choice);
+ UInt32 b0 = a1 + GET_PRICE_0a(p->choice2);
+ UInt32 b1 = a1 + GET_PRICE_1a(p->choice2);
+ UInt32 i = 0;
+ for (i = 0; i < kLenNumLowSymbols; i++)
+ {
+ if (i >= numSymbols)
+ return;
+ prices[i] = a0 + RcTree_GetPrice(p->low + (posState << kLenNumLowBits), kLenNumLowBits, i, ProbPrices);
+ }
+ for (; i < kLenNumLowSymbols + kLenNumMidSymbols; i++)
+ {
+ if (i >= numSymbols)
+ return;
+ prices[i] = b0 + RcTree_GetPrice(p->mid + (posState << kLenNumMidBits), kLenNumMidBits, i - kLenNumLowSymbols, ProbPrices);
+ }
+ for (; i < numSymbols; i++)
+ prices[i] = b1 + RcTree_GetPrice(p->high, kLenNumHighBits, i - kLenNumLowSymbols - kLenNumMidSymbols, ProbPrices);
+}
+
+static void MY_FAST_CALL LenPriceEnc_UpdateTable(CLenPriceEnc *p, UInt32 posState, UInt32 *ProbPrices)
+{
+ LenEnc_SetPrices(&p->p, posState, p->tableSize, p->prices[posState], ProbPrices);
+ p->counters[posState] = p->tableSize;
+}
+
+static void LenPriceEnc_UpdateTables(CLenPriceEnc *p, UInt32 numPosStates, UInt32 *ProbPrices)
+{
+ UInt32 posState;
+ for (posState = 0; posState < numPosStates; posState++)
+ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
+}
+
+static void LenEnc_Encode2(CLenPriceEnc *p, CRangeEnc *rc, UInt32 symbol, UInt32 posState, Bool updatePrice, UInt32 *ProbPrices)
+{
+ LenEnc_Encode(&p->p, rc, symbol, posState);
+ if (updatePrice)
+ if (--p->counters[posState] == 0)
+ LenPriceEnc_UpdateTable(p, posState, ProbPrices);
+}
+
+
+
+
+static void MovePos(CLzmaEnc *p, UInt32 num)
+{
+ #ifdef SHOW_STAT
+ ttt += num;
+ printf("\n MovePos %d", num);
+ #endif
+ if (num != 0)
+ {
+ p->additionalOffset += num;
+ p->matchFinder.Skip(p->matchFinderObj, num);
+ }
+}
+
+static UInt32 ReadMatchDistances(CLzmaEnc *p, UInt32 *numDistancePairsRes)
+{
+ UInt32 lenRes = 0, numPairs;
+ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
+ #ifdef SHOW_STAT
+ printf("\n i = %d numPairs = %d ", ttt, numPairs / 2);
+ ttt++;
+ {
+ UInt32 i;
+ for (i = 0; i < numPairs; i += 2)
+ printf("%2d %6d | ", p->matches[i], p->matches[i + 1]);
+ }
+ #endif
+ if (numPairs > 0)
+ {
+ lenRes = p->matches[numPairs - 2];
+ if (lenRes == p->numFastBytes)
+ {
+ const Byte *pby = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ UInt32 distance = p->matches[numPairs - 1] + 1;
+ UInt32 numAvail = p->numAvail;
+ if (numAvail > LZMA_MATCH_LEN_MAX)
+ numAvail = LZMA_MATCH_LEN_MAX;
+ {
+ const Byte *pby2 = pby - distance;
+ for (; lenRes < numAvail && pby[lenRes] == pby2[lenRes]; lenRes++);
+ }
+ }
+ }
+ p->additionalOffset++;
+ *numDistancePairsRes = numPairs;
+ return lenRes;
+}
+
+
+#define MakeAsChar(p) (p)->backPrev = (UInt32)(-1); (p)->prev1IsChar = False;
+#define MakeAsShortRep(p) (p)->backPrev = 0; (p)->prev1IsChar = False;
+#define IsShortRep(p) ((p)->backPrev == 0)
+
+static UInt32 GetRepLen1Price(CLzmaEnc *p, UInt32 state, UInt32 posState)
+{
+ return
+ GET_PRICE_0(p->isRepG0[state]) +
+ GET_PRICE_0(p->isRep0Long[state][posState]);
+}
+
+static UInt32 GetPureRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 state, UInt32 posState)
+{
+ UInt32 price;
+ if (repIndex == 0)
+ {
+ price = GET_PRICE_0(p->isRepG0[state]);
+ price += GET_PRICE_1(p->isRep0Long[state][posState]);
+ }
+ else
+ {
+ price = GET_PRICE_1(p->isRepG0[state]);
+ if (repIndex == 1)
+ price += GET_PRICE_0(p->isRepG1[state]);
+ else
+ {
+ price += GET_PRICE_1(p->isRepG1[state]);
+ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
+ }
+ }
+ return price;
+}
+
+static UInt32 GetRepPrice(CLzmaEnc *p, UInt32 repIndex, UInt32 len, UInt32 state, UInt32 posState)
+{
+ return p->repLenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN] +
+ GetPureRepPrice(p, repIndex, state, posState);
+}
+
+static UInt32 Backward(CLzmaEnc *p, UInt32 *backRes, UInt32 cur)
+{
+ UInt32 posMem = p->opt[cur].posPrev;
+ UInt32 backMem = p->opt[cur].backPrev;
+ p->optimumEndIndex = cur;
+ do
+ {
+ if (p->opt[cur].prev1IsChar)
+ {
+ MakeAsChar(&p->opt[posMem])
+ p->opt[posMem].posPrev = posMem - 1;
+ if (p->opt[cur].prev2)
+ {
+ p->opt[posMem - 1].prev1IsChar = False;
+ p->opt[posMem - 1].posPrev = p->opt[cur].posPrev2;
+ p->opt[posMem - 1].backPrev = p->opt[cur].backPrev2;
+ }
+ }
+ {
+ UInt32 posPrev = posMem;
+ UInt32 backCur = backMem;
+
+ backMem = p->opt[posPrev].backPrev;
+ posMem = p->opt[posPrev].posPrev;
+
+ p->opt[posPrev].backPrev = backCur;
+ p->opt[posPrev].posPrev = cur;
+ cur = posPrev;
+ }
+ }
+ while (cur != 0);
+ *backRes = p->opt[0].backPrev;
+ p->optimumCurrentIndex = p->opt[0].posPrev;
+ return p->optimumCurrentIndex;
+}
+
+#define LIT_PROBS(pos, prevByte) (p->litProbs + ((((pos) & p->lpMask) << p->lc) + ((prevByte) >> (8 - p->lc))) * 0x300)
+
+static UInt32 GetOptimum(CLzmaEnc *p, UInt32 position, UInt32 *backRes)
+{
+ UInt32 numAvail, mainLen, numPairs, repMaxIndex, i, posState, lenEnd, len, cur;
+ UInt32 matchPrice, repMatchPrice, normalMatchPrice;
+ UInt32 reps[LZMA_NUM_REPS], repLens[LZMA_NUM_REPS];
+ UInt32 *matches;
+ const Byte *data;
+ Byte curByte, matchByte;
+ if (p->optimumEndIndex != p->optimumCurrentIndex)
+ {
+ const COptimal *opt = &p->opt[p->optimumCurrentIndex];
+ UInt32 lenRes = opt->posPrev - p->optimumCurrentIndex;
+ *backRes = opt->backPrev;
+ p->optimumCurrentIndex = opt->posPrev;
+ return lenRes;
+ }
+ p->optimumCurrentIndex = p->optimumEndIndex = 0;
+
+ if (p->additionalOffset == 0)
+ mainLen = ReadMatchDistances(p, &numPairs);
+ else
+ {
+ mainLen = p->longestMatchLength;
+ numPairs = p->numPairs;
+ }
+
+ numAvail = p->numAvail;
+ if (numAvail < 2)
+ {
+ *backRes = (UInt32)(-1);
+ return 1;
+ }
+ if (numAvail > LZMA_MATCH_LEN_MAX)
+ numAvail = LZMA_MATCH_LEN_MAX;
+
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ repMaxIndex = 0;
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ UInt32 lenTest;
+ const Byte *data2;
+ reps[i] = p->reps[i];
+ data2 = data - (reps[i] + 1);
+ if (data[0] != data2[0] || data[1] != data2[1])
+ {
+ repLens[i] = 0;
+ continue;
+ }
+ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
+ repLens[i] = lenTest;
+ if (lenTest > repLens[repMaxIndex])
+ repMaxIndex = i;
+ }
+ if (repLens[repMaxIndex] >= p->numFastBytes)
+ {
+ UInt32 lenRes;
+ *backRes = repMaxIndex;
+ lenRes = repLens[repMaxIndex];
+ MovePos(p, lenRes - 1);
+ return lenRes;
+ }
+
+ matches = p->matches;
+ if (mainLen >= p->numFastBytes)
+ {
+ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
+ MovePos(p, mainLen - 1);
+ return mainLen;
+ }
+ curByte = *data;
+ matchByte = *(data - (reps[0] + 1));
+
+ if (mainLen < 2 && curByte != matchByte && repLens[repMaxIndex] < 2)
+ {
+ *backRes = (UInt32)-1;
+ return 1;
+ }
+
+ p->opt[0].state = (CState)p->state;
+
+ posState = (position & p->pbMask);
+
+ {
+ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
+ (!IsCharState(p->state) ?
+ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
+ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+ }
+
+ MakeAsChar(&p->opt[1]);
+
+ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
+ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
+
+ if (matchByte == curByte)
+ {
+ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, p->state, posState);
+ if (shortRepPrice < p->opt[1].price)
+ {
+ p->opt[1].price = shortRepPrice;
+ MakeAsShortRep(&p->opt[1]);
+ }
+ }
+ lenEnd = ((mainLen >= repLens[repMaxIndex]) ? mainLen : repLens[repMaxIndex]);
+
+ if (lenEnd < 2)
+ {
+ *backRes = p->opt[1].backPrev;
+ return 1;
+ }
+
+ p->opt[1].posPrev = 0;
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ p->opt[0].backs[i] = reps[i];
+
+ len = lenEnd;
+ do
+ p->opt[len--].price = kInfinityPrice;
+ while (len >= 2);
+
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ UInt32 repLen = repLens[i];
+ UInt32 price;
+ if (repLen < 2)
+ continue;
+ price = repMatchPrice + GetPureRepPrice(p, i, p->state, posState);
+ do
+ {
+ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][repLen - 2];
+ COptimal *opt = &p->opt[repLen];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = 0;
+ opt->backPrev = i;
+ opt->prev1IsChar = False;
+ }
+ }
+ while (--repLen >= 2);
+ }
+
+ normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
+
+ len = ((repLens[0] >= 2) ? repLens[0] + 1 : 2);
+ if (len <= mainLen)
+ {
+ UInt32 offs = 0;
+ while (len > matches[offs])
+ offs += 2;
+ for (; ; len++)
+ {
+ COptimal *opt;
+ UInt32 distance = matches[offs + 1];
+
+ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][len - LZMA_MATCH_LEN_MIN];
+ UInt32 lenToPosState = GetLenToPosState(len);
+ if (distance < kNumFullDistances)
+ curAndLenPrice += p->distancesPrices[lenToPosState][distance];
+ else
+ {
+ UInt32 slot;
+ GetPosSlot2(distance, slot);
+ curAndLenPrice += p->alignPrices[distance & kAlignMask] + p->posSlotPrices[lenToPosState][slot];
+ }
+ opt = &p->opt[len];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = 0;
+ opt->backPrev = distance + LZMA_NUM_REPS;
+ opt->prev1IsChar = False;
+ }
+ if (len == matches[offs])
+ {
+ offs += 2;
+ if (offs == numPairs)
+ break;
+ }
+ }
+ }
+
+ cur = 0;
+
+ #ifdef SHOW_STAT2
+ if (position >= 0)
+ {
+ unsigned i;
+ printf("\n pos = %4X", position);
+ for (i = cur; i <= lenEnd; i++)
+ printf("\nprice[%4X] = %d", position - cur + i, p->opt[i].price);
+ }
+ #endif
+
+ for (;;)
+ {
+ UInt32 numAvailFull, newLen, numPairs, posPrev, state, posState, startLen;
+ UInt32 curPrice, curAnd1Price, matchPrice, repMatchPrice;
+ Bool nextIsChar;
+ Byte curByte, matchByte;
+ const Byte *data;
+ COptimal *curOpt;
+ COptimal *nextOpt;
+
+ cur++;
+ if (cur == lenEnd)
+ return Backward(p, backRes, cur);
+
+ newLen = ReadMatchDistances(p, &numPairs);
+ if (newLen >= p->numFastBytes)
+ {
+ p->numPairs = numPairs;
+ p->longestMatchLength = newLen;
+ return Backward(p, backRes, cur);
+ }
+ position++;
+ curOpt = &p->opt[cur];
+ posPrev = curOpt->posPrev;
+ if (curOpt->prev1IsChar)
+ {
+ posPrev--;
+ if (curOpt->prev2)
+ {
+ state = p->opt[curOpt->posPrev2].state;
+ if (curOpt->backPrev2 < LZMA_NUM_REPS)
+ state = kRepNextStates[state];
+ else
+ state = kMatchNextStates[state];
+ }
+ else
+ state = p->opt[posPrev].state;
+ state = kLiteralNextStates[state];
+ }
+ else
+ state = p->opt[posPrev].state;
+ if (posPrev == cur - 1)
+ {
+ if (IsShortRep(curOpt))
+ state = kShortRepNextStates[state];
+ else
+ state = kLiteralNextStates[state];
+ }
+ else
+ {
+ UInt32 pos;
+ const COptimal *prevOpt;
+ if (curOpt->prev1IsChar && curOpt->prev2)
+ {
+ posPrev = curOpt->posPrev2;
+ pos = curOpt->backPrev2;
+ state = kRepNextStates[state];
+ }
+ else
+ {
+ pos = curOpt->backPrev;
+ if (pos < LZMA_NUM_REPS)
+ state = kRepNextStates[state];
+ else
+ state = kMatchNextStates[state];
+ }
+ prevOpt = &p->opt[posPrev];
+ if (pos < LZMA_NUM_REPS)
+ {
+ UInt32 i;
+ reps[0] = prevOpt->backs[pos];
+ for (i = 1; i <= pos; i++)
+ reps[i] = prevOpt->backs[i - 1];
+ for (; i < LZMA_NUM_REPS; i++)
+ reps[i] = prevOpt->backs[i];
+ }
+ else
+ {
+ UInt32 i;
+ reps[0] = (pos - LZMA_NUM_REPS);
+ for (i = 1; i < LZMA_NUM_REPS; i++)
+ reps[i] = prevOpt->backs[i - 1];
+ }
+ }
+ curOpt->state = (CState)state;
+
+ curOpt->backs[0] = reps[0];
+ curOpt->backs[1] = reps[1];
+ curOpt->backs[2] = reps[2];
+ curOpt->backs[3] = reps[3];
+
+ curPrice = curOpt->price;
+ nextIsChar = False;
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ curByte = *data;
+ matchByte = *(data - (reps[0] + 1));
+
+ posState = (position & p->pbMask);
+
+ curAnd1Price = curPrice + GET_PRICE_0(p->isMatch[state][posState]);
+ {
+ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+ curAnd1Price +=
+ (!IsCharState(state) ?
+ LitEnc_GetPriceMatched(probs, curByte, matchByte, p->ProbPrices) :
+ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+ }
+
+ nextOpt = &p->opt[cur + 1];
+
+ if (curAnd1Price < nextOpt->price)
+ {
+ nextOpt->price = curAnd1Price;
+ nextOpt->posPrev = cur;
+ MakeAsChar(nextOpt);
+ nextIsChar = True;
+ }
+
+ matchPrice = curPrice + GET_PRICE_1(p->isMatch[state][posState]);
+ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
+
+ if (matchByte == curByte && !(nextOpt->posPrev < cur && nextOpt->backPrev == 0))
+ {
+ UInt32 shortRepPrice = repMatchPrice + GetRepLen1Price(p, state, posState);
+ if (shortRepPrice <= nextOpt->price)
+ {
+ nextOpt->price = shortRepPrice;
+ nextOpt->posPrev = cur;
+ MakeAsShortRep(nextOpt);
+ nextIsChar = True;
+ }
+ }
+ numAvailFull = p->numAvail;
+ {
+ UInt32 temp = kNumOpts - 1 - cur;
+ if (temp < numAvailFull)
+ numAvailFull = temp;
+ }
+
+ if (numAvailFull < 2)
+ continue;
+ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
+
+ if (!nextIsChar && matchByte != curByte) /* speed optimization */
+ {
+ /* try Literal + rep0 */
+ UInt32 temp;
+ UInt32 lenTest2;
+ const Byte *data2 = data - (reps[0] + 1);
+ UInt32 limit = p->numFastBytes + 1;
+ if (limit > numAvailFull)
+ limit = numAvailFull;
+
+ for (temp = 1; temp < limit && data[temp] == data2[temp]; temp++);
+ lenTest2 = temp - 1;
+ if (lenTest2 >= 2)
+ {
+ UInt32 state2 = kLiteralNextStates[state];
+ UInt32 posStateNext = (position + 1) & p->pbMask;
+ UInt32 nextRepMatchPrice = curAnd1Price +
+ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+ GET_PRICE_1(p->isRep[state2]);
+ /* for (; lenTest2 >= 2; lenTest2--) */
+ {
+ UInt32 curAndLenPrice;
+ COptimal *opt;
+ UInt32 offset = cur + 1 + lenTest2;
+ while (lenEnd < offset)
+ p->opt[++lenEnd].price = kInfinityPrice;
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+ opt = &p->opt[offset];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = cur + 1;
+ opt->backPrev = 0;
+ opt->prev1IsChar = True;
+ opt->prev2 = False;
+ }
+ }
+ }
+ }
+
+ startLen = 2; /* speed optimization */
+ {
+ UInt32 repIndex;
+ for (repIndex = 0; repIndex < LZMA_NUM_REPS; repIndex++)
+ {
+ UInt32 lenTest;
+ UInt32 lenTestTemp;
+ UInt32 price;
+ const Byte *data2 = data - (reps[repIndex] + 1);
+ if (data[0] != data2[0] || data[1] != data2[1])
+ continue;
+ for (lenTest = 2; lenTest < numAvail && data[lenTest] == data2[lenTest]; lenTest++);
+ while (lenEnd < cur + lenTest)
+ p->opt[++lenEnd].price = kInfinityPrice;
+ lenTestTemp = lenTest;
+ price = repMatchPrice + GetPureRepPrice(p, repIndex, state, posState);
+ do
+ {
+ UInt32 curAndLenPrice = price + p->repLenEnc.prices[posState][lenTest - 2];
+ COptimal *opt = &p->opt[cur + lenTest];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = cur;
+ opt->backPrev = repIndex;
+ opt->prev1IsChar = False;
+ }
+ }
+ while (--lenTest >= 2);
+ lenTest = lenTestTemp;
+
+ if (repIndex == 0)
+ startLen = lenTest + 1;
+
+ /* if (_maxMode) */
+ {
+ UInt32 lenTest2 = lenTest + 1;
+ UInt32 limit = lenTest2 + p->numFastBytes;
+ UInt32 nextRepMatchPrice;
+ if (limit > numAvailFull)
+ limit = numAvailFull;
+ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
+ lenTest2 -= lenTest + 1;
+ if (lenTest2 >= 2)
+ {
+ UInt32 state2 = kRepNextStates[state];
+ UInt32 posStateNext = (position + lenTest) & p->pbMask;
+ UInt32 curAndLenCharPrice =
+ price + p->repLenEnc.prices[posState][lenTest - 2] +
+ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
+ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
+ data[lenTest], data2[lenTest], p->ProbPrices);
+ state2 = kLiteralNextStates[state2];
+ posStateNext = (position + lenTest + 1) & p->pbMask;
+ nextRepMatchPrice = curAndLenCharPrice +
+ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+ GET_PRICE_1(p->isRep[state2]);
+
+ /* for (; lenTest2 >= 2; lenTest2--) */
+ {
+ UInt32 curAndLenPrice;
+ COptimal *opt;
+ UInt32 offset = cur + lenTest + 1 + lenTest2;
+ while (lenEnd < offset)
+ p->opt[++lenEnd].price = kInfinityPrice;
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+ opt = &p->opt[offset];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = cur + lenTest + 1;
+ opt->backPrev = 0;
+ opt->prev1IsChar = True;
+ opt->prev2 = True;
+ opt->posPrev2 = cur;
+ opt->backPrev2 = repIndex;
+ }
+ }
+ }
+ }
+ }
+ }
+ /* for (UInt32 lenTest = 2; lenTest <= newLen; lenTest++) */
+ if (newLen > numAvail)
+ {
+ newLen = numAvail;
+ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
+ matches[numPairs] = newLen;
+ numPairs += 2;
+ }
+ if (newLen >= startLen)
+ {
+ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
+ UInt32 offs, curBack, posSlot;
+ UInt32 lenTest;
+ while (lenEnd < cur + newLen)
+ p->opt[++lenEnd].price = kInfinityPrice;
+
+ offs = 0;
+ while (startLen > matches[offs])
+ offs += 2;
+ curBack = matches[offs + 1];
+ GetPosSlot2(curBack, posSlot);
+ for (lenTest = /*2*/ startLen; ; lenTest++)
+ {
+ UInt32 curAndLenPrice = normalMatchPrice + p->lenEnc.prices[posState][lenTest - LZMA_MATCH_LEN_MIN];
+ UInt32 lenToPosState = GetLenToPosState(lenTest);
+ COptimal *opt;
+ if (curBack < kNumFullDistances)
+ curAndLenPrice += p->distancesPrices[lenToPosState][curBack];
+ else
+ curAndLenPrice += p->posSlotPrices[lenToPosState][posSlot] + p->alignPrices[curBack & kAlignMask];
+
+ opt = &p->opt[cur + lenTest];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = cur;
+ opt->backPrev = curBack + LZMA_NUM_REPS;
+ opt->prev1IsChar = False;
+ }
+
+ if (/*_maxMode && */lenTest == matches[offs])
+ {
+ /* Try Match + Literal + Rep0 */
+ const Byte *data2 = data - (curBack + 1);
+ UInt32 lenTest2 = lenTest + 1;
+ UInt32 limit = lenTest2 + p->numFastBytes;
+ UInt32 nextRepMatchPrice;
+ if (limit > numAvailFull)
+ limit = numAvailFull;
+ for (; lenTest2 < limit && data[lenTest2] == data2[lenTest2]; lenTest2++);
+ lenTest2 -= lenTest + 1;
+ if (lenTest2 >= 2)
+ {
+ UInt32 state2 = kMatchNextStates[state];
+ UInt32 posStateNext = (position + lenTest) & p->pbMask;
+ UInt32 curAndLenCharPrice = curAndLenPrice +
+ GET_PRICE_0(p->isMatch[state2][posStateNext]) +
+ LitEnc_GetPriceMatched(LIT_PROBS(position + lenTest, data[lenTest - 1]),
+ data[lenTest], data2[lenTest], p->ProbPrices);
+ state2 = kLiteralNextStates[state2];
+ posStateNext = (posStateNext + 1) & p->pbMask;
+ nextRepMatchPrice = curAndLenCharPrice +
+ GET_PRICE_1(p->isMatch[state2][posStateNext]) +
+ GET_PRICE_1(p->isRep[state2]);
+
+ /* for (; lenTest2 >= 2; lenTest2--) */
+ {
+ UInt32 offset = cur + lenTest + 1 + lenTest2;
+ UInt32 curAndLenPrice;
+ COptimal *opt;
+ while (lenEnd < offset)
+ p->opt[++lenEnd].price = kInfinityPrice;
+ curAndLenPrice = nextRepMatchPrice + GetRepPrice(p, 0, lenTest2, state2, posStateNext);
+ opt = &p->opt[offset];
+ if (curAndLenPrice < opt->price)
+ {
+ opt->price = curAndLenPrice;
+ opt->posPrev = cur + lenTest + 1;
+ opt->backPrev = 0;
+ opt->prev1IsChar = True;
+ opt->prev2 = True;
+ opt->posPrev2 = cur;
+ opt->backPrev2 = curBack + LZMA_NUM_REPS;
+ }
+ }
+ }
+ offs += 2;
+ if (offs == numPairs)
+ break;
+ curBack = matches[offs + 1];
+ if (curBack >= kNumFullDistances)
+ GetPosSlot2(curBack, posSlot);
+ }
+ }
+ }
+ }
+}
+
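+/* ChangePair(smallDist, bigDist) is true when bigDist is more than about 128
+   times smallDist; GetOptimumFast uses it so that a match that is only one
+   byte longer is preferred only when its distance is not disproportionately
+   larger. */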
+#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
+
+static UInt32 GetOptimumFast(CLzmaEnc *p, UInt32 *backRes)
+{
+ UInt32 numAvail, mainLen, mainDist, numPairs, repIndex, repLen, i;
+ const Byte *data;
+ const UInt32 *matches;
+
+ if (p->additionalOffset == 0)
+ mainLen = ReadMatchDistances(p, &numPairs);
+ else
+ {
+ mainLen = p->longestMatchLength;
+ numPairs = p->numPairs;
+ }
+
+ numAvail = p->numAvail;
+ *backRes = (UInt32)-1;
+ if (numAvail < 2)
+ return 1;
+ if (numAvail > LZMA_MATCH_LEN_MAX)
+ numAvail = LZMA_MATCH_LEN_MAX;
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+
+ repLen = repIndex = 0;
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ UInt32 len;
+ const Byte *data2 = data - (p->reps[i] + 1);
+ if (data[0] != data2[0] || data[1] != data2[1])
+ continue;
+ for (len = 2; len < numAvail && data[len] == data2[len]; len++);
+ if (len >= p->numFastBytes)
+ {
+ *backRes = i;
+ MovePos(p, len - 1);
+ return len;
+ }
+ if (len > repLen)
+ {
+ repIndex = i;
+ repLen = len;
+ }
+ }
+
+ matches = p->matches;
+ if (mainLen >= p->numFastBytes)
+ {
+ *backRes = matches[numPairs - 1] + LZMA_NUM_REPS;
+ MovePos(p, mainLen - 1);
+ return mainLen;
+ }
+
+ mainDist = 0; /* for GCC */
+ if (mainLen >= 2)
+ {
+ mainDist = matches[numPairs - 1];
+ while (numPairs > 2 && mainLen == matches[numPairs - 4] + 1)
+ {
+ if (!ChangePair(matches[numPairs - 3], mainDist))
+ break;
+ numPairs -= 2;
+ mainLen = matches[numPairs - 2];
+ mainDist = matches[numPairs - 1];
+ }
+ if (mainLen == 2 && mainDist >= 0x80)
+ mainLen = 1;
+ }
+
+ if (repLen >= 2 && (
+ (repLen + 1 >= mainLen) ||
+ (repLen + 2 >= mainLen && mainDist >= (1 << 9)) ||
+ (repLen + 3 >= mainLen && mainDist >= (1 << 15))))
+ {
+ *backRes = repIndex;
+ MovePos(p, repLen - 1);
+ return repLen;
+ }
+
+ if (mainLen < 2 || numAvail <= 2)
+ return 1;
+
+ p->longestMatchLength = ReadMatchDistances(p, &p->numPairs);
+ if (p->longestMatchLength >= 2)
+ {
+ UInt32 newDistance = matches[p->numPairs - 1];
+ if ((p->longestMatchLength >= mainLen && newDistance < mainDist) ||
+ (p->longestMatchLength == mainLen + 1 && !ChangePair(mainDist, newDistance)) ||
+ (p->longestMatchLength > mainLen + 1) ||
+ (p->longestMatchLength + 1 >= mainLen && mainLen >= 3 && ChangePair(newDistance, mainDist)))
+ return 1;
+ }
+
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ UInt32 len, limit;
+ const Byte *data2 = data - (p->reps[i] + 1);
+ if (data[0] != data2[0] || data[1] != data2[1])
+ continue;
+ limit = mainLen - 1;
+ for (len = 2; len < limit && data[len] == data2[len]; len++);
+ if (len >= limit)
+ return 1;
+ }
+ *backRes = mainDist + LZMA_NUM_REPS;
+ MovePos(p, mainLen - 2);
+ return mainLen;
+}
+
+static void WriteEndMarker(CLzmaEnc *p, UInt32 posState)
+{
+ UInt32 len;
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
+ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
+ p->state = kMatchNextStates[p->state];
+ len = LZMA_MATCH_LEN_MIN;
+ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, (1 << kNumPosSlotBits) - 1);
+ RangeEnc_EncodeDirectBits(&p->rc, (((UInt32)1 << 30) - 1) >> kNumAlignBits, 30 - kNumAlignBits);
+ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
+}
+
+static SRes CheckErrors(CLzmaEnc *p)
+{
+ if (p->result != SZ_OK)
+ return p->result;
+ if (p->rc.res != SZ_OK)
+ p->result = SZ_ERROR_WRITE;
+ if (p->matchFinderBase.result != SZ_OK)
+ p->result = SZ_ERROR_READ;
+ if (p->result != SZ_OK)
+ p->finished = True;
+ return p->result;
+}
+
+static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
+{
+ /* ReleaseMFStream(); */
+ p->finished = True;
+ if (p->writeEndMark)
+ WriteEndMarker(p, nowPos & p->pbMask);
+ RangeEnc_FlushData(&p->rc);
+ RangeEnc_FlushStream(&p->rc);
+ return CheckErrors(p);
+}
+
+static void FillAlignPrices(CLzmaEnc *p)
+{
+ UInt32 i;
+ for (i = 0; i < kAlignTableSize; i++)
+ p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
+ p->alignPriceCount = 0;
+}
+
+static void FillDistancesPrices(CLzmaEnc *p)
+{
+ UInt32 tempPrices[kNumFullDistances];
+ UInt32 i, lenToPosState;
+ for (i = kStartPosModelIndex; i < kNumFullDistances; i++)
+ {
+ UInt32 posSlot = GetPosSlot1(i);
+ UInt32 footerBits = ((posSlot >> 1) - 1);
+ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
+ tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base - posSlot - 1, footerBits, i - base, p->ProbPrices);
+ }
+
+ for (lenToPosState = 0; lenToPosState < kNumLenToPosStates; lenToPosState++)
+ {
+ UInt32 posSlot;
+ const CLzmaProb *encoder = p->posSlotEncoder[lenToPosState];
+ UInt32 *posSlotPrices = p->posSlotPrices[lenToPosState];
+ for (posSlot = 0; posSlot < p->distTableSize; posSlot++)
+ posSlotPrices[posSlot] = RcTree_GetPrice(encoder, kNumPosSlotBits, posSlot, p->ProbPrices);
+ for (posSlot = kEndPosModelIndex; posSlot < p->distTableSize; posSlot++)
+ posSlotPrices[posSlot] += ((((posSlot >> 1) - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
+
+ {
+ UInt32 *distancesPrices = p->distancesPrices[lenToPosState];
+ UInt32 i;
+ for (i = 0; i < kStartPosModelIndex; i++)
+ distancesPrices[i] = posSlotPrices[i];
+ for (; i < kNumFullDistances; i++)
+ distancesPrices[i] = posSlotPrices[GetPosSlot1(i)] + tempPrices[i];
+ }
+ }
+ p->matchPriceCount = 0;
+}
+
+void LzmaEnc_Construct(CLzmaEnc *p)
+{
+ RangeEnc_Construct(&p->rc);
+ MatchFinder_Construct(&p->matchFinderBase);
+ #ifndef _7ZIP_ST
+ MatchFinderMt_Construct(&p->matchFinderMt);
+ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
+ #endif
+
+ {
+ CLzmaEncProps props;
+ LzmaEncProps_Init(&props);
+ LzmaEnc_SetProps(p, &props);
+ }
+
+ #ifndef LZMA_LOG_BSR
+ LzmaEnc_FastPosInit(p->g_FastPos);
+ #endif
+
+ LzmaEnc_InitPriceTables(p->ProbPrices);
+ p->litProbs = 0;
+ p->saveState.litProbs = 0;
+}
+
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc)
+{
+ void *p;
+ p = alloc->Alloc(alloc, sizeof(CLzmaEnc));
+ if (p != 0)
+ LzmaEnc_Construct((CLzmaEnc *)p);
+ return p;
+}
+
+void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->litProbs);
+ alloc->Free(alloc, p->saveState.litProbs);
+ p->litProbs = 0;
+ p->saveState.litProbs = 0;
+}
+
+void LzmaEnc_Destruct(CLzmaEnc *p, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ #ifndef _7ZIP_ST
+ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
+ #endif
+ MatchFinder_Free(&p->matchFinderBase, allocBig);
+ LzmaEnc_FreeLits(p, alloc);
+ RangeEnc_Free(&p->rc, alloc);
+}
+
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
+ alloc->Free(alloc, p);
+}
+
+static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, Bool useLimits, UInt32 maxPackSize, UInt32 maxUnpackSize)
+{
+ UInt32 nowPos32, startPos32;
+ if (p->needInit)
+ {
+ p->matchFinder.Init(p->matchFinderObj);
+ p->needInit = 0;
+ }
+
+ if (p->finished)
+ return p->result;
+ RINOK(CheckErrors(p));
+
+ nowPos32 = (UInt32)p->nowPos64;
+ startPos32 = nowPos32;
+
+ if (p->nowPos64 == 0)
+ {
+ UInt32 numPairs;
+ Byte curByte;
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+ return Flush(p, nowPos32);
+ ReadMatchDistances(p, &numPairs);
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][0], 0);
+ p->state = kLiteralNextStates[p->state];
+ curByte = p->matchFinder.GetIndexByte(p->matchFinderObj, 0 - p->additionalOffset);
+ LitEnc_Encode(&p->rc, p->litProbs, curByte);
+ p->additionalOffset--;
+ nowPos32++;
+ }
+
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
+ for (;;)
+ {
+ UInt32 pos, len, posState;
+
+ if (p->fastMode)
+ len = GetOptimumFast(p, &pos);
+ else
+ len = GetOptimum(p, nowPos32, &pos);
+
+ #ifdef SHOW_STAT2
+ printf("\n pos = %4X, len = %d pos = %d", nowPos32, len, pos);
+ #endif
+
+ posState = nowPos32 & p->pbMask;
+ if (len == 1 && pos == (UInt32)-1)
+ {
+ Byte curByte;
+ CLzmaProb *probs;
+ const Byte *data;
+
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 0);
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+ curByte = *data;
+ probs = LIT_PROBS(nowPos32, *(data - 1));
+ if (IsCharState(p->state))
+ LitEnc_Encode(&p->rc, probs, curByte);
+ else
+ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0] - 1));
+ p->state = kLiteralNextStates[p->state];
+ }
+ else
+ {
+ RangeEnc_EncodeBit(&p->rc, &p->isMatch[p->state][posState], 1);
+ if (pos < LZMA_NUM_REPS)
+ {
+ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 1);
+ if (pos == 0)
+ {
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 0);
+ RangeEnc_EncodeBit(&p->rc, &p->isRep0Long[p->state][posState], ((len == 1) ? 0 : 1));
+ }
+ else
+ {
+ UInt32 distance = p->reps[pos];
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG0[p->state], 1);
+ if (pos == 1)
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 0);
+ else
+ {
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG1[p->state], 1);
+ RangeEnc_EncodeBit(&p->rc, &p->isRepG2[p->state], pos - 2);
+ if (pos == 3)
+ p->reps[3] = p->reps[2];
+ p->reps[2] = p->reps[1];
+ }
+ p->reps[1] = p->reps[0];
+ p->reps[0] = distance;
+ }
+ if (len == 1)
+ p->state = kShortRepNextStates[p->state];
+ else
+ {
+ LenEnc_Encode2(&p->repLenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+ p->state = kRepNextStates[p->state];
+ }
+ }
+ else
+ {
+ UInt32 posSlot;
+ RangeEnc_EncodeBit(&p->rc, &p->isRep[p->state], 0);
+ p->state = kMatchNextStates[p->state];
+ LenEnc_Encode2(&p->lenEnc, &p->rc, len - LZMA_MATCH_LEN_MIN, posState, !p->fastMode, p->ProbPrices);
+ pos -= LZMA_NUM_REPS;
+ GetPosSlot(pos, posSlot);
+ RcTree_Encode(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], kNumPosSlotBits, posSlot);
+
+ if (posSlot >= kStartPosModelIndex)
+ {
+ UInt32 footerBits = ((posSlot >> 1) - 1);
+ UInt32 base = ((2 | (posSlot & 1)) << footerBits);
+ UInt32 posReduced = pos - base;
+
+ if (posSlot < kEndPosModelIndex)
+ RcTree_ReverseEncode(&p->rc, p->posEncoders + base - posSlot - 1, footerBits, posReduced);
+ else
+ {
+ RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
+ RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
+ p->alignPriceCount++;
+ }
+ }
+ p->reps[3] = p->reps[2];
+ p->reps[2] = p->reps[1];
+ p->reps[1] = p->reps[0];
+ p->reps[0] = pos;
+ p->matchPriceCount++;
+ }
+ }
+ p->additionalOffset -= len;
+ nowPos32 += len;
+ if (p->additionalOffset == 0)
+ {
+ UInt32 processed;
+ if (!p->fastMode)
+ {
+ if (p->matchPriceCount >= (1 << 7))
+ FillDistancesPrices(p);
+ if (p->alignPriceCount >= kAlignTableSize)
+ FillAlignPrices(p);
+ }
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+ break;
+ processed = nowPos32 - startPos32;
+ if (useLimits)
+ {
+ if (processed + kNumOpts + 300 >= maxUnpackSize ||
+ RangeEnc_GetProcessed(&p->rc) + kNumOpts * 2 >= maxPackSize)
+ break;
+ }
+ else if (processed >= (1 << 15))
+ {
+ p->nowPos64 += nowPos32 - startPos32;
+ return CheckErrors(p);
+ }
+ }
+ }
+ p->nowPos64 += nowPos32 - startPos32;
+ return Flush(p, nowPos32);
+}
+
+#define kBigHashDicLimit ((UInt32)1 << 24)
+
+static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ UInt32 beforeSize = kNumOpts;
+ Bool btMode;
+ if (!RangeEnc_Alloc(&p->rc, alloc))
+ return SZ_ERROR_MEM;
+ btMode = (p->matchFinderBase.btMode != 0);
+ #ifndef _7ZIP_ST
+ p->mtMode = (p->multiThread && !p->fastMode && btMode);
+ #endif
+
+ {
+ unsigned lclp = p->lc + p->lp;
+ if (p->litProbs == 0 || p->saveState.litProbs == 0 || p->lclp != lclp)
+ {
+ LzmaEnc_FreeLits(p, alloc);
+ p->litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
+ p->saveState.litProbs = (CLzmaProb *)alloc->Alloc(alloc, (0x300 << lclp) * sizeof(CLzmaProb));
+ if (p->litProbs == 0 || p->saveState.litProbs == 0)
+ {
+ LzmaEnc_FreeLits(p, alloc);
+ return SZ_ERROR_MEM;
+ }
+ p->lclp = lclp;
+ }
+ }
+
+ p->matchFinderBase.bigHash = (p->dictSize > kBigHashDicLimit);
+
+ if (beforeSize + p->dictSize < keepWindowSize)
+ beforeSize = keepWindowSize - p->dictSize;
+
+ #ifndef _7ZIP_ST
+ if (p->mtMode)
+ {
+ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig));
+ p->matchFinderObj = &p->matchFinderMt;
+ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
+ }
+ else
+ #endif
+ {
+ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
+ return SZ_ERROR_MEM;
+ p->matchFinderObj = &p->matchFinderBase;
+ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
+ }
+ return SZ_OK;
+}
+
+void LzmaEnc_Init(CLzmaEnc *p)
+{
+ UInt32 i;
+ p->state = 0;
+ for (i = 0 ; i < LZMA_NUM_REPS; i++)
+ p->reps[i] = 0;
+
+ RangeEnc_Init(&p->rc);
+
+
+ for (i = 0; i < kNumStates; i++)
+ {
+ UInt32 j;
+ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
+ {
+ p->isMatch[i][j] = kProbInitValue;
+ p->isRep0Long[i][j] = kProbInitValue;
+ }
+ p->isRep[i] = kProbInitValue;
+ p->isRepG0[i] = kProbInitValue;
+ p->isRepG1[i] = kProbInitValue;
+ p->isRepG2[i] = kProbInitValue;
+ }
+
+ {
+ UInt32 num = 0x300 << (p->lp + p->lc);
+ for (i = 0; i < num; i++)
+ p->litProbs[i] = kProbInitValue;
+ }
+
+ {
+ for (i = 0; i < kNumLenToPosStates; i++)
+ {
+ CLzmaProb *probs = p->posSlotEncoder[i];
+ UInt32 j;
+ for (j = 0; j < (1 << kNumPosSlotBits); j++)
+ probs[j] = kProbInitValue;
+ }
+ }
+ {
+ for (i = 0; i < kNumFullDistances - kEndPosModelIndex; i++)
+ p->posEncoders[i] = kProbInitValue;
+ }
+
+ LenEnc_Init(&p->lenEnc.p);
+ LenEnc_Init(&p->repLenEnc.p);
+
+ for (i = 0; i < (1 << kNumAlignBits); i++)
+ p->posAlignEncoder[i] = kProbInitValue;
+
+ p->optimumEndIndex = 0;
+ p->optimumCurrentIndex = 0;
+ p->additionalOffset = 0;
+
+ p->pbMask = (1 << p->pb) - 1;
+ p->lpMask = (1 << p->lp) - 1;
+}
+
+void LzmaEnc_InitPrices(CLzmaEnc *p)
+{
+ if (!p->fastMode)
+ {
+ FillDistancesPrices(p);
+ FillAlignPrices(p);
+ }
+
+ p->lenEnc.tableSize =
+ p->repLenEnc.tableSize =
+ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
+ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, p->ProbPrices);
+ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, p->ProbPrices);
+}
+
+static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ UInt32 i;
+ for (i = 0; i < (UInt32)kDicLogSizeMaxCompress; i++)
+ if (p->dictSize <= ((UInt32)1 << i))
+ break;
+ p->distTableSize = i * 2;
+
+ p->finished = False;
+ p->result = SZ_OK;
+ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
+ LzmaEnc_Init(p);
+ LzmaEnc_InitPrices(p);
+ p->nowPos64 = 0;
+ return SZ_OK;
+}
+
+static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
+ ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ p->matchFinderBase.stream = inStream;
+ p->needInit = 1;
+ p->rc.outStream = outStream;
+ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
+}
+
+SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
+ ISeqInStream *inStream, UInt32 keepWindowSize,
+ ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ p->matchFinderBase.stream = inStream;
+ p->needInit = 1;
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
+{
+ p->matchFinderBase.directInput = 1;
+ p->matchFinderBase.bufferBase = (Byte *)src;
+ p->matchFinderBase.directInputRem = srcLen;
+}
+
+SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
+ UInt32 keepWindowSize, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ LzmaEnc_SetInputBuf(p, src, srcLen);
+ p->needInit = 1;
+
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+void LzmaEnc_Finish(CLzmaEncHandle pp)
+{
+ #ifndef _7ZIP_ST
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ if (p->mtMode)
+ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
+ #else
+ pp = pp;
+ #endif
+}
+
+typedef struct
+{
+ ISeqOutStream funcTable;
+ Byte *data;
+ SizeT rem;
+ Bool overflow;
+} CSeqOutStreamBuf;
+
+static size_t MyWrite(void *pp, const void *data, size_t size)
+{
+ CSeqOutStreamBuf *p = (CSeqOutStreamBuf *)pp;
+ if (p->rem < size)
+ {
+ size = p->rem;
+ p->overflow = True;
+ }
+ memcpy(p->data, data, size);
+ p->rem -= size;
+ p->data += size;
+ return size;
+}
+
+
+UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
+{
+ const CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+}
+
+const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
+{
+ const CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+}
+
+SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, Bool reInit,
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ UInt64 nowPos64;
+ SRes res;
+ CSeqOutStreamBuf outStream;
+
+ outStream.funcTable.Write = MyWrite;
+ outStream.data = dest;
+ outStream.rem = *destLen;
+ outStream.overflow = False;
+
+ p->writeEndMark = False;
+ p->finished = False;
+ p->result = SZ_OK;
+
+ if (reInit)
+ LzmaEnc_Init(p);
+ LzmaEnc_InitPrices(p);
+ nowPos64 = p->nowPos64;
+ RangeEnc_Init(&p->rc);
+ p->rc.outStream = &outStream.funcTable;
+
+ res = LzmaEnc_CodeOneBlock(p, True, desiredPackSize, *unpackSize);
+
+ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
+ *destLen -= outStream.rem;
+ if (outStream.overflow)
+ return SZ_ERROR_OUTPUT_EOF;
+
+ return res;
+}
+
+static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
+{
+ SRes res = SZ_OK;
+
+ #ifndef _7ZIP_ST
+ Byte allocaDummy[0x300];
+ int i = 0;
+ for (i = 0; i < 16; i++)
+ allocaDummy[i] = (Byte)i;
+ #endif
+
+ for (;;)
+ {
+ res = LzmaEnc_CodeOneBlock(p, False, 0, 0);
+ if (res != SZ_OK || p->finished != 0)
+ break;
+ if (progress != 0)
+ {
+ res = progress->Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
+ if (res != SZ_OK)
+ {
+ res = SZ_ERROR_PROGRESS;
+ break;
+ }
+ }
+ }
+ LzmaEnc_Finish(p);
+ return res;
+}
+
+SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
+ ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
+ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
+}
+
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ int i;
+ UInt32 dictSize = p->dictSize;
+ if (*size < LZMA_PROPS_SIZE)
+ return SZ_ERROR_PARAM;
+ *size = LZMA_PROPS_SIZE;
+ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
+
+ for (i = 11; i <= 30; i++)
+ {
+ if (dictSize <= ((UInt32)2 << i))
+ {
+ dictSize = (2 << i);
+ break;
+ }
+ if (dictSize <= ((UInt32)3 << i))
+ {
+ dictSize = (3 << i);
+ break;
+ }
+ }
+
+ for (i = 0; i < 4; i++)
+ props[1 + i] = (Byte)(dictSize >> (8 * i));
+ return SZ_OK;
+}
+
+SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ SRes res;
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+
+ CSeqOutStreamBuf outStream;
+
+ LzmaEnc_SetInputBuf(p, src, srcLen);
+
+ outStream.funcTable.Write = MyWrite;
+ outStream.data = dest;
+ outStream.rem = *destLen;
+ outStream.overflow = False;
+
+ p->writeEndMark = writeEndMark;
+
+ p->rc.outStream = &outStream.funcTable;
+ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
+ if (res == SZ_OK)
+ res = LzmaEnc_Encode2(p, progress);
+
+ *destLen -= outStream.rem;
+ if (outStream.overflow)
+ return SZ_ERROR_OUTPUT_EOF;
+ return res;
+}
+
+SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
+ SRes res;
+ if (p == 0)
+ return SZ_ERROR_MEM;
+
+ res = LzmaEnc_SetProps(p, props);
+ if (res == SZ_OK)
+ {
+ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
+ if (res == SZ_OK)
+ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
+ writeEndMark, progress, alloc, allocBig);
+ }
+
+ LzmaEnc_Destroy(p, alloc, allocBig);
+ return res;
+}
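
The 5-byte properties block written by LzmaEnc_WriteProperties above packs lc/lp/pb into the first byte as (pb * 5 + lp) * 9 + lc and stores the rounded dictionary size little-endian in the remaining four bytes. As a quick illustration of that layout, here is a minimal parsing sketch; the function name and the validation are illustrative only and are not part of this commit.

static int ParseLzmaProps(const Byte props[LZMA_PROPS_SIZE],
    unsigned *lc, unsigned *lp, unsigned *pb, UInt32 *dictSize)
{
  unsigned d = props[0];
  int i;
  if (d >= 9 * 5 * 5)
    return 1;                       /* first byte out of range */
  *lc = d % 9; d /= 9;              /* inverse of (pb * 5 + lp) * 9 + lc */
  *lp = d % 5;
  *pb = d / 5;
  *dictSize = 0;
  for (i = 0; i < 4; i++)           /* little-endian dictionary size */
    *dictSize |= (UInt32)props[1 + i] << (8 * i);
  return 0;
}
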
diff --git a/src/libs/7zip/unix/C/LzmaEnc.h b/src/libs/7zip/unix/C/LzmaEnc.h
new file mode 100644
index 000000000..200d60eb8
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaEnc.h
@@ -0,0 +1,80 @@
+/* LzmaEnc.h -- LZMA Encoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_ENC_H
+#define __LZMA_ENC_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaEncProps
+{
+ int level; /* 0 <= level <= 9 */
+ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
+ (1 << 12) <= dictSize <= (1 << 30) for 64-bit version
+ default = (1 << 24) */
+ int lc; /* 0 <= lc <= 8, default = 3 */
+ int lp; /* 0 <= lp <= 4, default = 0 */
+ int pb; /* 0 <= pb <= 4, default = 2 */
+ int algo; /* 0 - fast, 1 - normal, default = 1 */
+ int fb; /* 5 <= fb <= 273, default = 32 */
+ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
+ int numHashBytes; /* 2, 3 or 4, default = 4 */
+ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
+ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
+ int numThreads; /* 1 or 2, default = 2 */
+} CLzmaEncProps;
+
+void LzmaEncProps_Init(CLzmaEncProps *p);
+void LzmaEncProps_Normalize(CLzmaEncProps *p);
+UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
+
+
+/* ---------- CLzmaEncHandle Interface ---------- */
+
+/* The LzmaEnc_* functions can return the following exit codes:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_PARAM - Incorrect parameter in props
+  SZ_ERROR_WRITE - Write callback error
+  SZ_ERROR_PROGRESS - the progress callback requested a break
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+typedef void * CLzmaEncHandle;
+
+CLzmaEncHandle LzmaEnc_Create(ISzAlloc *alloc);
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAlloc *alloc, ISzAlloc *allocBig);
+SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
+SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ int writeEndMark, ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+
+/* ---------- One Call Interface ---------- */
+
+/* LzmaEncode
+Return code:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_PARAM - Incorrect parameter
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+ ICompressProgress *progress, ISzAlloc *alloc, ISzAlloc *allocBig);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
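
A minimal usage sketch for the one-call interface declared above. The helper name, the buffer handling, and the level override are assumptions for illustration; the SzAlloc/SzFree wrappers follow the same pattern the LzmaUtil sources in this commit use around MyAlloc/MyFree.

#include "Alloc.h"
#include "LzmaEnc.h"

static void *SzAlloc(void *p, size_t size) { p = p; return MyAlloc(size); }
static void SzFree(void *p, void *address) { p = p; MyFree(address); }
static ISzAlloc g_Alloc = { SzAlloc, SzFree };

/* Hypothetical helper: compress inBuf into outBuf in one call.
   On input *outLen is the capacity of outBuf; on success it is the packed size. */
static SRes CompressBuffer(Byte *outBuf, SizeT *outLen,
    Byte *propsOut /* LZMA_PROPS_SIZE bytes */,
    const Byte *inBuf, SizeT inLen)
{
  CLzmaEncProps props;
  SizeT propsSize = LZMA_PROPS_SIZE;
  LzmaEncProps_Init(&props);   /* defaults: level 5, 16 MB dictionary, lc=3, lp=0, pb=2 */
  props.level = 9;             /* any field can be overridden before encoding */
  return LzmaEncode(outBuf, outLen, inBuf, inLen,
      &props, propsOut, &propsSize, 0 /* no end mark */,
      NULL /* no progress */, &g_Alloc, &g_Alloc);
}
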
diff --git a/src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.c b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.c
new file mode 100644
index 000000000..b801dd1ca
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.c
@@ -0,0 +1,61 @@
+/* Lzma86Dec.c -- LZMA + x86 (BCJ) Filter Decoder
+2008-04-07
+Igor Pavlov
+Public domain */
+
+#include "Lzma86Dec.h"
+
+#include "../Alloc.h"
+#include "../Bra.h"
+#include "../LzmaDec.h"
+
+#define LZMA86_SIZE_OFFSET (1 + LZMA_PROPS_SIZE)
+#define LZMA86_HEADER_SIZE (LZMA86_SIZE_OFFSET + 8)
+
+static void *SzAlloc(void *p, size_t size) { p = p; return MyAlloc(size); }
+static void SzFree(void *p, void *address) { p = p; MyFree(address); }
+static ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
+SRes Lzma86_GetUnpackSize(const Byte *src, SizeT srcLen, UInt64 *unpackSize)
+{
+ unsigned i;
+ if (srcLen < LZMA86_HEADER_SIZE)
+ return SZ_ERROR_INPUT_EOF;
+ *unpackSize = 0;
+ for (i = 0; i < sizeof(UInt64); i++)
+ *unpackSize += ((UInt64)src[LZMA86_SIZE_OFFSET + i]) << (8 * i);
+ return SZ_OK;
+}
+
+SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen)
+{
+ SRes res;
+ int useFilter;
+ SizeT inSizePure;
+ ELzmaStatus status;
+
+ if (*srcLen < LZMA86_HEADER_SIZE)
+ return SZ_ERROR_INPUT_EOF;
+
+ useFilter = src[0];
+
+ if (useFilter > 1)
+ {
+ *destLen = 0;
+ return SZ_ERROR_UNSUPPORTED;
+ }
+
+ inSizePure = *srcLen - LZMA86_HEADER_SIZE;
+ res = LzmaDecode(dest, destLen, src + LZMA86_HEADER_SIZE, &inSizePure,
+ src + 1, LZMA_PROPS_SIZE, LZMA_FINISH_ANY, &status, &g_Alloc);
+ *srcLen = inSizePure + LZMA86_HEADER_SIZE;
+ if (res != SZ_OK)
+ return res;
+ if (useFilter == 1)
+ {
+ UInt32 x86State;
+ x86_Convert_Init(x86State);
+ x86_Convert(dest, *destLen, 0, &x86State, 0);
+ }
+ return SZ_OK;
+}
diff --git a/src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.h b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.h
new file mode 100644
index 000000000..138ce1ff7
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Dec.h
@@ -0,0 +1,51 @@
+/* Lzma86Dec.h -- LZMA + x86 (BCJ) Filter Decoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA86_DEC_H
+#define __LZMA86_DEC_H
+
+#include "../Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+Lzma86_GetUnpackSize:
+ In:
+ src - input data
+ srcLen - input data size
+ Out:
+ unpackSize - size of uncompressed stream
+ Return code:
+ SZ_OK - OK
+    SZ_ERROR_INPUT_EOF - input is shorter than the .lzma86 header
+*/
+
+SRes Lzma86_GetUnpackSize(const Byte *src, SizeT srcLen, UInt64 *unpackSize);
+
+/*
+Lzma86_Decode:
+ In:
+ dest - output data
+ destLen - output data size
+ src - input data
+ srcLen - input data size
+ Out:
+ destLen - processed output size
+ srcLen - processed input size
+ Return code:
+ SZ_OK - OK
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - unsupported file
+    SZ_ERROR_INPUT_EOF - more bytes are needed in the input buffer
+*/
+
+SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
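
A minimal sketch of how the two calls above are typically combined for an in-memory .lzma86 image. The helper name and the allocation policy (MyAlloc/MyFree from Alloc.h) are illustrative assumptions, not part of the commit.

#include "../Alloc.h"
#include "Lzma86Dec.h"

/* Hypothetical helper: unpack a complete .lzma86 image held in memory.
   On success *outBuf receives a MyAlloc'ed buffer of *outLen bytes. */
static SRes UnpackLzma86(Byte **outBuf, SizeT *outLen, const Byte *inBuf, SizeT inLen)
{
  UInt64 unpackSize;
  SizeT srcLen = inLen;
  SRes res = Lzma86_GetUnpackSize(inBuf, inLen, &unpackSize);
  if (res != SZ_OK)
    return res;
  *outLen = (SizeT)unpackSize;
  *outBuf = (Byte *)MyAlloc(*outLen == 0 ? 1 : *outLen);
  if (*outBuf == 0)
    return SZ_ERROR_MEM;
  res = Lzma86_Decode(*outBuf, outLen, inBuf, &srcLen);   /* srcLen: bytes consumed */
  if (res != SZ_OK)
  {
    MyFree(*outBuf);
    *outBuf = 0;
  }
  return res;
}
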
diff --git a/src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.c b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.c
new file mode 100644
index 000000000..efc81ea35
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.c
@@ -0,0 +1,113 @@
+/* Lzma86Enc.c -- LZMA + x86 (BCJ) Filter Encoder
+2008-08-05
+Igor Pavlov
+Public domain */
+
+#include <string.h>
+
+#include "Lzma86Enc.h"
+
+#include "../Alloc.h"
+#include "../Bra.h"
+#include "../LzmaEnc.h"
+
+#define SZE_OUT_OVERFLOW SZE_DATA_ERROR
+
+static void *SzAlloc(void *p, size_t size) { p = p; return MyAlloc(size); }
+static void SzFree(void *p, void *address) { p = p; MyFree(address); }
+static ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
+#define LZMA86_SIZE_OFFSET (1 + LZMA_PROPS_SIZE)
+#define LZMA86_HEADER_SIZE (LZMA86_SIZE_OFFSET + 8)
+
+int Lzma86_Encode(Byte *dest, size_t *destLen, const Byte *src, size_t srcLen,
+ int level, UInt32 dictSize, int filterMode)
+{
+ size_t outSize2 = *destLen;
+ Byte *filteredStream;
+ Bool useFilter;
+ int mainResult = SZ_ERROR_OUTPUT_EOF;
+ CLzmaEncProps props;
+ LzmaEncProps_Init(&props);
+ props.level = level;
+ props.dictSize = dictSize;
+
+ *destLen = 0;
+ if (outSize2 < LZMA86_HEADER_SIZE)
+ return SZ_ERROR_OUTPUT_EOF;
+
+ {
+ int i;
+ UInt64 t = srcLen;
+ for (i = 0; i < 8; i++, t >>= 8)
+ dest[LZMA86_SIZE_OFFSET + i] = (Byte)t;
+ }
+
+ filteredStream = 0;
+ useFilter = (filterMode != SZ_FILTER_NO);
+ if (useFilter)
+ {
+ if (srcLen != 0)
+ {
+ filteredStream = (Byte *)MyAlloc(srcLen);
+ if (filteredStream == 0)
+ return SZ_ERROR_MEM;
+ memcpy(filteredStream, src, srcLen);
+ }
+ {
+ UInt32 x86State;
+ x86_Convert_Init(x86State);
+ x86_Convert(filteredStream, srcLen, 0, &x86State, 1);
+ }
+ }
+
+ {
+ size_t minSize = 0;
+ Bool bestIsFiltered = False;
+
+ /* passes for SZ_FILTER_AUTO:
+ 0 - BCJ + LZMA
+ 1 - LZMA
+       2 - BCJ + LZMA again, if pass 0 (BCJ + LZMA) was better.
+ */
+ int numPasses = (filterMode == SZ_FILTER_AUTO) ? 3 : 1;
+
+ int i;
+ for (i = 0; i < numPasses; i++)
+ {
+ size_t outSizeProcessed = outSize2 - LZMA86_HEADER_SIZE;
+ size_t outPropsSize = 5;
+ SRes curRes;
+ Bool curModeIsFiltered = (numPasses > 1 && i == numPasses - 1);
+ if (curModeIsFiltered && !bestIsFiltered)
+ break;
+ if (useFilter && i == 0)
+ curModeIsFiltered = True;
+
+ curRes = LzmaEncode(dest + LZMA86_HEADER_SIZE, &outSizeProcessed,
+ curModeIsFiltered ? filteredStream : src, srcLen,
+ &props, dest + 1, &outPropsSize, 0,
+ NULL, &g_Alloc, &g_Alloc);
+
+ if (curRes != SZ_ERROR_OUTPUT_EOF)
+ {
+ if (curRes != SZ_OK)
+ {
+ mainResult = curRes;
+ break;
+ }
+ if (outSizeProcessed <= minSize || mainResult != SZ_OK)
+ {
+ minSize = outSizeProcessed;
+ bestIsFiltered = curModeIsFiltered;
+ mainResult = SZ_OK;
+ }
+ }
+ }
+ dest[0] = (bestIsFiltered ? 1 : 0);
+ *destLen = LZMA86_HEADER_SIZE + minSize;
+ }
+ if (useFilter)
+ MyFree(filteredStream);
+ return mainResult;
+}
diff --git a/src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.h b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.h
new file mode 100644
index 000000000..355bf343c
--- /dev/null
+++ b/src/libs/7zip/unix/C/LzmaUtil/Lzma86Enc.h
@@ -0,0 +1,78 @@
+/* Lzma86Enc.h -- LZMA + x86 (BCJ) Filter Encoder
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA86_ENC_H
+#define __LZMA86_ENC_H
+
+#include "../Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+This is an example of LZMA + x86 filter usage.
+You can use the .lzma86 extension if you write the stream to a file.
+.lzma86 header adds one additional byte to standard .lzma header.
+.lzma86 header (14 bytes):
+ Offset Size Description
+ 0 1 = 0 - no filter,
+ = 1 - x86 filter
+ 1 1 lc, lp and pb in encoded form
+ 2 4 dictSize (little endian)
+ 6 8 uncompressed size (little endian)
+
+
+Lzma86_Encode
+-------------
+level - compression level: 0 <= level <= 9, the default value for "level" is 5.
+
+
+dictSize - The dictionary size in bytes. The maximum value is
+ 128 MB = (1 << 27) bytes for 32-bit version
+ 1 GB = (1 << 30) bytes for 64-bit version
+ The default value is 16 MB = (1 << 24) bytes, for level = 5.
+  It's recommended to use a dictionary larger than 4 KB whose size
+  can be expressed as (1 << N) or (3 << N).
+  For a better compression ratio, dictSize should be >= inSize.
+
+filterMode:
+ SZ_FILTER_NO - no Filter
+ SZ_FILTER_YES - x86 Filter
+  SZ_FILTER_AUTO - tries both alternatives and selects the better one.
+    The encoder will use 2 or 3 passes:
+    2 passes when FILTER_NO provides better compression,
+    3 passes when FILTER_YES provides better compression.
+
+Lzma86_Encode allocates data with the MyAlloc functions.
+RAM Requirements for compressing:
+ RamSize = dictionarySize * 11.5 + 6MB + FilterBlockSize
+ filterMode FilterBlockSize
+ SZ_FILTER_NO 0
+ SZ_FILTER_YES inSize
+ SZ_FILTER_AUTO inSize
+
+
+Return code:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_PARAM - Incorrect parameter
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+enum ESzFilterMode
+{
+ SZ_FILTER_NO,
+ SZ_FILTER_YES,
+ SZ_FILTER_AUTO
+};
+
+SRes Lzma86_Encode(Byte *dest, size_t *destLen, const Byte *src, size_t srcLen,
+ int level, UInt32 dictSize, int filterMode);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
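
A short usage sketch for the declaration above. The wrapper name and the chosen level/dictionary values are illustrative; they simply mirror the defaults documented in the comment (level 5, 16 MB dictionary, automatic filter selection).

#include "Lzma86Enc.h"

/* Hypothetical helper: pack inBuf into a .lzma86 image.
   On input *outLen is the capacity of outBuf (at least the 14 header bytes);
   on success it is the total size written, including the header. */
static SRes PackLzma86(Byte *outBuf, size_t *outLen, const Byte *inBuf, size_t inLen)
{
  return Lzma86_Encode(outBuf, outLen, inBuf, inLen,
      5 /* level */, (UInt32)1 << 24 /* 16 MB dictionary */, SZ_FILTER_AUTO);
}
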
diff --git a/src/libs/7zip/unix/C/MtCoder.c b/src/libs/7zip/unix/C/MtCoder.c
new file mode 100644
index 000000000..946fbbc70
--- /dev/null
+++ b/src/libs/7zip/unix/C/MtCoder.c
@@ -0,0 +1,327 @@
+/* MtCoder.c -- Multi-thread Coder
+2010-09-24 : Igor Pavlov : Public domain */
+
+#include <stdio.h>
+
+#include "MtCoder.h"
+
+void LoopThread_Construct(CLoopThread *p)
+{
+ Thread_Construct(&p->thread);
+ Event_Construct(&p->startEvent);
+ Event_Construct(&p->finishedEvent);
+}
+
+void LoopThread_Close(CLoopThread *p)
+{
+ Thread_Close(&p->thread);
+ Event_Close(&p->startEvent);
+ Event_Close(&p->finishedEvent);
+}
+
+static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE LoopThreadFunc(void *pp)
+{
+ CLoopThread *p = (CLoopThread *)pp;
+ for (;;)
+ {
+ if (Event_Wait(&p->startEvent) != 0)
+ return SZ_ERROR_THREAD;
+ if (p->stop)
+ return 0;
+ p->res = p->func(p->param);
+ if (Event_Set(&p->finishedEvent) != 0)
+ return SZ_ERROR_THREAD;
+ }
+}
+
+WRes LoopThread_Create(CLoopThread *p)
+{
+ p->stop = 0;
+ RINOK(AutoResetEvent_CreateNotSignaled(&p->startEvent));
+ RINOK(AutoResetEvent_CreateNotSignaled(&p->finishedEvent));
+ return Thread_Create(&p->thread, LoopThreadFunc, p);
+}
+
+WRes LoopThread_StopAndWait(CLoopThread *p)
+{
+ p->stop = 1;
+ if (Event_Set(&p->startEvent) != 0)
+ return SZ_ERROR_THREAD;
+ return Thread_Wait(&p->thread);
+}
+
+WRes LoopThread_StartSubThread(CLoopThread *p) { return Event_Set(&p->startEvent); }
+WRes LoopThread_WaitSubThread(CLoopThread *p) { return Event_Wait(&p->finishedEvent); }
+
+static SRes Progress(ICompressProgress *p, UInt64 inSize, UInt64 outSize)
+{
+ return (p && p->Progress(p, inSize, outSize) != SZ_OK) ? SZ_ERROR_PROGRESS : SZ_OK;
+}
+
+static void MtProgress_Init(CMtProgress *p, ICompressProgress *progress)
+{
+ unsigned i;
+ for (i = 0; i < NUM_MT_CODER_THREADS_MAX; i++)
+ p->inSizes[i] = p->outSizes[i] = 0;
+ p->totalInSize = p->totalOutSize = 0;
+ p->progress = progress;
+ p->res = SZ_OK;
+}
+
+static void MtProgress_Reinit(CMtProgress *p, unsigned index)
+{
+ p->inSizes[index] = 0;
+ p->outSizes[index] = 0;
+}
+
+#define UPDATE_PROGRESS(size, prev, total) \
+ if (size != (UInt64)(Int64)-1) { total += size - prev; prev = size; }
+
+SRes MtProgress_Set(CMtProgress *p, unsigned index, UInt64 inSize, UInt64 outSize)
+{
+ SRes res;
+ CriticalSection_Enter(&p->cs);
+ UPDATE_PROGRESS(inSize, p->inSizes[index], p->totalInSize)
+ UPDATE_PROGRESS(outSize, p->outSizes[index], p->totalOutSize)
+ if (p->res == SZ_OK)
+ p->res = Progress(p->progress, p->totalInSize, p->totalOutSize);
+ res = p->res;
+ CriticalSection_Leave(&p->cs);
+ return res;
+}
+
+static void MtProgress_SetError(CMtProgress *p, SRes res)
+{
+ CriticalSection_Enter(&p->cs);
+ if (p->res == SZ_OK)
+ p->res = res;
+ CriticalSection_Leave(&p->cs);
+}
+
+static void MtCoder_SetError(CMtCoder* p, SRes res)
+{
+ CriticalSection_Enter(&p->cs);
+ if (p->res == SZ_OK)
+ p->res = res;
+ CriticalSection_Leave(&p->cs);
+}
+
+/* ---------- MtThread ---------- */
+
+void CMtThread_Construct(CMtThread *p, CMtCoder *mtCoder)
+{
+ p->mtCoder = mtCoder;
+ p->outBuf = 0;
+ p->inBuf = 0;
+ Event_Construct(&p->canRead);
+ Event_Construct(&p->canWrite);
+ LoopThread_Construct(&p->thread);
+}
+
+#define RINOK_THREAD(x) { if((x) != 0) return SZ_ERROR_THREAD; }
+
+static void CMtThread_CloseEvents(CMtThread *p)
+{
+ Event_Close(&p->canRead);
+ Event_Close(&p->canWrite);
+}
+
+static void CMtThread_Destruct(CMtThread *p)
+{
+ CMtThread_CloseEvents(p);
+
+ if (Thread_WasCreated(&p->thread.thread))
+ {
+ LoopThread_StopAndWait(&p->thread);
+ LoopThread_Close(&p->thread);
+ }
+
+ if (p->mtCoder->alloc)
+ IAlloc_Free(p->mtCoder->alloc, p->outBuf);
+ p->outBuf = 0;
+
+ if (p->mtCoder->alloc)
+ IAlloc_Free(p->mtCoder->alloc, p->inBuf);
+ p->inBuf = 0;
+}
+
+#define MY_BUF_ALLOC(buf, size, newSize) \
+ if (buf == 0 || size != newSize) \
+ { IAlloc_Free(p->mtCoder->alloc, buf); \
+ size = newSize; buf = (Byte *)IAlloc_Alloc(p->mtCoder->alloc, size); \
+ if (buf == 0) return SZ_ERROR_MEM; }
+
+static SRes CMtThread_Prepare(CMtThread *p)
+{
+ MY_BUF_ALLOC(p->inBuf, p->inBufSize, p->mtCoder->blockSize)
+ MY_BUF_ALLOC(p->outBuf, p->outBufSize, p->mtCoder->destBlockSize)
+
+ p->stopReading = False;
+ p->stopWriting = False;
+ RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canRead));
+ RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canWrite));
+
+ return SZ_OK;
+}
+
+static SRes FullRead(ISeqInStream *stream, Byte *data, size_t *processedSize)
+{
+ size_t size = *processedSize;
+ *processedSize = 0;
+ while (size != 0)
+ {
+ size_t curSize = size;
+ SRes res = stream->Read(stream, data, &curSize);
+ *processedSize += curSize;
+ data += curSize;
+ size -= curSize;
+ RINOK(res);
+ if (curSize == 0)
+ return SZ_OK;
+ }
+ return SZ_OK;
+}
+
+#define GET_NEXT_THREAD(p) &p->mtCoder->threads[p->index == p->mtCoder->numThreads - 1 ? 0 : p->index + 1]
+
+static SRes MtThread_Process(CMtThread *p, Bool *stop)
+{
+ CMtThread *next;
+ *stop = True;
+ if (Event_Wait(&p->canRead) != 0)
+ return SZ_ERROR_THREAD;
+
+ next = GET_NEXT_THREAD(p);
+
+ if (p->stopReading)
+ {
+ next->stopReading = True;
+ return Event_Set(&next->canRead) == 0 ? SZ_OK : SZ_ERROR_THREAD;
+ }
+
+ {
+ size_t size = p->mtCoder->blockSize;
+ size_t destSize = p->outBufSize;
+
+ RINOK(FullRead(p->mtCoder->inStream, p->inBuf, &size));
+ next->stopReading = *stop = (size != p->mtCoder->blockSize);
+ if (Event_Set(&next->canRead) != 0)
+ return SZ_ERROR_THREAD;
+
+ RINOK(p->mtCoder->mtCallback->Code(p->mtCoder->mtCallback, p->index,
+ p->outBuf, &destSize, p->inBuf, size, *stop));
+
+ MtProgress_Reinit(&p->mtCoder->mtProgress, p->index);
+
+ if (Event_Wait(&p->canWrite) != 0)
+ return SZ_ERROR_THREAD;
+ if (p->stopWriting)
+ return SZ_ERROR_FAIL;
+ if (p->mtCoder->outStream->Write(p->mtCoder->outStream, p->outBuf, destSize) != destSize)
+ return SZ_ERROR_WRITE;
+ return Event_Set(&next->canWrite) == 0 ? SZ_OK : SZ_ERROR_THREAD;
+ }
+}
+
+static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp)
+{
+ CMtThread *p = (CMtThread *)pp;
+ for (;;)
+ {
+ Bool stop;
+ CMtThread *next = GET_NEXT_THREAD(p);
+ SRes res = MtThread_Process(p, &stop);
+ if (res != SZ_OK)
+ {
+ MtCoder_SetError(p->mtCoder, res);
+ MtProgress_SetError(&p->mtCoder->mtProgress, res);
+ next->stopReading = True;
+ next->stopWriting = True;
+ Event_Set(&next->canRead);
+ Event_Set(&next->canWrite);
+ return res;
+ }
+ if (stop)
+ return 0;
+ }
+}
+
+void MtCoder_Construct(CMtCoder* p)
+{
+ unsigned i;
+ p->alloc = 0;
+ for (i = 0; i < NUM_MT_CODER_THREADS_MAX; i++)
+ {
+ CMtThread *t = &p->threads[i];
+ t->index = i;
+ CMtThread_Construct(t, p);
+ }
+ CriticalSection_Init(&p->cs);
+ CriticalSection_Init(&p->mtProgress.cs);
+}
+
+void MtCoder_Destruct(CMtCoder* p)
+{
+ unsigned i;
+ for (i = 0; i < NUM_MT_CODER_THREADS_MAX; i++)
+ CMtThread_Destruct(&p->threads[i]);
+ CriticalSection_Delete(&p->cs);
+ CriticalSection_Delete(&p->mtProgress.cs);
+}
+
+SRes MtCoder_Code(CMtCoder *p)
+{
+ unsigned i, numThreads = p->numThreads;
+ SRes res = SZ_OK;
+ p->res = SZ_OK;
+
+ MtProgress_Init(&p->mtProgress, p->progress);
+
+ for (i = 0; i < numThreads; i++)
+ {
+ RINOK(CMtThread_Prepare(&p->threads[i]));
+ }
+
+ for (i = 0; i < numThreads; i++)
+ {
+ CMtThread *t = &p->threads[i];
+ CLoopThread *lt = &t->thread;
+
+ if (!Thread_WasCreated(&lt->thread))
+ {
+ lt->func = ThreadFunc;
+ lt->param = t;
+
+ if (LoopThread_Create(lt) != SZ_OK)
+ {
+ res = SZ_ERROR_THREAD;
+ break;
+ }
+ }
+ }
+
+ if (res == SZ_OK)
+ {
+ unsigned j;
+ for (i = 0; i < numThreads; i++)
+ {
+ CMtThread *t = &p->threads[i];
+ if (LoopThread_StartSubThread(&t->thread) != SZ_OK)
+ {
+ res = SZ_ERROR_THREAD;
+ p->threads[0].stopReading = True;
+ break;
+ }
+ }
+
+ Event_Set(&p->threads[0].canWrite);
+ Event_Set(&p->threads[0].canRead);
+
+ for (j = 0; j < i; j++)
+ LoopThread_WaitSubThread(&p->threads[j].thread);
+ }
+
+ for (i = 0; i < numThreads; i++)
+ CMtThread_CloseEvents(&p->threads[i]);
+ return (res == SZ_OK) ? p->res : res;
+}
diff --git a/src/libs/7zip/unix/C/MtCoder.h b/src/libs/7zip/unix/C/MtCoder.h
new file mode 100644
index 000000000..f0f06da28
--- /dev/null
+++ b/src/libs/7zip/unix/C/MtCoder.h
@@ -0,0 +1,98 @@
+/* MtCoder.h -- Multi-thread Coder
+2009-11-19 : Igor Pavlov : Public domain */
+
+#ifndef __MT_CODER_H
+#define __MT_CODER_H
+
+#include "Threads.h"
+
+EXTERN_C_BEGIN
+
+typedef struct
+{
+ CThread thread;
+ CAutoResetEvent startEvent;
+ CAutoResetEvent finishedEvent;
+ int stop;
+
+ THREAD_FUNC_TYPE func;
+ LPVOID param;
+ THREAD_FUNC_RET_TYPE res;
+} CLoopThread;
+
+void LoopThread_Construct(CLoopThread *p);
+void LoopThread_Close(CLoopThread *p);
+WRes LoopThread_Create(CLoopThread *p);
+WRes LoopThread_StopAndWait(CLoopThread *p);
+WRes LoopThread_StartSubThread(CLoopThread *p);
+WRes LoopThread_WaitSubThread(CLoopThread *p);
+
+#ifndef _7ZIP_ST
+#define NUM_MT_CODER_THREADS_MAX 32
+#else
+#define NUM_MT_CODER_THREADS_MAX 1
+#endif
+
+typedef struct
+{
+ UInt64 totalInSize;
+ UInt64 totalOutSize;
+ ICompressProgress *progress;
+ SRes res;
+ CCriticalSection cs;
+ UInt64 inSizes[NUM_MT_CODER_THREADS_MAX];
+ UInt64 outSizes[NUM_MT_CODER_THREADS_MAX];
+} CMtProgress;
+
+SRes MtProgress_Set(CMtProgress *p, unsigned index, UInt64 inSize, UInt64 outSize);
+
+struct _CMtCoder;
+
+typedef struct
+{
+ struct _CMtCoder *mtCoder;
+ Byte *outBuf;
+ size_t outBufSize;
+ Byte *inBuf;
+ size_t inBufSize;
+ unsigned index;
+ CLoopThread thread;
+
+ Bool stopReading;
+ Bool stopWriting;
+ CAutoResetEvent canRead;
+ CAutoResetEvent canWrite;
+} CMtThread;
+
+typedef struct
+{
+ SRes (*Code)(void *p, unsigned index, Byte *dest, size_t *destSize,
+ const Byte *src, size_t srcSize, int finished);
+} IMtCoderCallback;
+
+typedef struct _CMtCoder
+{
+ size_t blockSize;
+ size_t destBlockSize;
+ unsigned numThreads;
+
+ ISeqInStream *inStream;
+ ISeqOutStream *outStream;
+ ICompressProgress *progress;
+ ISzAlloc *alloc;
+
+ IMtCoderCallback *mtCallback;
+ CCriticalSection cs;
+ SRes res;
+
+ CMtProgress mtProgress;
+ CMtThread threads[NUM_MT_CODER_THREADS_MAX];
+} CMtCoder;
+
+void MtCoder_Construct(CMtCoder* p);
+void MtCoder_Destruct(CMtCoder* p);
+SRes MtCoder_Code(CMtCoder *p);
+
+EXTERN_C_END
+
+#endif
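
MtCoder splits the input stream into blockSize chunks, hands each chunk to a worker thread through IMtCoderCallback::Code, and writes the per-block results to outStream in input order. The sketch below shows the callback contract with a trivial "store" coder that just copies its block; the struct, names, and field values are illustrative only, and a real caller (such as an LZMA2 or XZ encoder) would compress src into dest instead.

#include <string.h>
#include "MtCoder.h"

typedef struct
{
  IMtCoderCallback funcTable;   /* kept first so Code can recover the container by cast */
} CStoreCoder;

static SRes Store_Code(void *pp, unsigned index, Byte *dest, size_t *destSize,
    const Byte *src, size_t srcSize, int finished)
{
  pp = pp; index = index; finished = finished;   /* unused in this trivial coder */
  if (*destSize < srcSize)
    return SZ_ERROR_OUTPUT_EOF;
  memcpy(dest, src, srcSize);
  *destSize = srcSize;
  return SZ_OK;
}

/* Usage outline (streams, allocator, and sizes are placeholders):
     CMtCoder coder;
     CStoreCoder store = { { Store_Code } };
     MtCoder_Construct(&coder);
     coder.blockSize = 1 << 20;
     coder.destBlockSize = 1 << 20;   // must fit the worst-case output of one block
     coder.numThreads = 2;            // <= NUM_MT_CODER_THREADS_MAX
     coder.inStream = &inStream;
     coder.outStream = &outStream;
     coder.progress = NULL;
     coder.alloc = &g_Alloc;
     coder.mtCallback = &store.funcTable;
     res = MtCoder_Code(&coder);
     MtCoder_Destruct(&coder);
*/
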
diff --git a/src/libs/7zip/unix/C/Ppmd.h b/src/libs/7zip/unix/C/Ppmd.h
new file mode 100644
index 000000000..621d92703
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd.h
@@ -0,0 +1,85 @@
+/* Ppmd.h -- PPMD codec common code
+2010-03-12 : Igor Pavlov : Public domain
+This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
+
+#ifndef __PPMD_H
+#define __PPMD_H
+
+#include "Types.h"
+#include "CpuArch.h"
+
+EXTERN_C_BEGIN
+
+#ifdef MY_CPU_32BIT
+ #define PPMD_32BIT
+#endif
+
+#define PPMD_INT_BITS 7
+#define PPMD_PERIOD_BITS 7
+#define PPMD_BIN_SCALE (1 << (PPMD_INT_BITS + PPMD_PERIOD_BITS))
+
+#define PPMD_GET_MEAN_SPEC(summ, shift, round) (((summ) + (1 << ((shift) - (round)))) >> (shift))
+#define PPMD_GET_MEAN(summ) PPMD_GET_MEAN_SPEC((summ), PPMD_PERIOD_BITS, 2)
+#define PPMD_UPDATE_PROB_0(prob) ((prob) + (1 << PPMD_INT_BITS) - PPMD_GET_MEAN(prob))
+#define PPMD_UPDATE_PROB_1(prob) ((prob) - PPMD_GET_MEAN(prob))
+
+#define PPMD_N1 4
+#define PPMD_N2 4
+#define PPMD_N3 4
+#define PPMD_N4 ((128 + 3 - 1 * PPMD_N1 - 2 * PPMD_N2 - 3 * PPMD_N3) / 4)
+#define PPMD_NUM_INDEXES (PPMD_N1 + PPMD_N2 + PPMD_N3 + PPMD_N4)
+
+#pragma pack(push,1)
+
+/* SEE-contexts for PPM-contexts with masked symbols */
+typedef struct
+{
+ UInt16 Summ; /* Freq */
+ Byte Shift; /* Speed of Freq change; low Shift is for fast change */
+ Byte Count; /* Count to next change of Shift */
+} CPpmd_See;
+
+#define Ppmd_See_Update(p) if ((p)->Shift < PPMD_PERIOD_BITS && --(p)->Count == 0) \
+ { (p)->Summ <<= 1; (p)->Count = (Byte)(3 << (p)->Shift++); }
+
+typedef struct
+{
+ Byte Symbol;
+ Byte Freq;
+ UInt16 SuccessorLow;
+ UInt16 SuccessorHigh;
+} CPpmd_State;
+
+#pragma pack(pop)
+
+typedef
+ #ifdef PPMD_32BIT
+ CPpmd_State *
+ #else
+ UInt32
+ #endif
+ CPpmd_State_Ref;
+
+typedef
+ #ifdef PPMD_32BIT
+ void *
+ #else
+ UInt32
+ #endif
+ CPpmd_Void_Ref;
+
+typedef
+ #ifdef PPMD_32BIT
+ Byte *
+ #else
+ UInt32
+ #endif
+ CPpmd_Byte_Ref;
+
+#define PPMD_SetAllBitsIn256Bytes(p) \
+ { unsigned i; for (i = 0; i < 256 / sizeof(p[0]); i += 8) { \
+ p[i+7] = p[i+6] = p[i+5] = p[i+4] = p[i+3] = p[i+2] = p[i+1] = p[i+0] = ~(size_t)0; }}
+
+EXTERN_C_END
+
+#endif
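
A small numeric illustration of the binary-context update macros above, which both PPMd codecs in this commit use for single-symbol contexts. This is illustration only (not part of the commit): PPMD_GET_MEAN(prob) is roughly prob / 128, so a correctly predicted bit moves prob toward PPMD_BIN_SCALE and a mispredicted one moves it toward 0.

#include "Ppmd.h"

static void Ppmd_BinProbExample(void)
{
  UInt16 prob = 1 << 13;                              /* mid-range value: 8192  */
  UInt16 after0 = (UInt16)PPMD_UPDATE_PROB_0(prob);   /* 8192 + 128 - 64 = 8256 */
  UInt16 after1 = (UInt16)PPMD_UPDATE_PROB_1(prob);   /* 8192 - 64       = 8128 */
  after0 = after0; after1 = after1;                   /* silence unused warnings */
}
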
diff --git a/src/libs/7zip/unix/C/Ppmd7.c b/src/libs/7zip/unix/C/Ppmd7.c
new file mode 100644
index 000000000..060d86d2e
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd7.c
@@ -0,0 +1,708 @@
+/* Ppmd7.c -- PPMdH codec
+2010-03-12 : Igor Pavlov : Public domain
+This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
+
+#include <memory.h>
+
+#include "Ppmd7.h"
+
+const Byte PPMD7_kExpEscape[16] = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
+static const UInt16 kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
+
+#define MAX_FREQ 124
+#define UNIT_SIZE 12
+
+#define U2B(nu) ((UInt32)(nu) * UNIT_SIZE)
+#define U2I(nu) (p->Units2Indx[(nu) - 1])
+#define I2U(indx) (p->Indx2Units[indx])
+
+#ifdef PPMD_32BIT
+ #define REF(ptr) (ptr)
+#else
+ #define REF(ptr) ((UInt32)((Byte *)(ptr) - (p)->Base))
+#endif
+
+#define STATS_REF(ptr) ((CPpmd_State_Ref)REF(ptr))
+
+#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
+#define STATS(ctx) Ppmd7_GetStats(p, ctx)
+#define ONE_STATE(ctx) Ppmd7Context_OneState(ctx)
+#define SUFFIX(ctx) CTX((ctx)->Suffix)
+
+typedef CPpmd7_Context * CTX_PTR;
+
+struct CPpmd7_Node_;
+
+typedef
+ #ifdef PPMD_32BIT
+ struct CPpmd7_Node_ *
+ #else
+ UInt32
+ #endif
+ CPpmd7_Node_Ref;
+
+typedef struct CPpmd7_Node_
+{
+ UInt16 Stamp; /* must be at offset 0 as CPpmd7_Context::NumStats. Stamp=0 means free */
+ UInt16 NU;
+ CPpmd7_Node_Ref Next; /* must be at offset >= 4 */
+ CPpmd7_Node_Ref Prev;
+} CPpmd7_Node;
+
+#ifdef PPMD_32BIT
+ #define NODE(ptr) (ptr)
+#else
+ #define NODE(offs) ((CPpmd7_Node *)(p->Base + (offs)))
+#endif
+
+void Ppmd7_Construct(CPpmd7 *p)
+{
+ unsigned i, k, m;
+
+ p->Base = 0;
+
+ for (i = 0, k = 0; i < PPMD_NUM_INDEXES; i++)
+ {
+ unsigned step = (i >= 12 ? 4 : (i >> 2) + 1);
+ do { p->Units2Indx[k++] = (Byte)i; } while(--step);
+ p->Indx2Units[i] = (Byte)k;
+ }
+
+ p->NS2BSIndx[0] = (0 << 1);
+ p->NS2BSIndx[1] = (1 << 1);
+ memset(p->NS2BSIndx + 2, (2 << 1), 9);
+ memset(p->NS2BSIndx + 11, (3 << 1), 256 - 11);
+
+ for (i = 0; i < 3; i++)
+ p->NS2Indx[i] = (Byte)i;
+ for (m = i, k = 1; i < 256; i++)
+ {
+ p->NS2Indx[i] = (Byte)m;
+ if (--k == 0)
+ k = (++m) - 2;
+ }
+
+ memset(p->HB2Flag, 0, 0x40);
+ memset(p->HB2Flag + 0x40, 8, 0x100 - 0x40);
+}
+
+void Ppmd7_Free(CPpmd7 *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->Base);
+ p->Size = 0;
+ p->Base = 0;
+}
+
+Bool Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAlloc *alloc)
+{
+ if (p->Base == 0 || p->Size != size)
+ {
+ Ppmd7_Free(p, alloc);
+ p->AlignOffset =
+ #ifdef PPMD_32BIT
+ (4 - size) & 3;
+ #else
+ 4 - (size & 3);
+ #endif
+ if ((p->Base = (Byte *)alloc->Alloc(alloc, p->AlignOffset + size
+ #ifndef PPMD_32BIT
+ + UNIT_SIZE
+ #endif
+ )) == 0)
+ return False;
+ p->Size = size;
+ }
+ return True;
+}
+
+static void InsertNode(CPpmd7 *p, void *node, unsigned indx)
+{
+ *((CPpmd_Void_Ref *)node) = p->FreeList[indx];
+ p->FreeList[indx] = REF(node);
+}
+
+static void *RemoveNode(CPpmd7 *p, unsigned indx)
+{
+ CPpmd_Void_Ref *node = (CPpmd_Void_Ref *)Ppmd7_GetPtr(p, p->FreeList[indx]);
+ p->FreeList[indx] = *node;
+ return node;
+}
+
+static void SplitBlock(CPpmd7 *p, void *ptr, unsigned oldIndx, unsigned newIndx)
+{
+ unsigned i, nu = I2U(oldIndx) - I2U(newIndx);
+ ptr = (Byte *)ptr + U2B(I2U(newIndx));
+ if (I2U(i = U2I(nu)) != nu)
+ {
+ unsigned k = I2U(--i);
+ InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1);
+ }
+ InsertNode(p, ptr, i);
+}
+
+static void GlueFreeBlocks(CPpmd7 *p)
+{
+ #ifdef PPMD_32BIT
+ CPpmd7_Node headItem;
+ CPpmd7_Node_Ref head = &headItem;
+ #else
+ CPpmd7_Node_Ref head = p->AlignOffset + p->Size;
+ #endif
+
+ CPpmd7_Node_Ref n = head;
+ unsigned i;
+
+ p->GlueCount = 255;
+
+ /* create doubly-linked list of free blocks */
+ for (i = 0; i < PPMD_NUM_INDEXES; i++)
+ {
+ UInt16 nu = I2U(i);
+ CPpmd7_Node_Ref next = (CPpmd7_Node_Ref)p->FreeList[i];
+ p->FreeList[i] = 0;
+ while (next != 0)
+ {
+ CPpmd7_Node *node = NODE(next);
+ node->Next = n;
+ n = NODE(n)->Prev = next;
+ next = *(const CPpmd7_Node_Ref *)node;
+ node->Stamp = 0;
+ node->NU = (UInt16)nu;
+ }
+ }
+ NODE(head)->Stamp = 1;
+ NODE(head)->Next = n;
+ NODE(n)->Prev = head;
+ if (p->LoUnit != p->HiUnit)
+ ((CPpmd7_Node *)p->LoUnit)->Stamp = 1;
+
+ /* Glue free blocks */
+ while (n != head)
+ {
+ CPpmd7_Node *node = NODE(n);
+ UInt32 nu = (UInt32)node->NU;
+ for (;;)
+ {
+ CPpmd7_Node *node2 = NODE(n) + nu;
+ nu += node2->NU;
+ if (node2->Stamp != 0 || nu >= 0x10000)
+ break;
+ NODE(node2->Prev)->Next = node2->Next;
+ NODE(node2->Next)->Prev = node2->Prev;
+ node->NU = (UInt16)nu;
+ }
+ n = node->Next;
+ }
+
+ /* Fill lists of free blocks */
+ for (n = NODE(head)->Next; n != head;)
+ {
+ CPpmd7_Node *node = NODE(n);
+ unsigned nu;
+ CPpmd7_Node_Ref next = node->Next;
+ for (nu = node->NU; nu > 128; nu -= 128, node += 128)
+ InsertNode(p, node, PPMD_NUM_INDEXES - 1);
+ if (I2U(i = U2I(nu)) != nu)
+ {
+ unsigned k = I2U(--i);
+ InsertNode(p, node + k, nu - k - 1);
+ }
+ InsertNode(p, node, i);
+ n = next;
+ }
+}
+
+static void *AllocUnitsRare(CPpmd7 *p, unsigned indx)
+{
+ unsigned i;
+ void *retVal;
+ if (p->GlueCount == 0)
+ {
+ GlueFreeBlocks(p);
+ if (p->FreeList[indx] != 0)
+ return RemoveNode(p, indx);
+ }
+ i = indx;
+ do
+ {
+ if (++i == PPMD_NUM_INDEXES)
+ {
+ UInt32 numBytes = U2B(I2U(indx));
+ p->GlueCount--;
+ return ((UInt32)(p->UnitsStart - p->Text) > numBytes) ? (p->UnitsStart -= numBytes) : (NULL);
+ }
+ }
+ while (p->FreeList[i] == 0);
+ retVal = RemoveNode(p, i);
+ SplitBlock(p, retVal, i, indx);
+ return retVal;
+}
+
+static void *AllocUnits(CPpmd7 *p, unsigned indx)
+{
+ UInt32 numBytes;
+ if (p->FreeList[indx] != 0)
+ return RemoveNode(p, indx);
+ numBytes = U2B(I2U(indx));
+ if (numBytes <= (UInt32)(p->HiUnit - p->LoUnit))
+ {
+ void *retVal = p->LoUnit;
+ p->LoUnit += numBytes;
+ return retVal;
+ }
+ return AllocUnitsRare(p, indx);
+}
+
+#define MyMem12Cpy(dest, src, num) \
+ { UInt32 *d = (UInt32 *)dest; const UInt32 *s = (const UInt32 *)src; UInt32 n = num; \
+ do { d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; s += 3; d += 3; } while(--n); }
+
+static void *ShrinkUnits(CPpmd7 *p, void *oldPtr, unsigned oldNU, unsigned newNU)
+{
+ unsigned i0 = U2I(oldNU);
+ unsigned i1 = U2I(newNU);
+ if (i0 == i1)
+ return oldPtr;
+ if (p->FreeList[i1] != 0)
+ {
+ void *ptr = RemoveNode(p, i1);
+ MyMem12Cpy(ptr, oldPtr, newNU);
+ InsertNode(p, oldPtr, i0);
+ return ptr;
+ }
+ SplitBlock(p, oldPtr, i0, i1);
+ return oldPtr;
+}
+
+#define SUCCESSOR(p) ((CPpmd_Void_Ref)((p)->SuccessorLow | ((UInt32)(p)->SuccessorHigh << 16)))
+
+static void SetSuccessor(CPpmd_State *p, CPpmd_Void_Ref v)
+{
+ (p)->SuccessorLow = (UInt16)((UInt32)(v) & 0xFFFF);
+ (p)->SuccessorHigh = (UInt16)(((UInt32)(v) >> 16) & 0xFFFF);
+}
+
+static void RestartModel(CPpmd7 *p)
+{
+ unsigned i, k, m;
+
+ memset(p->FreeList, 0, sizeof(p->FreeList));
+ p->Text = p->Base + p->AlignOffset;
+ p->HiUnit = p->Text + p->Size;
+ p->LoUnit = p->UnitsStart = p->HiUnit - p->Size / 8 / UNIT_SIZE * 7 * UNIT_SIZE;
+ p->GlueCount = 0;
+
+ p->OrderFall = p->MaxOrder;
+ p->RunLength = p->InitRL = -(Int32)((p->MaxOrder < 12) ? p->MaxOrder : 12) - 1;
+ p->PrevSuccess = 0;
+
+ p->MinContext = p->MaxContext = (CTX_PTR)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */
+ p->MinContext->Suffix = 0;
+ p->MinContext->NumStats = 256;
+ p->MinContext->SummFreq = 256 + 1;
+ p->FoundState = (CPpmd_State *)p->LoUnit; /* AllocUnits(p, PPMD_NUM_INDEXES - 1); */
+ p->LoUnit += U2B(256 / 2);
+ p->MinContext->Stats = REF(p->FoundState);
+ for (i = 0; i < 256; i++)
+ {
+ CPpmd_State *s = &p->FoundState[i];
+ s->Symbol = (Byte)i;
+ s->Freq = 1;
+ SetSuccessor(s, 0);
+ }
+
+ for (i = 0; i < 128; i++)
+ for (k = 0; k < 8; k++)
+ {
+ UInt16 *dest = p->BinSumm[i] + k;
+ UInt16 val = (UInt16)(PPMD_BIN_SCALE - kInitBinEsc[k] / (i + 2));
+ for (m = 0; m < 64; m += 8)
+ dest[m] = val;
+ }
+
+ for (i = 0; i < 25; i++)
+ for (k = 0; k < 16; k++)
+ {
+ CPpmd_See *s = &p->See[i][k];
+ s->Summ = (UInt16)((5 * i + 10) << (s->Shift = PPMD_PERIOD_BITS - 4));
+ s->Count = 4;
+ }
+}
+
+void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder)
+{
+ p->MaxOrder = maxOrder;
+ RestartModel(p);
+ p->DummySee.Shift = PPMD_PERIOD_BITS;
+ p->DummySee.Summ = 0; /* unused */
+ p->DummySee.Count = 64; /* unused */
+}
+
+static CTX_PTR CreateSuccessors(CPpmd7 *p, Bool skip)
+{
+ CPpmd_State upState;
+ CTX_PTR c = p->MinContext;
+ CPpmd_Byte_Ref upBranch = (CPpmd_Byte_Ref)SUCCESSOR(p->FoundState);
+ CPpmd_State *ps[PPMD7_MAX_ORDER];
+ unsigned numPs = 0;
+
+ if (!skip)
+ ps[numPs++] = p->FoundState;
+
+ while (c->Suffix)
+ {
+ CPpmd_Void_Ref successor;
+ CPpmd_State *s;
+ c = SUFFIX(c);
+ if (c->NumStats != 1)
+ {
+ for (s = STATS(c); s->Symbol != p->FoundState->Symbol; s++);
+ }
+ else
+ s = ONE_STATE(c);
+ successor = SUCCESSOR(s);
+ if (successor != upBranch)
+ {
+ c = CTX(successor);
+ if (numPs == 0)
+ return c;
+ break;
+ }
+ ps[numPs++] = s;
+ }
+
+ upState.Symbol = *(const Byte *)Ppmd7_GetPtr(p, upBranch);
+ SetSuccessor(&upState, upBranch + 1);
+
+ if (c->NumStats == 1)
+ upState.Freq = ONE_STATE(c)->Freq;
+ else
+ {
+ UInt32 cf, s0;
+ CPpmd_State *s;
+ for (s = STATS(c); s->Symbol != upState.Symbol; s++);
+ cf = s->Freq - 1;
+ s0 = c->SummFreq - c->NumStats - cf;
+ upState.Freq = (Byte)(1 + ((2 * cf <= s0) ? (5 * cf > s0) : ((2 * cf + 3 * s0 - 1) / (2 * s0))));
+ }
+
+ do
+ {
+ /* Create Child */
+ CTX_PTR c1; /* = AllocContext(p); */
+ if (p->HiUnit != p->LoUnit)
+ c1 = (CTX_PTR)(p->HiUnit -= UNIT_SIZE);
+ else if (p->FreeList[0] != 0)
+ c1 = (CTX_PTR)RemoveNode(p, 0);
+ else
+ {
+ c1 = (CTX_PTR)AllocUnitsRare(p, 0);
+ if (!c1)
+ return NULL;
+ }
+ c1->NumStats = 1;
+ *ONE_STATE(c1) = upState;
+ c1->Suffix = REF(c);
+ SetSuccessor(ps[--numPs], REF(c1));
+ c = c1;
+ }
+ while (numPs != 0);
+
+ return c;
+}
+
+static void SwapStates(CPpmd_State *t1, CPpmd_State *t2)
+{
+ CPpmd_State tmp = *t1;
+ *t1 = *t2;
+ *t2 = tmp;
+}
+
+static void UpdateModel(CPpmd7 *p)
+{
+ CPpmd_Void_Ref successor, fSuccessor = SUCCESSOR(p->FoundState);
+ CTX_PTR c;
+ unsigned s0, ns;
+
+ if (p->FoundState->Freq < MAX_FREQ / 4 && p->MinContext->Suffix != 0)
+ {
+ c = SUFFIX(p->MinContext);
+
+ if (c->NumStats == 1)
+ {
+ CPpmd_State *s = ONE_STATE(c);
+ if (s->Freq < 32)
+ s->Freq++;
+ }
+ else
+ {
+ CPpmd_State *s = STATS(c);
+ if (s->Symbol != p->FoundState->Symbol)
+ {
+ do { s++; } while (s->Symbol != p->FoundState->Symbol);
+ if (s[0].Freq >= s[-1].Freq)
+ {
+ SwapStates(&s[0], &s[-1]);
+ s--;
+ }
+ }
+ if (s->Freq < MAX_FREQ - 9)
+ {
+ s->Freq += 2;
+ c->SummFreq += 2;
+ }
+ }
+ }
+
+ if (p->OrderFall == 0)
+ {
+ p->MinContext = p->MaxContext = CreateSuccessors(p, True);
+ if (p->MinContext == 0)
+ {
+ RestartModel(p);
+ return;
+ }
+ SetSuccessor(p->FoundState, REF(p->MinContext));
+ return;
+ }
+
+ *p->Text++ = p->FoundState->Symbol;
+ successor = REF(p->Text);
+ if (p->Text >= p->UnitsStart)
+ {
+ RestartModel(p);
+ return;
+ }
+
+ if (fSuccessor)
+ {
+ if (fSuccessor <= successor)
+ {
+ CTX_PTR cs = CreateSuccessors(p, False);
+ if (cs == NULL)
+ {
+ RestartModel(p);
+ return;
+ }
+ fSuccessor = REF(cs);
+ }
+ if (--p->OrderFall == 0)
+ {
+ successor = fSuccessor;
+ p->Text -= (p->MaxContext != p->MinContext);
+ }
+ }
+ else
+ {
+ SetSuccessor(p->FoundState, successor);
+ fSuccessor = REF(p->MinContext);
+ }
+
+ s0 = p->MinContext->SummFreq - (ns = p->MinContext->NumStats) - (p->FoundState->Freq - 1);
+
+ for (c = p->MaxContext; c != p->MinContext; c = SUFFIX(c))
+ {
+ unsigned ns1;
+ UInt32 cf, sf;
+ if ((ns1 = c->NumStats) != 1)
+ {
+ if ((ns1 & 1) == 0)
+ {
+ /* Expand for one UNIT */
+ unsigned oldNU = ns1 >> 1;
+ unsigned i = U2I(oldNU);
+ if (i != U2I(oldNU + 1))
+ {
+ void *ptr = AllocUnits(p, i + 1);
+ void *oldPtr;
+ if (!ptr)
+ {
+ RestartModel(p);
+ return;
+ }
+ oldPtr = STATS(c);
+ MyMem12Cpy(ptr, oldPtr, oldNU);
+ InsertNode(p, oldPtr, i);
+ c->Stats = STATS_REF(ptr);
+ }
+ }
+ c->SummFreq = (UInt16)(c->SummFreq + (2 * ns1 < ns) + 2 * ((4 * ns1 <= ns) & (c->SummFreq <= 8 * ns1)));
+ }
+ else
+ {
+ CPpmd_State *s = (CPpmd_State*)AllocUnits(p, 0);
+ if (!s)
+ {
+ RestartModel(p);
+ return;
+ }
+ *s = *ONE_STATE(c);
+ c->Stats = REF(s);
+ if (s->Freq < MAX_FREQ / 4 - 1)
+ s->Freq <<= 1;
+ else
+ s->Freq = MAX_FREQ - 4;
+ c->SummFreq = (UInt16)(s->Freq + p->InitEsc + (ns > 3));
+ }
+ cf = 2 * (UInt32)p->FoundState->Freq * (c->SummFreq + 6);
+ sf = (UInt32)s0 + c->SummFreq;
+ if (cf < 6 * sf)
+ {
+ cf = 1 + (cf > sf) + (cf >= 4 * sf);
+ c->SummFreq += 3;
+ }
+ else
+ {
+ cf = 4 + (cf >= 9 * sf) + (cf >= 12 * sf) + (cf >= 15 * sf);
+ c->SummFreq = (UInt16)(c->SummFreq + cf);
+ }
+ {
+ CPpmd_State *s = STATS(c) + ns1;
+ SetSuccessor(s, successor);
+ s->Symbol = p->FoundState->Symbol;
+ s->Freq = (Byte)cf;
+ c->NumStats = (UInt16)(ns1 + 1);
+ }
+ }
+ p->MaxContext = p->MinContext = CTX(fSuccessor);
+}
+
+static void Rescale(CPpmd7 *p)
+{
+ unsigned i, adder, sumFreq, escFreq;
+ CPpmd_State *stats = STATS(p->MinContext);
+ CPpmd_State *s = p->FoundState;
+ {
+ CPpmd_State tmp = *s;
+ for (; s != stats; s--)
+ s[0] = s[-1];
+ *s = tmp;
+ }
+ escFreq = p->MinContext->SummFreq - s->Freq;
+ s->Freq += 4;
+ adder = (p->OrderFall != 0);
+ s->Freq = (Byte)((s->Freq + adder) >> 1);
+ sumFreq = s->Freq;
+
+ i = p->MinContext->NumStats - 1;
+ do
+ {
+ escFreq -= (++s)->Freq;
+ s->Freq = (Byte)((s->Freq + adder) >> 1);
+ sumFreq += s->Freq;
+ if (s[0].Freq > s[-1].Freq)
+ {
+ CPpmd_State *s1 = s;
+ CPpmd_State tmp = *s1;
+ do
+ s1[0] = s1[-1];
+ while (--s1 != stats && tmp.Freq > s1[-1].Freq);
+ *s1 = tmp;
+ }
+ }
+ while (--i);
+
+ if (s->Freq == 0)
+ {
+ unsigned numStats = p->MinContext->NumStats;
+ unsigned n0, n1;
+ do { i++; } while ((--s)->Freq == 0);
+ escFreq += i;
+ p->MinContext->NumStats = (UInt16)(p->MinContext->NumStats - i);
+ if (p->MinContext->NumStats == 1)
+ {
+ CPpmd_State tmp = *stats;
+ do
+ {
+ tmp.Freq = (Byte)(tmp.Freq - (tmp.Freq >> 1));
+ escFreq >>= 1;
+ }
+ while (escFreq > 1);
+ InsertNode(p, stats, U2I(((numStats + 1) >> 1)));
+ *(p->FoundState = ONE_STATE(p->MinContext)) = tmp;
+ return;
+ }
+ n0 = (numStats + 1) >> 1;
+ n1 = (p->MinContext->NumStats + 1) >> 1;
+ if (n0 != n1)
+ p->MinContext->Stats = STATS_REF(ShrinkUnits(p, stats, n0, n1));
+ }
+ p->MinContext->SummFreq = (UInt16)(sumFreq + escFreq - (escFreq >> 1));
+ p->FoundState = STATS(p->MinContext);
+}
+
+CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *escFreq)
+{
+ CPpmd_See *see;
+ unsigned nonMasked = p->MinContext->NumStats - numMasked;
+ if (p->MinContext->NumStats != 256)
+ {
+ see = p->See[p->NS2Indx[nonMasked - 1]] +
+ (nonMasked < (unsigned)SUFFIX(p->MinContext)->NumStats - p->MinContext->NumStats) +
+ 2 * (p->MinContext->SummFreq < 11 * p->MinContext->NumStats) +
+ 4 * (numMasked > nonMasked) +
+ p->HiBitsFlag;
+ {
+ unsigned r = (see->Summ >> see->Shift);
+ see->Summ = (UInt16)(see->Summ - r);
+ *escFreq = r + (r == 0);
+ }
+ }
+ else
+ {
+ see = &p->DummySee;
+ *escFreq = 1;
+ }
+ return see;
+}
+
+static void NextContext(CPpmd7 *p)
+{
+ CTX_PTR c = CTX(SUCCESSOR(p->FoundState));
+ if (p->OrderFall == 0 && (Byte *)c > p->Text)
+ p->MinContext = p->MaxContext = c;
+ else
+ UpdateModel(p);
+}
+
+void Ppmd7_Update1(CPpmd7 *p)
+{
+ CPpmd_State *s = p->FoundState;
+ s->Freq += 4;
+ p->MinContext->SummFreq += 4;
+ if (s[0].Freq > s[-1].Freq)
+ {
+ SwapStates(&s[0], &s[-1]);
+ p->FoundState = --s;
+ if (s->Freq > MAX_FREQ)
+ Rescale(p);
+ }
+ NextContext(p);
+}
+
+void Ppmd7_Update1_0(CPpmd7 *p)
+{
+ p->PrevSuccess = (2 * p->FoundState->Freq > p->MinContext->SummFreq);
+ p->RunLength += p->PrevSuccess;
+ p->MinContext->SummFreq += 4;
+ if ((p->FoundState->Freq += 4) > MAX_FREQ)
+ Rescale(p);
+ NextContext(p);
+}
+
+void Ppmd7_UpdateBin(CPpmd7 *p)
+{
+ p->FoundState->Freq = (Byte)(p->FoundState->Freq + (p->FoundState->Freq < 128 ? 1: 0));
+ p->PrevSuccess = 1;
+ p->RunLength++;
+ NextContext(p);
+}
+
+void Ppmd7_Update2(CPpmd7 *p)
+{
+ p->MinContext->SummFreq += 4;
+ if ((p->FoundState->Freq += 4) > MAX_FREQ)
+ Rescale(p);
+ p->RunLength = p->InitRL;
+ UpdateModel(p);
+}
diff --git a/src/libs/7zip/unix/C/Ppmd7.h b/src/libs/7zip/unix/C/Ppmd7.h
new file mode 100644
index 000000000..96521c31f
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd7.h
@@ -0,0 +1,140 @@
+/* Ppmd7.h -- PPMdH compression codec
+2010-03-12 : Igor Pavlov : Public domain
+This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
+
+/* This code supports a virtual RangeDecoder and includes the implementation
+of the RangeCoder from 7z instead of the RangeCoder from the original PPMd var.H.
+If you need compatibility with the original PPMd var.H, you can use an external RangeDecoder. */
+
+#ifndef __PPMD7_H
+#define __PPMD7_H
+
+#include "Ppmd.h"
+
+EXTERN_C_BEGIN
+
+#define PPMD7_MIN_ORDER 2
+#define PPMD7_MAX_ORDER 64
+
+#define PPMD7_MIN_MEM_SIZE (1 << 11)
+#define PPMD7_MAX_MEM_SIZE (0xFFFFFFFF - 12 * 3)
+
+struct CPpmd7_Context_;
+
+typedef
+ #ifdef PPMD_32BIT
+ struct CPpmd7_Context_ *
+ #else
+ UInt32
+ #endif
+ CPpmd7_Context_Ref;
+
+typedef struct CPpmd7_Context_
+{
+ UInt16 NumStats;
+ UInt16 SummFreq;
+ CPpmd_State_Ref Stats;
+ CPpmd7_Context_Ref Suffix;
+} CPpmd7_Context;
+
+#define Ppmd7Context_OneState(p) ((CPpmd_State *)&(p)->SummFreq)
+
+typedef struct
+{
+ CPpmd7_Context *MinContext, *MaxContext;
+ CPpmd_State *FoundState;
+ unsigned OrderFall, InitEsc, PrevSuccess, MaxOrder, HiBitsFlag;
+ Int32 RunLength, InitRL; /* must be 32-bit at least */
+
+ UInt32 Size;
+ UInt32 GlueCount;
+ Byte *Base, *LoUnit, *HiUnit, *Text, *UnitsStart;
+ UInt32 AlignOffset;
+
+ Byte Indx2Units[PPMD_NUM_INDEXES];
+ Byte Units2Indx[128];
+ CPpmd_Void_Ref FreeList[PPMD_NUM_INDEXES];
+ Byte NS2Indx[256], NS2BSIndx[256], HB2Flag[256];
+ CPpmd_See DummySee, See[25][16];
+ UInt16 BinSumm[128][64];
+} CPpmd7;
+
+void Ppmd7_Construct(CPpmd7 *p);
+Bool Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAlloc *alloc);
+void Ppmd7_Free(CPpmd7 *p, ISzAlloc *alloc);
+void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder);
+#define Ppmd7_WasAllocated(p) ((p)->Base != NULL)
+
+
+/* ---------- Internal Functions ---------- */
+
+extern const Byte PPMD7_kExpEscape[16];
+
+#ifdef PPMD_32BIT
+ #define Ppmd7_GetPtr(p, ptr) (ptr)
+ #define Ppmd7_GetContext(p, ptr) (ptr)
+ #define Ppmd7_GetStats(p, ctx) ((ctx)->Stats)
+#else
+ #define Ppmd7_GetPtr(p, offs) ((void *)((p)->Base + (offs)))
+ #define Ppmd7_GetContext(p, offs) ((CPpmd7_Context *)Ppmd7_GetPtr((p), (offs)))
+ #define Ppmd7_GetStats(p, ctx) ((CPpmd_State *)Ppmd7_GetPtr((p), ((ctx)->Stats)))
+#endif
+
+void Ppmd7_Update1(CPpmd7 *p);
+void Ppmd7_Update1_0(CPpmd7 *p);
+void Ppmd7_Update2(CPpmd7 *p);
+void Ppmd7_UpdateBin(CPpmd7 *p);
+
+#define Ppmd7_GetBinSumm(p) \
+ &p->BinSumm[Ppmd7Context_OneState(p->MinContext)->Freq - 1][p->PrevSuccess + \
+ p->NS2BSIndx[Ppmd7_GetContext(p, p->MinContext->Suffix)->NumStats - 1] + \
+ (p->HiBitsFlag = p->HB2Flag[p->FoundState->Symbol]) + \
+ 2 * p->HB2Flag[Ppmd7Context_OneState(p->MinContext)->Symbol] + \
+ ((p->RunLength >> 26) & 0x20)]
+
+CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *scale);
+
+
+/* ---------- Decode ---------- */
+
+typedef struct
+{
+ UInt32 (*GetThreshold)(void *p, UInt32 total);
+ void (*Decode)(void *p, UInt32 start, UInt32 size);
+ UInt32 (*DecodeBit)(void *p, UInt32 size0);
+} IPpmd7_RangeDec;
+
+typedef struct
+{
+ IPpmd7_RangeDec p;
+ UInt32 Range;
+ UInt32 Code;
+ IByteIn *Stream;
+} CPpmd7z_RangeDec;
+
+void Ppmd7z_RangeDec_CreateVTable(CPpmd7z_RangeDec *p);
+Bool Ppmd7z_RangeDec_Init(CPpmd7z_RangeDec *p);
+#define Ppmd7z_RangeDec_IsFinishedOK(p) ((p)->Code == 0)
+
+int Ppmd7_DecodeSymbol(CPpmd7 *p, IPpmd7_RangeDec *rc);
+
+
+/* ---------- Encode ---------- */
+
+typedef struct
+{
+ UInt64 Low;
+ UInt32 Range;
+ Byte Cache;
+ UInt64 CacheSize;
+ IByteOut *Stream;
+} CPpmd7z_RangeEnc;
+
+void Ppmd7z_RangeEnc_Init(CPpmd7z_RangeEnc *p);
+void Ppmd7z_RangeEnc_FlushData(CPpmd7z_RangeEnc *p);
+
+void Ppmd7_EncodeSymbol(CPpmd7 *p, CPpmd7z_RangeEnc *rc, int symbol);
+
+EXTERN_C_END
+
+#endif
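
The decode interface above is typically driven as follows: construct and allocate the model, create the range-decoder vtable, point its Stream at a byte source, then call Ppmd7_DecodeSymbol once per output byte. The sketch below is illustrative only: the memory-backed IByteIn, the helper name, and the order/memSize parameters are assumptions (IByteIn itself is declared in Types.h, outside this hunk).

#include "Ppmd7.h"

typedef struct
{
  IByteIn p;                 /* kept first so Read can recover the container by cast */
  const Byte *cur, *end;
} CMemByteIn;

static Byte MemByteIn_Read(void *pp)
{
  CMemByteIn *b = (CMemByteIn *)pp;
  return (b->cur != b->end) ? *b->cur++ : 0;
}

/* Hypothetical helper: decode up to *outLen symbols produced by Ppmd7_EncodeSymbol.
   order and memSize must match the values used by the encoder. */
static SRes Ppmd7_DecodeBuf(Byte *out, size_t *outLen, const Byte *in, size_t inLen,
    unsigned order, UInt32 memSize, ISzAlloc *alloc)
{
  CPpmd7 ppmd;
  CPpmd7z_RangeDec rc;
  CMemByteIn byteIn;
  size_t i, limit = *outLen;

  byteIn.p.Read = MemByteIn_Read;
  byteIn.cur = in;
  byteIn.end = in + inLen;

  Ppmd7_Construct(&ppmd);
  if (!Ppmd7_Alloc(&ppmd, memSize, alloc))
    return SZ_ERROR_MEM;
  Ppmd7_Init(&ppmd, order);

  Ppmd7z_RangeDec_CreateVTable(&rc);
  rc.Stream = &byteIn.p;
  if (!Ppmd7z_RangeDec_Init(&rc))
  {
    Ppmd7_Free(&ppmd, alloc);
    return SZ_ERROR_DATA;
  }

  for (i = 0; i < limit; i++)
  {
    int sym = Ppmd7_DecodeSymbol(&ppmd, &rc.p);
    if (sym < 0)
      break;                 /* -1: end marker, -2: data error */
    out[i] = (Byte)sym;
  }
  *outLen = i;
  Ppmd7_Free(&ppmd, alloc);
  return SZ_OK;
}
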
diff --git a/src/libs/7zip/unix/C/Ppmd7Dec.c b/src/libs/7zip/unix/C/Ppmd7Dec.c
new file mode 100644
index 000000000..68438d5ce
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd7Dec.c
@@ -0,0 +1,187 @@
+/* Ppmd7Dec.c -- PPMdH Decoder
+2010-03-12 : Igor Pavlov : Public domain
+This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
+
+#include "Ppmd7.h"
+
+#define kTopValue (1 << 24)
+
+Bool Ppmd7z_RangeDec_Init(CPpmd7z_RangeDec *p)
+{
+ unsigned i;
+ p->Code = 0;
+ p->Range = 0xFFFFFFFF;
+ if (p->Stream->Read((void *)p->Stream) != 0)
+ return False;
+ for (i = 0; i < 4; i++)
+ p->Code = (p->Code << 8) | p->Stream->Read((void *)p->Stream);
+ return (p->Code < 0xFFFFFFFF);
+}
+
+static UInt32 Range_GetThreshold(void *pp, UInt32 total)
+{
+ CPpmd7z_RangeDec *p = (CPpmd7z_RangeDec *)pp;
+ return (p->Code) / (p->Range /= total);
+}
+
+static void Range_Normalize(CPpmd7z_RangeDec *p)
+{
+ if (p->Range < kTopValue)
+ {
+ p->Code = (p->Code << 8) | p->Stream->Read((void *)p->Stream);
+ p->Range <<= 8;
+ if (p->Range < kTopValue)
+ {
+ p->Code = (p->Code << 8) | p->Stream->Read((void *)p->Stream);
+ p->Range <<= 8;
+ }
+ }
+}
+
+static void Range_Decode(void *pp, UInt32 start, UInt32 size)
+{
+ CPpmd7z_RangeDec *p = (CPpmd7z_RangeDec *)pp;
+ p->Code -= start * p->Range;
+ p->Range *= size;
+ Range_Normalize(p);
+}
+
+static UInt32 Range_DecodeBit(void *pp, UInt32 size0)
+{
+ CPpmd7z_RangeDec *p = (CPpmd7z_RangeDec *)pp;
+ UInt32 newBound = (p->Range >> 14) * size0;
+ UInt32 symbol;
+ if (p->Code < newBound)
+ {
+ symbol = 0;
+ p->Range = newBound;
+ }
+ else
+ {
+ symbol = 1;
+ p->Code -= newBound;
+ p->Range -= newBound;
+ }
+ Range_Normalize(p);
+ return symbol;
+}
+
+void Ppmd7z_RangeDec_CreateVTable(CPpmd7z_RangeDec *p)
+{
+ p->p.GetThreshold = Range_GetThreshold;
+ p->p.Decode = Range_Decode;
+ p->p.DecodeBit = Range_DecodeBit;
+}
+
+
+#define MASK(sym) ((signed char *)charMask)[sym]
+
+int Ppmd7_DecodeSymbol(CPpmd7 *p, IPpmd7_RangeDec *rc)
+{
+ size_t charMask[256 / sizeof(size_t)];
+ if (p->MinContext->NumStats != 1)
+ {
+ CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
+ unsigned i;
+ UInt32 count, hiCnt;
+ if ((count = rc->GetThreshold(rc, p->MinContext->SummFreq)) < (hiCnt = s->Freq))
+ {
+ Byte symbol;
+ rc->Decode(rc, 0, s->Freq);
+ p->FoundState = s;
+ symbol = s->Symbol;
+ Ppmd7_Update1_0(p);
+ return symbol;
+ }
+ p->PrevSuccess = 0;
+ i = p->MinContext->NumStats - 1;
+ do
+ {
+ if ((hiCnt += (++s)->Freq) > count)
+ {
+ Byte symbol;
+ rc->Decode(rc, hiCnt - s->Freq, s->Freq);
+ p->FoundState = s;
+ symbol = s->Symbol;
+ Ppmd7_Update1(p);
+ return symbol;
+ }
+ }
+ while (--i);
+ if (count >= p->MinContext->SummFreq)
+ return -2;
+ p->HiBitsFlag = p->HB2Flag[p->FoundState->Symbol];
+ rc->Decode(rc, hiCnt, p->MinContext->SummFreq - hiCnt);
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(s->Symbol) = 0;
+ i = p->MinContext->NumStats - 1;
+ do { MASK((--s)->Symbol) = 0; } while (--i);
+ }
+ else
+ {
+ UInt16 *prob = Ppmd7_GetBinSumm(p);
+ if (rc->DecodeBit(rc, *prob) == 0)
+ {
+ Byte symbol;
+ *prob = (UInt16)PPMD_UPDATE_PROB_0(*prob);
+ symbol = (p->FoundState = Ppmd7Context_OneState(p->MinContext))->Symbol;
+ Ppmd7_UpdateBin(p);
+ return symbol;
+ }
+ *prob = (UInt16)PPMD_UPDATE_PROB_1(*prob);
+ p->InitEsc = PPMD7_kExpEscape[*prob >> 10];
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(Ppmd7Context_OneState(p->MinContext)->Symbol) = 0;
+ p->PrevSuccess = 0;
+ }
+ for (;;)
+ {
+ CPpmd_State *ps[256], *s;
+ UInt32 freqSum, count, hiCnt;
+ CPpmd_See *see;
+ unsigned i, num, numMasked = p->MinContext->NumStats;
+ do
+ {
+ p->OrderFall++;
+ if (!p->MinContext->Suffix)
+ return -1;
+ p->MinContext = Ppmd7_GetContext(p, p->MinContext->Suffix);
+ }
+ while (p->MinContext->NumStats == numMasked);
+ hiCnt = 0;
+ s = Ppmd7_GetStats(p, p->MinContext);
+ i = 0;
+ num = p->MinContext->NumStats - numMasked;
+ do
+ {
+ int k = (int)(MASK(s->Symbol));
+ hiCnt += (s->Freq & k);
+ ps[i] = s++;
+ i -= k;
+ }
+ while (i != num);
+
+ see = Ppmd7_MakeEscFreq(p, numMasked, &freqSum);
+ freqSum += hiCnt;
+ count = rc->GetThreshold(rc, freqSum);
+
+ if (count < hiCnt)
+ {
+ Byte symbol;
+ CPpmd_State **pps = ps;
+ for (hiCnt = 0; (hiCnt += (*pps)->Freq) <= count; pps++);
+ s = *pps;
+ rc->Decode(rc, hiCnt - s->Freq, s->Freq);
+ Ppmd_See_Update(see);
+ p->FoundState = s;
+ symbol = s->Symbol;
+ Ppmd7_Update2(p);
+ return symbol;
+ }
+ if (count >= freqSum)
+ return -2;
+ rc->Decode(rc, hiCnt, freqSum - hiCnt);
+ see->Summ = (UInt16)(see->Summ + freqSum);
+ do { MASK(ps[--i]->Symbol) = 0; } while (i != 0);
+ }
+}
diff --git a/src/libs/7zip/unix/C/Ppmd7Enc.c b/src/libs/7zip/unix/C/Ppmd7Enc.c
new file mode 100644
index 000000000..8247757d0
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd7Enc.c
@@ -0,0 +1,185 @@
+/* Ppmd7Enc.c -- PPMdH Encoder
+2010-03-12 : Igor Pavlov : Public domain
+This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
+
+#include "Ppmd7.h"
+
+#define kTopValue (1 << 24)
+
+void Ppmd7z_RangeEnc_Init(CPpmd7z_RangeEnc *p)
+{
+ p->Low = 0;
+ p->Range = 0xFFFFFFFF;
+ p->Cache = 0;
+ p->CacheSize = 1;
+}
+
+static void RangeEnc_ShiftLow(CPpmd7z_RangeEnc *p)
+{
+ if ((UInt32)p->Low < (UInt32)0xFF000000 || (unsigned)(p->Low >> 32) != 0)
+ {
+ Byte temp = p->Cache;
+ do
+ {
+ p->Stream->Write(p->Stream, (Byte)(temp + (Byte)(p->Low >> 32)));
+ temp = 0xFF;
+ }
+ while(--p->CacheSize != 0);
+ p->Cache = (Byte)((UInt32)p->Low >> 24);
+ }
+ p->CacheSize++;
+ p->Low = (UInt32)p->Low << 8;
+}
+
+static void RangeEnc_Encode(CPpmd7z_RangeEnc *p, UInt32 start, UInt32 size, UInt32 total)
+{
+ p->Low += start * (p->Range /= total);
+ p->Range *= size;
+ while (p->Range < kTopValue)
+ {
+ p->Range <<= 8;
+ RangeEnc_ShiftLow(p);
+ }
+}
+
+static void RangeEnc_EncodeBit_0(CPpmd7z_RangeEnc *p, UInt32 size0)
+{
+ p->Range = (p->Range >> 14) * size0;
+ while (p->Range < kTopValue)
+ {
+ p->Range <<= 8;
+ RangeEnc_ShiftLow(p);
+ }
+}
+
+static void RangeEnc_EncodeBit_1(CPpmd7z_RangeEnc *p, UInt32 size0)
+{
+ UInt32 newBound = (p->Range >> 14) * size0;
+ p->Low += newBound;
+ p->Range -= newBound;
+ while (p->Range < kTopValue)
+ {
+ p->Range <<= 8;
+ RangeEnc_ShiftLow(p);
+ }
+}
+
+void Ppmd7z_RangeEnc_FlushData(CPpmd7z_RangeEnc *p)
+{
+ unsigned i;
+ for (i = 0; i < 5; i++)
+ RangeEnc_ShiftLow(p);
+}
+
+
+#define MASK(sym) ((signed char *)charMask)[sym]
+
+void Ppmd7_EncodeSymbol(CPpmd7 *p, CPpmd7z_RangeEnc *rc, int symbol)
+{
+ size_t charMask[256 / sizeof(size_t)];
+ if (p->MinContext->NumStats != 1)
+ {
+ CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
+ UInt32 sum;
+ unsigned i;
+ if (s->Symbol == symbol)
+ {
+ RangeEnc_Encode(rc, 0, s->Freq, p->MinContext->SummFreq);
+ p->FoundState = s;
+ Ppmd7_Update1_0(p);
+ return;
+ }
+ p->PrevSuccess = 0;
+ sum = s->Freq;
+ i = p->MinContext->NumStats - 1;
+ do
+ {
+ if ((++s)->Symbol == symbol)
+ {
+ RangeEnc_Encode(rc, sum, s->Freq, p->MinContext->SummFreq);
+ p->FoundState = s;
+ Ppmd7_Update1(p);
+ return;
+ }
+ sum += s->Freq;
+ }
+ while (--i);
+
+ p->HiBitsFlag = p->HB2Flag[p->FoundState->Symbol];
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(s->Symbol) = 0;
+ i = p->MinContext->NumStats - 1;
+ do { MASK((--s)->Symbol) = 0; } while (--i);
+ RangeEnc_Encode(rc, sum, p->MinContext->SummFreq - sum, p->MinContext->SummFreq);
+ }
+ else
+ {
+ UInt16 *prob = Ppmd7_GetBinSumm(p);
+ CPpmd_State *s = Ppmd7Context_OneState(p->MinContext);
+ if (s->Symbol == symbol)
+ {
+ RangeEnc_EncodeBit_0(rc, *prob);
+ *prob = (UInt16)PPMD_UPDATE_PROB_0(*prob);
+ p->FoundState = s;
+ Ppmd7_UpdateBin(p);
+ return;
+ }
+ else
+ {
+ RangeEnc_EncodeBit_1(rc, *prob);
+ *prob = (UInt16)PPMD_UPDATE_PROB_1(*prob);
+ p->InitEsc = PPMD7_kExpEscape[*prob >> 10];
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(s->Symbol) = 0;
+ p->PrevSuccess = 0;
+ }
+ }
+ for (;;)
+ {
+ UInt32 escFreq;
+ CPpmd_See *see;
+ CPpmd_State *s;
+ UInt32 sum;
+ unsigned i, numMasked = p->MinContext->NumStats;
+ do
+ {
+ p->OrderFall++;
+ if (!p->MinContext->Suffix)
+ return; /* EndMarker (symbol = -1) */
+ p->MinContext = Ppmd7_GetContext(p, p->MinContext->Suffix);
+ }
+ while (p->MinContext->NumStats == numMasked);
+
+ see = Ppmd7_MakeEscFreq(p, numMasked, &escFreq);
+ s = Ppmd7_GetStats(p, p->MinContext);
+ sum = 0;
+ i = p->MinContext->NumStats;
+ do
+ {
+ int cur = s->Symbol;
+ if (cur == symbol)
+ {
+ UInt32 low = sum;
+ CPpmd_State *s1 = s;
+ do
+ {
+ sum += (s->Freq & (int)(MASK(s->Symbol)));
+ s++;
+ }
+ while (--i);
+ RangeEnc_Encode(rc, low, s1->Freq, sum + escFreq);
+ Ppmd_See_Update(see);
+ p->FoundState = s1;
+ Ppmd7_Update2(p);
+ return;
+ }
+ sum += (s->Freq & (int)(MASK(cur)));
+ MASK(cur) = 0;
+ s++;
+ }
+ while (--i);
+
+ RangeEnc_Encode(rc, sum, escFreq, sum + escFreq);
+ see->Summ = (UInt16)(see->Summ + sum + escFreq);
+ }
+}
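
For completeness, the encode side mirrors the decoder: initialise the model and the range encoder, feed each byte to Ppmd7_EncodeSymbol, encode -1 as the end marker (see the comment in the escape loop above), and flush. The memory-backed IByteOut and the helper below are illustrative assumptions; IByteOut is declared in Types.h, outside this hunk.

#include "Ppmd7.h"

typedef struct
{
  IByteOut p;                /* kept first so Write can recover the container by cast */
  Byte *buf;
  size_t pos, size;
  int overflow;
} CMemByteOut;

static void MemByteOut_Write(void *pp, Byte b)
{
  CMemByteOut *o = (CMemByteOut *)pp;
  if (o->pos < o->size)
    o->buf[o->pos++] = b;
  else
    o->overflow = 1;
}

/* Hypothetical helper: encode inLen bytes plus the end marker into out.
   order and memSize must match the decoder. */
static SRes Ppmd7_EncodeBuf(Byte *out, size_t *outLen, const Byte *in, size_t inLen,
    unsigned order, UInt32 memSize, ISzAlloc *alloc)
{
  CPpmd7 ppmd;
  CPpmd7z_RangeEnc rc;
  CMemByteOut byteOut;
  size_t i;

  byteOut.p.Write = MemByteOut_Write;
  byteOut.buf = out;
  byteOut.pos = 0;
  byteOut.size = *outLen;
  byteOut.overflow = 0;

  Ppmd7_Construct(&ppmd);
  if (!Ppmd7_Alloc(&ppmd, memSize, alloc))
    return SZ_ERROR_MEM;
  Ppmd7_Init(&ppmd, order);

  Ppmd7z_RangeEnc_Init(&rc);
  rc.Stream = &byteOut.p;

  for (i = 0; i < inLen; i++)
    Ppmd7_EncodeSymbol(&ppmd, &rc, in[i]);
  Ppmd7_EncodeSymbol(&ppmd, &rc, -1);    /* end marker */
  Ppmd7z_RangeEnc_FlushData(&rc);

  Ppmd7_Free(&ppmd, alloc);
  *outLen = byteOut.pos;
  return byteOut.overflow ? SZ_ERROR_OUTPUT_EOF : SZ_OK;
}
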
diff --git a/src/libs/7zip/unix/C/Ppmd8.c b/src/libs/7zip/unix/C/Ppmd8.c
new file mode 100644
index 000000000..9187a88ef
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd8.c
@@ -0,0 +1,1120 @@
+/* Ppmd8.c -- PPMdI codec
+2010-03-24 : Igor Pavlov : Public domain
+This code is based on PPMd var.I (2002): Dmitry Shkarin : Public domain */
+
+#include <memory.h>
+
+#include "Ppmd8.h"
+
+const Byte PPMD8_kExpEscape[16] = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
+static const UInt16 kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
+
+#define MAX_FREQ 124
+#define UNIT_SIZE 12
+
+#define U2B(nu) ((UInt32)(nu) * UNIT_SIZE)
+#define U2I(nu) (p->Units2Indx[(nu) - 1])
+#define I2U(indx) (p->Indx2Units[indx])
+
+#ifdef PPMD_32BIT
+ #define REF(ptr) (ptr)
+#else
+ #define REF(ptr) ((UInt32)((Byte *)(ptr) - (p)->Base))
+#endif
+
+#define STATS_REF(ptr) ((CPpmd_State_Ref)REF(ptr))
+
+#define CTX(ref) ((CPpmd8_Context *)Ppmd8_GetContext(p, ref))
+#define STATS(ctx) Ppmd8_GetStats(p, ctx)
+#define ONE_STATE(ctx) Ppmd8Context_OneState(ctx)
+#define SUFFIX(ctx) CTX((ctx)->Suffix)
+
+typedef CPpmd8_Context * CTX_PTR;
+
+struct CPpmd8_Node_;
+
+typedef
+ #ifdef PPMD_32BIT
+ struct CPpmd8_Node_ *
+ #else
+ UInt32
+ #endif
+ CPpmd8_Node_Ref;
+
+typedef struct CPpmd8_Node_
+{
+ UInt32 Stamp;
+ CPpmd8_Node_Ref Next;
+ UInt32 NU;
+} CPpmd8_Node;
+
+#ifdef PPMD_32BIT
+ #define NODE(ptr) (ptr)
+#else
+ #define NODE(offs) ((CPpmd8_Node *)(p->Base + (offs)))
+#endif
+
+#define EMPTY_NODE 0xFFFFFFFF
+
+void Ppmd8_Construct(CPpmd8 *p)
+{
+ unsigned i, k, m;
+
+ p->Base = 0;
+
+ for (i = 0, k = 0; i < PPMD_NUM_INDEXES; i++)
+ {
+ unsigned step = (i >= 12 ? 4 : (i >> 2) + 1);
+ do { p->Units2Indx[k++] = (Byte)i; } while(--step);
+ p->Indx2Units[i] = (Byte)k;
+ }
+
+ p->NS2BSIndx[0] = (0 << 1);
+ p->NS2BSIndx[1] = (1 << 1);
+ memset(p->NS2BSIndx + 2, (2 << 1), 9);
+ memset(p->NS2BSIndx + 11, (3 << 1), 256 - 11);
+
+ for (i = 0; i < 5; i++)
+ p->NS2Indx[i] = (Byte)i;
+ for (m = i, k = 1; i < 260; i++)
+ {
+ p->NS2Indx[i] = (Byte)m;
+ if (--k == 0)
+ k = (++m) - 4;
+ }
+}
+
+void Ppmd8_Free(CPpmd8 *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->Base);
+ p->Size = 0;
+ p->Base = 0;
+}
+
+Bool Ppmd8_Alloc(CPpmd8 *p, UInt32 size, ISzAlloc *alloc)
+{
+ if (p->Base == 0 || p->Size != size)
+ {
+ Ppmd8_Free(p, alloc);
+ p->AlignOffset =
+ #ifdef PPMD_32BIT
+ (4 - size) & 3;
+ #else
+ 4 - (size & 3);
+ #endif
+ if ((p->Base = (Byte *)alloc->Alloc(alloc, p->AlignOffset + size)) == 0)
+ return False;
+ p->Size = size;
+ }
+ return True;
+}
+
+static void InsertNode(CPpmd8 *p, void *node, unsigned indx)
+{
+ ((CPpmd8_Node *)node)->Stamp = EMPTY_NODE;
+ ((CPpmd8_Node *)node)->Next = (CPpmd8_Node_Ref)p->FreeList[indx];
+ ((CPpmd8_Node *)node)->NU = I2U(indx);
+ p->FreeList[indx] = REF(node);
+ p->Stamps[indx]++;
+}
+
+static void *RemoveNode(CPpmd8 *p, unsigned indx)
+{
+ CPpmd8_Node *node = NODE((CPpmd8_Node_Ref)p->FreeList[indx]);
+ p->FreeList[indx] = node->Next;
+ p->Stamps[indx]--;
+ return node;
+}
+
+static void SplitBlock(CPpmd8 *p, void *ptr, unsigned oldIndx, unsigned newIndx)
+{
+ unsigned i, nu = I2U(oldIndx) - I2U(newIndx);
+ ptr = (Byte *)ptr + U2B(I2U(newIndx));
+ if (I2U(i = U2I(nu)) != nu)
+ {
+ unsigned k = I2U(--i);
+ InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1);
+ }
+ InsertNode(p, ptr, i);
+}
+
+static void GlueFreeBlocks(CPpmd8 *p)
+{
+ CPpmd8_Node_Ref head = 0;
+ CPpmd8_Node_Ref *prev = &head;
+ unsigned i;
+
+ p->GlueCount = 1 << 13;
+ memset(p->Stamps, 0, sizeof(p->Stamps));
+
+ /* Order-0 context is always at top UNIT, so we don't need guard NODE at the end.
+ All blocks up to p->LoUnit can be free, so we need guard NODE at LoUnit. */
+ if (p->LoUnit != p->HiUnit)
+ ((CPpmd8_Node *)p->LoUnit)->Stamp = 0;
+
+ /* Glue free blocks */
+ for (i = 0; i < PPMD_NUM_INDEXES; i++)
+ {
+ CPpmd8_Node_Ref next = (CPpmd8_Node_Ref)p->FreeList[i];
+ p->FreeList[i] = 0;
+ while (next != 0)
+ {
+ CPpmd8_Node *node = NODE(next);
+ if (node->NU != 0)
+ {
+ CPpmd8_Node *node2;
+ *prev = next;
+ prev = &(node->Next);
+ while ((node2 = node + node->NU)->Stamp == EMPTY_NODE)
+ {
+ node->NU += node2->NU;
+ node2->NU = 0;
+ }
+ }
+ next = node->Next;
+ }
+ }
+ *prev = 0;
+
+ /* Fill lists of free blocks */
+ while (head != 0)
+ {
+ CPpmd8_Node *node = NODE(head);
+ unsigned nu;
+ head = node->Next;
+ nu = node->NU;
+ if (nu == 0)
+ continue;
+ for (; nu > 128; nu -= 128, node += 128)
+ InsertNode(p, node, PPMD_NUM_INDEXES - 1);
+ if (I2U(i = U2I(nu)) != nu)
+ {
+ unsigned k = I2U(--i);
+ InsertNode(p, node + k, nu - k - 1);
+ }
+ InsertNode(p, node, i);
+ }
+}
+
+static void *AllocUnitsRare(CPpmd8 *p, unsigned indx)
+{
+ unsigned i;
+ void *retVal;
+ if (p->GlueCount == 0)
+ {
+ GlueFreeBlocks(p);
+ if (p->FreeList[indx] != 0)
+ return RemoveNode(p, indx);
+ }
+ i = indx;
+ do
+ {
+ if (++i == PPMD_NUM_INDEXES)
+ {
+ UInt32 numBytes = U2B(I2U(indx));
+ p->GlueCount--;
+ return ((UInt32)(p->UnitsStart - p->Text) > numBytes) ? (p->UnitsStart -= numBytes) : (NULL);
+ }
+ }
+ while (p->FreeList[i] == 0);
+ retVal = RemoveNode(p, i);
+ SplitBlock(p, retVal, i, indx);
+ return retVal;
+}
+
+static void *AllocUnits(CPpmd8 *p, unsigned indx)
+{
+ UInt32 numBytes;
+ if (p->FreeList[indx] != 0)
+ return RemoveNode(p, indx);
+ numBytes = U2B(I2U(indx));
+ if (numBytes <= (UInt32)(p->HiUnit - p->LoUnit))
+ {
+ void *retVal = p->LoUnit;
+ p->LoUnit += numBytes;
+ return retVal;
+ }
+ return AllocUnitsRare(p, indx);
+}
+
+#define MyMem12Cpy(dest, src, num) \
+ { UInt32 *d = (UInt32 *)dest; const UInt32 *s = (const UInt32 *)src; UInt32 n = num; \
+ do { d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; s += 3; d += 3; } while(--n); }
+
+static void *ShrinkUnits(CPpmd8 *p, void *oldPtr, unsigned oldNU, unsigned newNU)
+{
+ unsigned i0 = U2I(oldNU);
+ unsigned i1 = U2I(newNU);
+ if (i0 == i1)
+ return oldPtr;
+ if (p->FreeList[i1] != 0)
+ {
+ void *ptr = RemoveNode(p, i1);
+ MyMem12Cpy(ptr, oldPtr, newNU);
+ InsertNode(p, oldPtr, i0);
+ return ptr;
+ }
+ SplitBlock(p, oldPtr, i0, i1);
+ return oldPtr;
+}
+
+static void FreeUnits(CPpmd8 *p, void *ptr, unsigned nu)
+{
+ InsertNode(p, ptr, U2I(nu));
+}
+
+static void SpecialFreeUnit(CPpmd8 *p, void *ptr)
+{
+ if ((Byte *)ptr != p->UnitsStart)
+ InsertNode(p, ptr, 0);
+ else
+ {
+ #ifdef PPMD8_FREEZE_SUPPORT
+ *(UInt32 *)ptr = EMPTY_NODE; /* it's used for (Flags == 0xFF) check in RemoveBinContexts */
+ #endif
+ p->UnitsStart += UNIT_SIZE;
+ }
+}
+
+static void *MoveUnitsUp(CPpmd8 *p, void *oldPtr, unsigned nu)
+{
+ unsigned indx = U2I(nu);
+ void *ptr;
+ if ((Byte *)oldPtr > p->UnitsStart + 16 * 1024 || REF(oldPtr) > p->FreeList[indx])
+ return oldPtr;
+ ptr = RemoveNode(p, indx);
+ MyMem12Cpy(ptr, oldPtr, nu);
+ if ((Byte*)oldPtr != p->UnitsStart)
+ InsertNode(p, oldPtr, indx);
+ else
+ p->UnitsStart += U2B(I2U(indx));
+ return ptr;
+}
+
+static void ExpandTextArea(CPpmd8 *p)
+{
+ UInt32 count[PPMD_NUM_INDEXES];
+ unsigned i;
+ memset(count, 0, sizeof(count));
+ if (p->LoUnit != p->HiUnit)
+ ((CPpmd8_Node *)p->LoUnit)->Stamp = 0;
+
+ {
+ CPpmd8_Node *node = (CPpmd8_Node *)p->UnitsStart;
+ for (; node->Stamp == EMPTY_NODE; node += node->NU)
+ {
+ node->Stamp = 0;
+ count[U2I(node->NU)]++;
+ }
+ p->UnitsStart = (Byte *)node;
+ }
+
+ for (i = 0; i < PPMD_NUM_INDEXES; i++)
+ {
+ CPpmd8_Node_Ref *next = (CPpmd8_Node_Ref *)&p->FreeList[i];
+ while (count[i] != 0)
+ {
+ CPpmd8_Node *node = NODE(*next);
+ while (node->Stamp == 0)
+ {
+ *next = node->Next;
+ node = NODE(*next);
+ p->Stamps[i]--;
+ if (--count[i] == 0)
+ break;
+ }
+ next = &node->Next;
+ }
+ }
+}
+
+#define SUCCESSOR(p) ((CPpmd_Void_Ref)((p)->SuccessorLow | ((UInt32)(p)->SuccessorHigh << 16)))
+
+static void SetSuccessor(CPpmd_State *p, CPpmd_Void_Ref v)
+{
+ (p)->SuccessorLow = (UInt16)((UInt32)(v) & 0xFFFF);
+ (p)->SuccessorHigh = (UInt16)(((UInt32)(v) >> 16) & 0xFFFF);
+}
+
+#define RESET_TEXT(offs) { p->Text = p->Base + p->AlignOffset + (offs); }
+
+static void RestartModel(CPpmd8 *p)
+{
+ unsigned i, k, m, r;
+
+ memset(p->FreeList, 0, sizeof(p->FreeList));
+ memset(p->Stamps, 0, sizeof(p->Stamps));
+ RESET_TEXT(0);
+ p->HiUnit = p->Text + p->Size;
+ p->LoUnit = p->UnitsStart = p->HiUnit - p->Size / 8 / UNIT_SIZE * 7 * UNIT_SIZE;
+ p->GlueCount = 0;
+
+ p->OrderFall = p->MaxOrder;
+ p->RunLength = p->InitRL = -(Int32)((p->MaxOrder < 12) ? p->MaxOrder : 12) - 1;
+ p->PrevSuccess = 0;
+
+ p->MinContext = p->MaxContext = (CTX_PTR)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */
+ p->MinContext->Suffix = 0;
+ p->MinContext->NumStats = 255;
+ p->MinContext->Flags = 0;
+ p->MinContext->SummFreq = 256 + 1;
+ p->FoundState = (CPpmd_State *)p->LoUnit; /* AllocUnits(p, PPMD_NUM_INDEXES - 1); */
+ p->LoUnit += U2B(256 / 2);
+ p->MinContext->Stats = REF(p->FoundState);
+ for (i = 0; i < 256; i++)
+ {
+ CPpmd_State *s = &p->FoundState[i];
+ s->Symbol = (Byte)i;
+ s->Freq = 1;
+ SetSuccessor(s, 0);
+ }
+
+ for (i = m = 0; m < 25; m++)
+ {
+ while (p->NS2Indx[i] == m)
+ i++;
+ for (k = 0; k < 8; k++)
+ {
+ UInt16 val = (UInt16)(PPMD_BIN_SCALE - kInitBinEsc[k] / (i + 1));
+ UInt16 *dest = p->BinSumm[m] + k;
+ for (r = 0; r < 64; r += 8)
+ dest[r] = val;
+ }
+ }
+
+ for (i = m = 0; m < 24; m++)
+ {
+ while (p->NS2Indx[i + 3] == m + 3)
+ i++;
+ for (k = 0; k < 32; k++)
+ {
+ CPpmd_See *s = &p->See[m][k];
+ s->Summ = (UInt16)((2 * i + 5) << (s->Shift = PPMD_PERIOD_BITS - 4));
+ s->Count = 7;
+ }
+ }
+}
+
+void Ppmd8_Init(CPpmd8 *p, unsigned maxOrder, unsigned restoreMethod)
+{
+ p->MaxOrder = maxOrder;
+ p->RestoreMethod = restoreMethod;
+ RestartModel(p);
+ p->DummySee.Shift = PPMD_PERIOD_BITS;
+ p->DummySee.Summ = 0; /* unused */
+ p->DummySee.Count = 64; /* unused */
+}
+
+static void Refresh(CPpmd8 *p, CTX_PTR ctx, unsigned oldNU, unsigned scale)
+{
+ unsigned i = ctx->NumStats, escFreq, sumFreq, flags;
+ CPpmd_State *s = (CPpmd_State *)ShrinkUnits(p, STATS(ctx), oldNU, (i + 2) >> 1);
+ ctx->Stats = REF(s);
+ #ifdef PPMD8_FREEZE_SUPPORT
+ /* fixed over Shkarin's code. Fixed code is not compatible with original code for some files in FREEZE mode. */
+ scale |= (ctx->SummFreq >= ((UInt32)1 << 15));
+ #endif
+ flags = (ctx->Flags & (0x10 + 0x04 * scale)) + 0x08 * (s->Symbol >= 0x40);
+ escFreq = ctx->SummFreq - s->Freq;
+ sumFreq = (s->Freq = (Byte)((s->Freq + scale) >> scale));
+ do
+ {
+ escFreq -= (++s)->Freq;
+ sumFreq += (s->Freq = (Byte)((s->Freq + scale) >> scale));
+ flags |= 0x08 * (s->Symbol >= 0x40);
+ }
+ while (--i);
+ ctx->SummFreq = (UInt16)(sumFreq + ((escFreq + scale) >> scale));
+ ctx->Flags = (Byte)flags;
+}
+
+static void SwapStates(CPpmd_State *t1, CPpmd_State *t2)
+{
+ CPpmd_State tmp = *t1;
+ *t1 = *t2;
+ *t2 = tmp;
+}
+
+static CPpmd_Void_Ref CutOff(CPpmd8 *p, CTX_PTR ctx, unsigned order)
+{
+ int i;
+ unsigned tmp;
+ CPpmd_State *s;
+
+ if (!ctx->NumStats)
+ {
+ s = ONE_STATE(ctx);
+ if ((Byte *)Ppmd8_GetPtr(p, SUCCESSOR(s)) >= p->UnitsStart)
+ {
+ if (order < p->MaxOrder)
+ SetSuccessor(s, CutOff(p, CTX(SUCCESSOR(s)), order + 1));
+ else
+ SetSuccessor(s, 0);
+ if (SUCCESSOR(s) || order <= 9) /* O_BOUND */
+ return REF(ctx);
+ }
+ SpecialFreeUnit(p, ctx);
+ return 0;
+ }
+
+ ctx->Stats = STATS_REF(MoveUnitsUp(p, STATS(ctx), tmp = ((unsigned)ctx->NumStats + 2) >> 1));
+
+ for (s = STATS(ctx) + (i = ctx->NumStats); s >= STATS(ctx); s--)
+ if ((Byte *)Ppmd8_GetPtr(p, SUCCESSOR(s)) < p->UnitsStart)
+ {
+ CPpmd_State *s2 = STATS(ctx) + (i--);
+ SetSuccessor(s, 0);
+ SwapStates(s, s2);
+ }
+ else if (order < p->MaxOrder)
+ SetSuccessor(s, CutOff(p, CTX(SUCCESSOR(s)), order + 1));
+ else
+ SetSuccessor(s, 0);
+
+ if (i != ctx->NumStats && order)
+ {
+ ctx->NumStats = (Byte)i;
+ s = STATS(ctx);
+ if (i < 0)
+ {
+ FreeUnits(p, s, tmp);
+ SpecialFreeUnit(p, ctx);
+ return 0;
+ }
+ if (i == 0)
+ {
+ ctx->Flags = (ctx->Flags & 0x10) + 0x08 * (s->Symbol >= 0x40);
+ *ONE_STATE(ctx) = *s;
+ FreeUnits(p, s, tmp);
+ ONE_STATE(ctx)->Freq = (Byte)((unsigned)ONE_STATE(ctx)->Freq + 11) >> 3;
+ }
+ else
+ Refresh(p, ctx, tmp, ctx->SummFreq > 16 * i);
+ }
+ return REF(ctx);
+}
+
+#ifdef PPMD8_FREEZE_SUPPORT
+static CPpmd_Void_Ref RemoveBinContexts(CPpmd8 *p, CTX_PTR ctx, unsigned order)
+{
+ CPpmd_State *s;
+ if (!ctx->NumStats)
+ {
+ s = ONE_STATE(ctx);
+ if ((Byte *)Ppmd8_GetPtr(p, SUCCESSOR(s)) >= p->UnitsStart && order < p->MaxOrder)
+ SetSuccessor(s, RemoveBinContexts(p, CTX(SUCCESSOR(s)), order + 1));
+ else
+ SetSuccessor(s, 0);
+ /* Suffix context can be removed already, since different (high-order)
+ Successors may refer to same context. So we check Flags == 0xFF (Stamp == EMPTY_NODE) */
+ if (!SUCCESSOR(s) && (!SUFFIX(ctx)->NumStats || SUFFIX(ctx)->Flags == 0xFF))
+ {
+ FreeUnits(p, ctx, 1);
+ return 0;
+ }
+ else
+ return REF(ctx);
+ }
+
+ for (s = STATS(ctx) + ctx->NumStats; s >= STATS(ctx); s--)
+ if ((Byte *)Ppmd8_GetPtr(p, SUCCESSOR(s)) >= p->UnitsStart && order < p->MaxOrder)
+ SetSuccessor(s, RemoveBinContexts(p, CTX(SUCCESSOR(s)), order + 1));
+ else
+ SetSuccessor(s, 0);
+
+ return REF(ctx);
+}
+#endif
+
+static UInt32 GetUsedMemory(const CPpmd8 *p)
+{
+ UInt32 v = 0;
+ unsigned i;
+ for (i = 0; i < PPMD_NUM_INDEXES; i++)
+ v += p->Stamps[i] * I2U(i);
+ return p->Size - (UInt32)(p->HiUnit - p->LoUnit) - (UInt32)(p->UnitsStart - p->Text) - U2B(v);
+}
+
+#ifdef PPMD8_FREEZE_SUPPORT
+ #define RESTORE_MODEL(c1, fSuccessor) RestoreModel(p, c1, fSuccessor)
+#else
+ #define RESTORE_MODEL(c1, fSuccessor) RestoreModel(p, c1)
+#endif
+
+static void RestoreModel(CPpmd8 *p, CTX_PTR c1
+ #ifdef PPMD8_FREEZE_SUPPORT
+ , CTX_PTR fSuccessor
+ #endif
+ )
+{
+ CTX_PTR c;
+ CPpmd_State *s;
+ RESET_TEXT(0);
+ for (c = p->MaxContext; c != c1; c = SUFFIX(c))
+ if (--(c->NumStats) == 0)
+ {
+ s = STATS(c);
+ c->Flags = (c->Flags & 0x10) + 0x08 * (s->Symbol >= 0x40);
+ *ONE_STATE(c) = *s;
+ SpecialFreeUnit(p, s);
+ ONE_STATE(c)->Freq = (ONE_STATE(c)->Freq + 11) >> 3;
+ }
+ else
+ Refresh(p, c, (c->NumStats+3) >> 1, 0);
+
+ for (; c != p->MinContext; c = SUFFIX(c))
+ if (!c->NumStats)
+ ONE_STATE(c)->Freq -= ONE_STATE(c)->Freq >> 1;
+ else if ((c->SummFreq += 4) > 128 + 4 * c->NumStats)
+ Refresh(p, c, (c->NumStats + 2) >> 1, 1);
+
+ #ifdef PPMD8_FREEZE_SUPPORT
+ if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
+ {
+ p->MaxContext = fSuccessor;
+ p->GlueCount += !(p->Stamps[1] & 1);
+ }
+ else if (p->RestoreMethod == PPMD8_RESTORE_METHOD_FREEZE)
+ {
+ while (p->MaxContext->Suffix)
+ p->MaxContext = SUFFIX(p->MaxContext);
+ RemoveBinContexts(p, p->MaxContext, 0);
+ p->RestoreMethod++;
+ p->GlueCount = 0;
+ p->OrderFall = p->MaxOrder;
+ }
+ else
+ #endif
+ if (p->RestoreMethod == PPMD8_RESTORE_METHOD_RESTART || GetUsedMemory(p) < (p->Size >> 1))
+ RestartModel(p);
+ else
+ {
+ while (p->MaxContext->Suffix)
+ p->MaxContext = SUFFIX(p->MaxContext);
+ do
+ {
+ CutOff(p, p->MaxContext, 0);
+ ExpandTextArea(p);
+ }
+ while (GetUsedMemory(p) > 3 * (p->Size >> 2));
+ p->GlueCount = 0;
+ p->OrderFall = p->MaxOrder;
+ }
+}
+
+static CTX_PTR CreateSuccessors(CPpmd8 *p, Bool skip, CPpmd_State *s1, CTX_PTR c)
+{
+ CPpmd_State upState;
+ Byte flags;
+ CPpmd_Byte_Ref upBranch = (CPpmd_Byte_Ref)SUCCESSOR(p->FoundState);
+ /* fixed over Shkarin's code. Maybe it could work without + 1 too. */
+ CPpmd_State *ps[PPMD8_MAX_ORDER + 1];
+ unsigned numPs = 0;
+
+ if (!skip)
+ ps[numPs++] = p->FoundState;
+
+ while (c->Suffix)
+ {
+ CPpmd_Void_Ref successor;
+ CPpmd_State *s;
+ c = SUFFIX(c);
+ if (s1)
+ {
+ s = s1;
+ s1 = NULL;
+ }
+ else if (c->NumStats != 0)
+ {
+ for (s = STATS(c); s->Symbol != p->FoundState->Symbol; s++);
+ if (s->Freq < MAX_FREQ - 9)
+ {
+ s->Freq++;
+ c->SummFreq++;
+ }
+ }
+ else
+ {
+ s = ONE_STATE(c);
+ s->Freq += (!SUFFIX(c)->NumStats & (s->Freq < 24));
+ }
+ successor = SUCCESSOR(s);
+ if (successor != upBranch)
+ {
+ c = CTX(successor);
+ if (numPs == 0)
+ return c;
+ break;
+ }
+ ps[numPs++] = s;
+ }
+
+ upState.Symbol = *(const Byte *)Ppmd8_GetPtr(p, upBranch);
+ SetSuccessor(&upState, upBranch + 1);
+ flags = 0x10 * (p->FoundState->Symbol >= 0x40) + 0x08 * (upState.Symbol >= 0x40);
+
+ if (c->NumStats == 0)
+ upState.Freq = ONE_STATE(c)->Freq;
+ else
+ {
+ UInt32 cf, s0;
+ CPpmd_State *s;
+ for (s = STATS(c); s->Symbol != upState.Symbol; s++);
+ cf = s->Freq - 1;
+ s0 = c->SummFreq - c->NumStats - cf;
+ upState.Freq = (Byte)(1 + ((2 * cf <= s0) ? (5 * cf > s0) : ((cf + 2 * s0 - 3) / s0)));
+ }
+
+ do
+ {
+ /* Create Child */
+ CTX_PTR c1; /* = AllocContext(p); */
+ if (p->HiUnit != p->LoUnit)
+ c1 = (CTX_PTR)(p->HiUnit -= UNIT_SIZE);
+ else if (p->FreeList[0] != 0)
+ c1 = (CTX_PTR)RemoveNode(p, 0);
+ else
+ {
+ c1 = (CTX_PTR)AllocUnitsRare(p, 0);
+ if (!c1)
+ return NULL;
+ }
+ c1->NumStats = 0;
+ c1->Flags = flags;
+ *ONE_STATE(c1) = upState;
+ c1->Suffix = REF(c);
+ SetSuccessor(ps[--numPs], REF(c1));
+ c = c1;
+ }
+ while (numPs != 0);
+
+ return c;
+}
+
+static CTX_PTR ReduceOrder(CPpmd8 *p, CPpmd_State *s1, CTX_PTR c)
+{
+ CPpmd_State *s = NULL;
+ CTX_PTR c1 = c;
+ CPpmd_Void_Ref upBranch = REF(p->Text);
+
+ #ifdef PPMD8_FREEZE_SUPPORT
+ /* The BUG in Shkarin's code was fixed: ps could overflow in CUT_OFF mode. */
+ CPpmd_State *ps[PPMD8_MAX_ORDER + 1];
+ unsigned numPs = 0;
+ ps[numPs++] = p->FoundState;
+ #endif
+
+ SetSuccessor(p->FoundState, upBranch);
+ p->OrderFall++;
+
+ for (;;)
+ {
+ if (s1)
+ {
+ c = SUFFIX(c);
+ s = s1;
+ s1 = NULL;
+ }
+ else
+ {
+ if (!c->Suffix)
+ {
+ #ifdef PPMD8_FREEZE_SUPPORT
+ if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
+ {
+ do { SetSuccessor(ps[--numPs], REF(c)); } while (numPs);
+ RESET_TEXT(1);
+ p->OrderFall = 1;
+ }
+ #endif
+ return c;
+ }
+ c = SUFFIX(c);
+ if (c->NumStats)
+ {
+ if ((s = STATS(c))->Symbol != p->FoundState->Symbol)
+ do { s++; } while (s->Symbol != p->FoundState->Symbol);
+ if (s->Freq < MAX_FREQ - 9)
+ {
+ s->Freq += 2;
+ c->SummFreq += 2;
+ }
+ }
+ else
+ {
+ s = ONE_STATE(c);
+ s->Freq += (s->Freq < 32);
+ }
+ }
+ if (SUCCESSOR(s))
+ break;
+ #ifdef PPMD8_FREEZE_SUPPORT
+ ps[numPs++] = s;
+ #endif
+ SetSuccessor(s, upBranch);
+ p->OrderFall++;
+ }
+
+ #ifdef PPMD8_FREEZE_SUPPORT
+ if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
+ {
+ c = CTX(SUCCESSOR(s));
+ do { SetSuccessor(ps[--numPs], REF(c)); } while (numPs);
+ RESET_TEXT(1);
+ p->OrderFall = 1;
+ return c;
+ }
+ else
+ #endif
+ if (SUCCESSOR(s) <= upBranch)
+ {
+ CTX_PTR successor;
+ CPpmd_State *s1 = p->FoundState;
+ p->FoundState = s;
+
+ successor = CreateSuccessors(p, False, NULL, c);
+ if (successor == NULL)
+ SetSuccessor(s, 0);
+ else
+ SetSuccessor(s, REF(successor));
+ p->FoundState = s1;
+ }
+
+ if (p->OrderFall == 1 && c1 == p->MaxContext)
+ {
+ SetSuccessor(p->FoundState, SUCCESSOR(s));
+ p->Text--;
+ }
+ if (SUCCESSOR(s) == 0)
+ return NULL;
+ return CTX(SUCCESSOR(s));
+}
+
+static void UpdateModel(CPpmd8 *p)
+{
+ CPpmd_Void_Ref successor, fSuccessor = SUCCESSOR(p->FoundState);
+ CTX_PTR c;
+ unsigned s0, ns, fFreq = p->FoundState->Freq;
+ Byte flag, fSymbol = p->FoundState->Symbol;
+ CPpmd_State *s = NULL;
+
+ if (p->FoundState->Freq < MAX_FREQ / 4 && p->MinContext->Suffix != 0)
+ {
+ c = SUFFIX(p->MinContext);
+
+ if (c->NumStats == 0)
+ {
+ s = ONE_STATE(c);
+ if (s->Freq < 32)
+ s->Freq++;
+ }
+ else
+ {
+ s = STATS(c);
+ if (s->Symbol != p->FoundState->Symbol)
+ {
+ do { s++; } while (s->Symbol != p->FoundState->Symbol);
+ if (s[0].Freq >= s[-1].Freq)
+ {
+ SwapStates(&s[0], &s[-1]);
+ s--;
+ }
+ }
+ if (s->Freq < MAX_FREQ - 9)
+ {
+ s->Freq += 2;
+ c->SummFreq += 2;
+ }
+ }
+ }
+
+ c = p->MaxContext;
+ if (p->OrderFall == 0 && fSuccessor)
+ {
+ CTX_PTR cs = CreateSuccessors(p, True, s, p->MinContext);
+ if (cs == 0)
+ {
+ SetSuccessor(p->FoundState, 0);
+ RESTORE_MODEL(c, CTX(fSuccessor));
+ }
+ else
+ {
+ SetSuccessor(p->FoundState, REF(cs));
+ p->MaxContext = cs;
+ }
+ return;
+ }
+
+ *p->Text++ = p->FoundState->Symbol;
+ successor = REF(p->Text);
+ if (p->Text >= p->UnitsStart)
+ {
+ RESTORE_MODEL(c, CTX(fSuccessor)); /* check it */
+ return;
+ }
+
+ if (!fSuccessor)
+ {
+ CTX_PTR cs = ReduceOrder(p, s, p->MinContext);
+ if (cs == NULL)
+ {
+ RESTORE_MODEL(c, 0);
+ return;
+ }
+ fSuccessor = REF(cs);
+ }
+ else if ((Byte *)Ppmd8_GetPtr(p, fSuccessor) < p->UnitsStart)
+ {
+ CTX_PTR cs = CreateSuccessors(p, False, s, p->MinContext);
+ if (cs == NULL)
+ {
+ RESTORE_MODEL(c, 0);
+ return;
+ }
+ fSuccessor = REF(cs);
+ }
+
+ if (--p->OrderFall == 0)
+ {
+ successor = fSuccessor;
+ p->Text -= (p->MaxContext != p->MinContext);
+ }
+ #ifdef PPMD8_FREEZE_SUPPORT
+ else if (p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE)
+ {
+ successor = fSuccessor;
+ RESET_TEXT(0);
+ p->OrderFall = 0;
+ }
+ #endif
+
+ s0 = p->MinContext->SummFreq - (ns = p->MinContext->NumStats) - fFreq;
+ flag = 0x08 * (fSymbol >= 0x40);
+
+ for (; c != p->MinContext; c = SUFFIX(c))
+ {
+ unsigned ns1;
+ UInt32 cf, sf;
+ if ((ns1 = c->NumStats) != 0)
+ {
+ if ((ns1 & 1) != 0)
+ {
+ /* Expand for one UNIT */
+ unsigned oldNU = (ns1 + 1) >> 1;
+ unsigned i = U2I(oldNU);
+ if (i != U2I(oldNU + 1))
+ {
+ void *ptr = AllocUnits(p, i + 1);
+ void *oldPtr;
+ if (!ptr)
+ {
+ RESTORE_MODEL(c, CTX(fSuccessor));
+ return;
+ }
+ oldPtr = STATS(c);
+ MyMem12Cpy(ptr, oldPtr, oldNU);
+ InsertNode(p, oldPtr, i);
+ c->Stats = STATS_REF(ptr);
+ }
+ }
+ c->SummFreq = (UInt16)(c->SummFreq + (3 * ns1 + 1 < ns));
+ }
+ else
+ {
+ CPpmd_State *s = (CPpmd_State*)AllocUnits(p, 0);
+ if (!s)
+ {
+ RESTORE_MODEL(c, CTX(fSuccessor));
+ return;
+ }
+ *s = *ONE_STATE(c);
+ c->Stats = REF(s);
+ if (s->Freq < MAX_FREQ / 4 - 1)
+ s->Freq <<= 1;
+ else
+ s->Freq = MAX_FREQ - 4;
+ c->SummFreq = (UInt16)(s->Freq + p->InitEsc + (ns > 2));
+ }
+ cf = 2 * fFreq * (c->SummFreq + 6);
+ sf = (UInt32)s0 + c->SummFreq;
+ if (cf < 6 * sf)
+ {
+ cf = 1 + (cf > sf) + (cf >= 4 * sf);
+ c->SummFreq += 4;
+ }
+ else
+ {
+ cf = 4 + (cf > 9 * sf) + (cf > 12 * sf) + (cf > 15 * sf);
+ c->SummFreq = (UInt16)(c->SummFreq + cf);
+ }
+ {
+ CPpmd_State *s = STATS(c) + ns1 + 1;
+ SetSuccessor(s, successor);
+ s->Symbol = fSymbol;
+ s->Freq = (Byte)cf;
+ c->Flags |= flag;
+ c->NumStats = (Byte)(ns1 + 1);
+ }
+ }
+ p->MaxContext = p->MinContext = CTX(fSuccessor);
+}
+
+static void Rescale(CPpmd8 *p)
+{
+ unsigned i, adder, sumFreq, escFreq;
+ CPpmd_State *stats = STATS(p->MinContext);
+ CPpmd_State *s = p->FoundState;
+ {
+ CPpmd_State tmp = *s;
+ for (; s != stats; s--)
+ s[0] = s[-1];
+ *s = tmp;
+ }
+ escFreq = p->MinContext->SummFreq - s->Freq;
+ s->Freq += 4;
+ adder = (p->OrderFall != 0
+ #ifdef PPMD8_FREEZE_SUPPORT
+ || p->RestoreMethod > PPMD8_RESTORE_METHOD_FREEZE
+ #endif
+ );
+ s->Freq = (Byte)((s->Freq + adder) >> 1);
+ sumFreq = s->Freq;
+
+ i = p->MinContext->NumStats;
+ do
+ {
+ escFreq -= (++s)->Freq;
+ s->Freq = (Byte)((s->Freq + adder) >> 1);
+ sumFreq += s->Freq;
+ if (s[0].Freq > s[-1].Freq)
+ {
+ CPpmd_State *s1 = s;
+ CPpmd_State tmp = *s1;
+ do
+ s1[0] = s1[-1];
+ while (--s1 != stats && tmp.Freq > s1[-1].Freq);
+ *s1 = tmp;
+ }
+ }
+ while (--i);
+
+ if (s->Freq == 0)
+ {
+ unsigned numStats = p->MinContext->NumStats;
+ unsigned n0, n1;
+ do { i++; } while ((--s)->Freq == 0);
+ escFreq += i;
+ p->MinContext->NumStats = (Byte)(p->MinContext->NumStats - i);
+ if (p->MinContext->NumStats == 0)
+ {
+ CPpmd_State tmp = *stats;
+ tmp.Freq = (Byte)((2 * tmp.Freq + escFreq - 1) / escFreq);
+ if (tmp.Freq > MAX_FREQ / 3)
+ tmp.Freq = MAX_FREQ / 3;
+ InsertNode(p, stats, U2I((numStats + 2) >> 1));
+ p->MinContext->Flags = (p->MinContext->Flags & 0x10) + 0x08 * (tmp.Symbol >= 0x40);
+ *(p->FoundState = ONE_STATE(p->MinContext)) = tmp;
+ return;
+ }
+ n0 = (numStats + 2) >> 1;
+ n1 = (p->MinContext->NumStats + 2) >> 1;
+ if (n0 != n1)
+ p->MinContext->Stats = STATS_REF(ShrinkUnits(p, stats, n0, n1));
+ p->MinContext->Flags &= ~0x08;
+ p->MinContext->Flags |= 0x08 * ((s = STATS(p->MinContext))->Symbol >= 0x40);
+ i = p->MinContext->NumStats;
+ do { p->MinContext->Flags |= 0x08*((++s)->Symbol >= 0x40); } while (--i);
+ }
+ p->MinContext->SummFreq = (UInt16)(sumFreq + escFreq - (escFreq >> 1));
+ p->MinContext->Flags |= 0x4;
+ p->FoundState = STATS(p->MinContext);
+}
+
+CPpmd_See *Ppmd8_MakeEscFreq(CPpmd8 *p, unsigned numMasked1, UInt32 *escFreq)
+{
+ CPpmd_See *see;
+ if (p->MinContext->NumStats != 0xFF)
+ {
+ see = p->See[p->NS2Indx[p->MinContext->NumStats + 2] - 3] +
+ (p->MinContext->SummFreq > 11 * ((unsigned)p->MinContext->NumStats + 1)) +
+ 2 * (2 * (unsigned)p->MinContext->NumStats <
+ ((unsigned)SUFFIX(p->MinContext)->NumStats + numMasked1)) +
+ p->MinContext->Flags;
+ {
+ unsigned r = (see->Summ >> see->Shift);
+ see->Summ = (UInt16)(see->Summ - r);
+ *escFreq = r + (r == 0);
+ }
+ }
+ else
+ {
+ see = &p->DummySee;
+ *escFreq = 1;
+ }
+ return see;
+}
+
+static void NextContext(CPpmd8 *p)
+{
+ CTX_PTR c = CTX(SUCCESSOR(p->FoundState));
+ if (p->OrderFall == 0 && (Byte *)c >= p->UnitsStart)
+ p->MinContext = p->MaxContext = c;
+ else
+ {
+ UpdateModel(p);
+ p->MinContext = p->MaxContext;
+ }
+}
+
+void Ppmd8_Update1(CPpmd8 *p)
+{
+ CPpmd_State *s = p->FoundState;
+ s->Freq += 4;
+ p->MinContext->SummFreq += 4;
+ if (s[0].Freq > s[-1].Freq)
+ {
+ SwapStates(&s[0], &s[-1]);
+ p->FoundState = --s;
+ if (s->Freq > MAX_FREQ)
+ Rescale(p);
+ }
+ NextContext(p);
+}
+
+void Ppmd8_Update1_0(CPpmd8 *p)
+{
+ p->PrevSuccess = (2 * p->FoundState->Freq >= p->MinContext->SummFreq);
+ p->RunLength += p->PrevSuccess;
+ p->MinContext->SummFreq += 4;
+ if ((p->FoundState->Freq += 4) > MAX_FREQ)
+ Rescale(p);
+ NextContext(p);
+}
+
+void Ppmd8_UpdateBin(CPpmd8 *p)
+{
+ p->FoundState->Freq = (Byte)(p->FoundState->Freq + (p->FoundState->Freq < 196));
+ p->PrevSuccess = 1;
+ p->RunLength++;
+ NextContext(p);
+}
+
+void Ppmd8_Update2(CPpmd8 *p)
+{
+ p->MinContext->SummFreq += 4;
+ if ((p->FoundState->Freq += 4) > MAX_FREQ)
+ Rescale(p);
+ p->RunLength = p->InitRL;
+ UpdateModel(p);
+ p->MinContext = p->MaxContext;
+}
+
+/* H->I changes:
+ NS2Indx
+  GlueCount, and Glue method
+ BinSum
+ See / EscFreq
+ CreateSuccessors updates more suffix contexts
+ UpdateModel consts.
+ PrevSuccess Update
+*/
diff --git a/src/libs/7zip/unix/C/Ppmd8.h b/src/libs/7zip/unix/C/Ppmd8.h
new file mode 100644
index 000000000..870dc9dd8
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd8.h
@@ -0,0 +1,133 @@
+/* Ppmd8.h -- PPMdI codec
+2010-03-24 : Igor Pavlov : Public domain
+This code is based on:
+ PPMd var.I (2002): Dmitry Shkarin : Public domain
+ Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
+
+#ifndef __PPMD8_H
+#define __PPMD8_H
+
+#include "Ppmd.h"
+
+EXTERN_C_BEGIN
+
+#define PPMD8_MIN_ORDER 2
+#define PPMD8_MAX_ORDER 16
+
+struct CPpmd8_Context_;
+
+typedef
+ #ifdef PPMD_32BIT
+ struct CPpmd8_Context_ *
+ #else
+ UInt32
+ #endif
+ CPpmd8_Context_Ref;
+
+typedef struct CPpmd8_Context_
+{
+ Byte NumStats;
+ Byte Flags;
+ UInt16 SummFreq;
+ CPpmd_State_Ref Stats;
+ CPpmd8_Context_Ref Suffix;
+} CPpmd8_Context;
+
+#define Ppmd8Context_OneState(p) ((CPpmd_State *)&(p)->SummFreq)
+
+/* The BUG in Shkarin's code for FREEZE mode was fixed, but that fixed
+ code is not compatible with original code for some files compressed
+ in FREEZE mode. So we disable FREEZE mode support. */
+
+enum
+{
+ PPMD8_RESTORE_METHOD_RESTART,
+ PPMD8_RESTORE_METHOD_CUT_OFF
+ #ifdef PPMD8_FREEZE_SUPPORT
+ , PPMD8_RESTORE_METHOD_FREEZE
+ #endif
+};
+
+typedef struct
+{
+ CPpmd8_Context *MinContext, *MaxContext;
+ CPpmd_State *FoundState;
+ unsigned OrderFall, InitEsc, PrevSuccess, MaxOrder;
+ Int32 RunLength, InitRL; /* must be 32-bit at least */
+
+ UInt32 Size;
+ UInt32 GlueCount;
+ Byte *Base, *LoUnit, *HiUnit, *Text, *UnitsStart;
+ UInt32 AlignOffset;
+ unsigned RestoreMethod;
+
+ /* Range Coder */
+ UInt32 Range;
+ UInt32 Code;
+ UInt32 Low;
+ union
+ {
+ IByteIn *In;
+ IByteOut *Out;
+ } Stream;
+
+ Byte Indx2Units[PPMD_NUM_INDEXES];
+ Byte Units2Indx[128];
+ CPpmd_Void_Ref FreeList[PPMD_NUM_INDEXES];
+ UInt32 Stamps[PPMD_NUM_INDEXES];
+
+ Byte NS2BSIndx[256], NS2Indx[260];
+ CPpmd_See DummySee, See[24][32];
+ UInt16 BinSumm[25][64];
+} CPpmd8;
+
+void Ppmd8_Construct(CPpmd8 *p);
+Bool Ppmd8_Alloc(CPpmd8 *p, UInt32 size, ISzAlloc *alloc);
+void Ppmd8_Free(CPpmd8 *p, ISzAlloc *alloc);
+void Ppmd8_Init(CPpmd8 *p, unsigned maxOrder, unsigned restoreMethod);
+#define Ppmd8_WasAllocated(p) ((p)->Base != NULL)
+
+
+/* ---------- Internal Functions ---------- */
+
+extern const Byte PPMD8_kExpEscape[16];
+
+#ifdef PPMD_32BIT
+ #define Ppmd8_GetPtr(p, ptr) (ptr)
+ #define Ppmd8_GetContext(p, ptr) (ptr)
+ #define Ppmd8_GetStats(p, ctx) ((ctx)->Stats)
+#else
+ #define Ppmd8_GetPtr(p, offs) ((void *)((p)->Base + (offs)))
+ #define Ppmd8_GetContext(p, offs) ((CPpmd8_Context *)Ppmd8_GetPtr((p), (offs)))
+ #define Ppmd8_GetStats(p, ctx) ((CPpmd_State *)Ppmd8_GetPtr((p), ((ctx)->Stats)))
+#endif
+
+void Ppmd8_Update1(CPpmd8 *p);
+void Ppmd8_Update1_0(CPpmd8 *p);
+void Ppmd8_Update2(CPpmd8 *p);
+void Ppmd8_UpdateBin(CPpmd8 *p);
+
+#define Ppmd8_GetBinSumm(p) \
+ &p->BinSumm[p->NS2Indx[Ppmd8Context_OneState(p->MinContext)->Freq - 1]][ \
+ p->NS2BSIndx[Ppmd8_GetContext(p, p->MinContext->Suffix)->NumStats] + \
+ p->PrevSuccess + p->MinContext->Flags + ((p->RunLength >> 26) & 0x20)]
+
+CPpmd_See *Ppmd8_MakeEscFreq(CPpmd8 *p, unsigned numMasked, UInt32 *scale);
+
+
+/* ---------- Decode ---------- */
+
+Bool Ppmd8_RangeDec_Init(CPpmd8 *p);
+#define Ppmd8_RangeDec_IsFinishedOK(p) ((p)->Code == 0)
+int Ppmd8_DecodeSymbol(CPpmd8 *p); /* returns: -1 as EndMarker, -2 as DataError */
+
+
+/* ---------- Encode ---------- */
+
+#define Ppmd8_RangeEnc_Init(p) { (p)->Low = 0; (p)->Range = 0xFFFFFFFF; }
+void Ppmd8_RangeEnc_FlushData(CPpmd8 *p);
+void Ppmd8_EncodeSymbol(CPpmd8 *p, int symbol); /* symbol = -1 means EndMarker */
+
+EXTERN_C_END
+
+#endif
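
A short lifecycle sketch for the CPpmd8 API declared above: Construct once, Alloc a model of the chosen size, Init with an order in [PPMD8_MIN_ORDER, PPMD8_MAX_ORDER] and a restore method, then Free. ISzAlloc comes from Types.h in this patch; the malloc-backed allocator and the helper name Ppmd8_Example are illustrative assumptions.

/* Hedged lifecycle sketch; not part of the patch. */
#include <stdlib.h>
#include "Ppmd8.h"

static void *SzAlloc(void *p, size_t size) { (void)p; return malloc(size); }
static void SzFree(void *p, void *addr) { (void)p; free(addr); }

int Ppmd8_Example(UInt32 memSize, unsigned order)      /* e.g. 16 << 20 and order 8 */
{
  ISzAlloc alloc = { SzAlloc, SzFree };
  CPpmd8 ppmd;
  Ppmd8_Construct(&ppmd);                              /* must precede the first Alloc/Free */
  if (!Ppmd8_Alloc(&ppmd, memSize, &alloc))
    return 1;
  Ppmd8_Init(&ppmd, order, PPMD8_RESTORE_METHOD_RESTART);
  /* ... hook up ppmd.Stream and encode or decode (see Ppmd8Enc.c / Ppmd8Dec.c below) ... */
  Ppmd8_Free(&ppmd, &alloc);
  return 0;
}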
diff --git a/src/libs/7zip/unix/C/Ppmd8Dec.c b/src/libs/7zip/unix/C/Ppmd8Dec.c
new file mode 100644
index 000000000..c54e02ab2
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd8Dec.c
@@ -0,0 +1,155 @@
+/* Ppmd8Dec.c -- PPMdI Decoder
+2010-04-16 : Igor Pavlov : Public domain
+This code is based on:
+ PPMd var.I (2002): Dmitry Shkarin : Public domain
+ Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
+
+#include "Ppmd8.h"
+
+#define kTop (1 << 24)
+#define kBot (1 << 15)
+
+Bool Ppmd8_RangeDec_Init(CPpmd8 *p)
+{
+ unsigned i;
+ p->Low = 0;
+ p->Range = 0xFFFFFFFF;
+ p->Code = 0;
+ for (i = 0; i < 4; i++)
+ p->Code = (p->Code << 8) | p->Stream.In->Read(p->Stream.In);
+ return (p->Code < 0xFFFFFFFF);
+}
+
+static UInt32 RangeDec_GetThreshold(CPpmd8 *p, UInt32 total)
+{
+ return p->Code / (p->Range /= total);
+}
+
+static void RangeDec_Decode(CPpmd8 *p, UInt32 start, UInt32 size)
+{
+ start *= p->Range;
+ p->Low += start;
+ p->Code -= start;
+ p->Range *= size;
+
+ while ((p->Low ^ (p->Low + p->Range)) < kTop ||
+ (p->Range < kBot && ((p->Range = (0 - p->Low) & (kBot - 1)), 1)))
+ {
+ p->Code = (p->Code << 8) | p->Stream.In->Read(p->Stream.In);
+ p->Range <<= 8;
+ p->Low <<= 8;
+ }
+}
+
+#define MASK(sym) ((signed char *)charMask)[sym]
+
+int Ppmd8_DecodeSymbol(CPpmd8 *p)
+{
+ size_t charMask[256 / sizeof(size_t)];
+ if (p->MinContext->NumStats != 0)
+ {
+ CPpmd_State *s = Ppmd8_GetStats(p, p->MinContext);
+ unsigned i;
+ UInt32 count, hiCnt;
+ if ((count = RangeDec_GetThreshold(p, p->MinContext->SummFreq)) < (hiCnt = s->Freq))
+ {
+ Byte symbol;
+ RangeDec_Decode(p, 0, s->Freq);
+ p->FoundState = s;
+ symbol = s->Symbol;
+ Ppmd8_Update1_0(p);
+ return symbol;
+ }
+ p->PrevSuccess = 0;
+ i = p->MinContext->NumStats;
+ do
+ {
+ if ((hiCnt += (++s)->Freq) > count)
+ {
+ Byte symbol;
+ RangeDec_Decode(p, hiCnt - s->Freq, s->Freq);
+ p->FoundState = s;
+ symbol = s->Symbol;
+ Ppmd8_Update1(p);
+ return symbol;
+ }
+ }
+ while (--i);
+ if (count >= p->MinContext->SummFreq)
+ return -2;
+ RangeDec_Decode(p, hiCnt, p->MinContext->SummFreq - hiCnt);
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(s->Symbol) = 0;
+ i = p->MinContext->NumStats;
+ do { MASK((--s)->Symbol) = 0; } while (--i);
+ }
+ else
+ {
+ UInt16 *prob = Ppmd8_GetBinSumm(p);
+ if (((p->Code / (p->Range >>= 14)) < *prob))
+ {
+ Byte symbol;
+ RangeDec_Decode(p, 0, *prob);
+ *prob = (UInt16)PPMD_UPDATE_PROB_0(*prob);
+ symbol = (p->FoundState = Ppmd8Context_OneState(p->MinContext))->Symbol;
+ Ppmd8_UpdateBin(p);
+ return symbol;
+ }
+ RangeDec_Decode(p, *prob, (1 << 14) - *prob);
+ *prob = (UInt16)PPMD_UPDATE_PROB_1(*prob);
+ p->InitEsc = PPMD8_kExpEscape[*prob >> 10];
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(Ppmd8Context_OneState(p->MinContext)->Symbol) = 0;
+ p->PrevSuccess = 0;
+ }
+ for (;;)
+ {
+ CPpmd_State *ps[256], *s;
+ UInt32 freqSum, count, hiCnt;
+ CPpmd_See *see;
+ unsigned i, num, numMasked = p->MinContext->NumStats;
+ do
+ {
+ p->OrderFall++;
+ if (!p->MinContext->Suffix)
+ return -1;
+ p->MinContext = Ppmd8_GetContext(p, p->MinContext->Suffix);
+ }
+ while (p->MinContext->NumStats == numMasked);
+ hiCnt = 0;
+ s = Ppmd8_GetStats(p, p->MinContext);
+ i = 0;
+ num = p->MinContext->NumStats - numMasked;
+ do
+ {
+ int k = (int)(MASK(s->Symbol));
+ hiCnt += (s->Freq & k);
+ ps[i] = s++;
+ i -= k;
+ }
+ while (i != num);
+
+ see = Ppmd8_MakeEscFreq(p, numMasked, &freqSum);
+ freqSum += hiCnt;
+ count = RangeDec_GetThreshold(p, freqSum);
+
+ if (count < hiCnt)
+ {
+ Byte symbol;
+ CPpmd_State **pps = ps;
+ for (hiCnt = 0; (hiCnt += (*pps)->Freq) <= count; pps++);
+ s = *pps;
+ RangeDec_Decode(p, hiCnt - s->Freq, s->Freq);
+ Ppmd_See_Update(see);
+ p->FoundState = s;
+ symbol = s->Symbol;
+ Ppmd8_Update2(p);
+ return symbol;
+ }
+ if (count >= freqSum)
+ return -2;
+ RangeDec_Decode(p, hiCnt, freqSum - hiCnt);
+ see->Summ = (UInt16)(see->Summ + freqSum);
+ do { MASK(ps[--i]->Symbol) = 0; } while (i != 0);
+ }
+}
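
A decode-loop sketch around Ppmd8_DecodeSymbol above. The CPpmd8 is assumed to be already constructed, allocated and initialized with the same memory size, order and restore method used for encoding, and the stream is assumed to end with the -1 end marker. The FILE-backed IByteIn wrapper and the helper names are illustrative; IByteIn itself comes from Types.h in this patch.

/* Hedged usage sketch; not part of the patch. */
#include <stdio.h>
#include "Ppmd8.h"

typedef struct { IByteIn p; FILE *f; } CFileIn;        /* hypothetical wrapper; IByteIn must be the first member */
static Byte FileIn_Read(void *pp) { int c = fgetc(((CFileIn *)pp)->f); return (Byte)(c == EOF ? 0 : c); }

int Ppmd8_DecodeToBuf(CPpmd8 *ppmd, FILE *in, Byte *dest, size_t destSize, size_t *written)
{
  CFileIn src = { { FileIn_Read }, in };
  size_t n = 0;
  int sym;
  ppmd->Stream.In = &src.p;
  if (!Ppmd8_RangeDec_Init(ppmd))
    return 1;                                          /* bad range-coder header bytes */
  for (;;)
  {
    sym = Ppmd8_DecodeSymbol(ppmd);
    if (sym < 0)
      break;                                           /* -1: end marker, -2: data error */
    if (n < destSize)
      dest[n] = (Byte)sym;
    n++;
  }
  *written = n;
  return (sym == -1 && Ppmd8_RangeDec_IsFinishedOK(ppmd)) ? 0 : 2;
}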
diff --git a/src/libs/7zip/unix/C/Ppmd8Enc.c b/src/libs/7zip/unix/C/Ppmd8Enc.c
new file mode 100644
index 000000000..8da727eb2
--- /dev/null
+++ b/src/libs/7zip/unix/C/Ppmd8Enc.c
@@ -0,0 +1,161 @@
+/* Ppmd8Enc.c -- PPMdI Encoder
+2010-04-16 : Igor Pavlov : Public domain
+This code is based on:
+ PPMd var.I (2002): Dmitry Shkarin : Public domain
+ Carryless rangecoder (1999): Dmitry Subbotin : Public domain */
+
+#include "Ppmd8.h"
+
+#define kTop (1 << 24)
+#define kBot (1 << 15)
+
+void Ppmd8_RangeEnc_FlushData(CPpmd8 *p)
+{
+ unsigned i;
+ for (i = 0; i < 4; i++, p->Low <<= 8 )
+ p->Stream.Out->Write(p->Stream.Out, (Byte)(p->Low >> 24));
+}
+
+static void RangeEnc_Normalize(CPpmd8 *p)
+{
+ while ((p->Low ^ (p->Low + p->Range)) < kTop ||
+ (p->Range < kBot && ((p->Range = (0 - p->Low) & (kBot - 1)), 1)))
+ {
+ p->Stream.Out->Write(p->Stream.Out, (Byte)(p->Low >> 24));
+ p->Range <<= 8;
+ p->Low <<= 8;
+ }
+}
+
+static void RangeEnc_Encode(CPpmd8 *p, UInt32 start, UInt32 size, UInt32 total)
+{
+ p->Low += start * (p->Range /= total);
+ p->Range *= size;
+ RangeEnc_Normalize(p);
+}
+
+static void RangeEnc_EncodeBit_0(CPpmd8 *p, UInt32 size0)
+{
+ p->Range >>= 14;
+ p->Range *= size0;
+ RangeEnc_Normalize(p);
+}
+
+static void RangeEnc_EncodeBit_1(CPpmd8 *p, UInt32 size0)
+{
+ p->Low += size0 * (p->Range >>= 14);
+ p->Range *= ((1 << 14) - size0);
+ RangeEnc_Normalize(p);
+}
+
+
+#define MASK(sym) ((signed char *)charMask)[sym]
+
+void Ppmd8_EncodeSymbol(CPpmd8 *p, int symbol)
+{
+ size_t charMask[256 / sizeof(size_t)];
+ if (p->MinContext->NumStats != 0)
+ {
+ CPpmd_State *s = Ppmd8_GetStats(p, p->MinContext);
+ UInt32 sum;
+ unsigned i;
+ if (s->Symbol == symbol)
+ {
+ RangeEnc_Encode(p, 0, s->Freq, p->MinContext->SummFreq);
+ p->FoundState = s;
+ Ppmd8_Update1_0(p);
+ return;
+ }
+ p->PrevSuccess = 0;
+ sum = s->Freq;
+ i = p->MinContext->NumStats;
+ do
+ {
+ if ((++s)->Symbol == symbol)
+ {
+ RangeEnc_Encode(p, sum, s->Freq, p->MinContext->SummFreq);
+ p->FoundState = s;
+ Ppmd8_Update1(p);
+ return;
+ }
+ sum += s->Freq;
+ }
+ while (--i);
+
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(s->Symbol) = 0;
+ i = p->MinContext->NumStats;
+ do { MASK((--s)->Symbol) = 0; } while (--i);
+ RangeEnc_Encode(p, sum, p->MinContext->SummFreq - sum, p->MinContext->SummFreq);
+ }
+ else
+ {
+ UInt16 *prob = Ppmd8_GetBinSumm(p);
+ CPpmd_State *s = Ppmd8Context_OneState(p->MinContext);
+ if (s->Symbol == symbol)
+ {
+ RangeEnc_EncodeBit_0(p, *prob);
+ *prob = (UInt16)PPMD_UPDATE_PROB_0(*prob);
+ p->FoundState = s;
+ Ppmd8_UpdateBin(p);
+ return;
+ }
+ else
+ {
+ RangeEnc_EncodeBit_1(p, *prob);
+ *prob = (UInt16)PPMD_UPDATE_PROB_1(*prob);
+ p->InitEsc = PPMD8_kExpEscape[*prob >> 10];
+ PPMD_SetAllBitsIn256Bytes(charMask);
+ MASK(s->Symbol) = 0;
+ p->PrevSuccess = 0;
+ }
+ }
+ for (;;)
+ {
+ UInt32 escFreq;
+ CPpmd_See *see;
+ CPpmd_State *s;
+ UInt32 sum;
+ unsigned i, numMasked = p->MinContext->NumStats;
+ do
+ {
+ p->OrderFall++;
+ if (!p->MinContext->Suffix)
+ return; /* EndMarker (symbol = -1) */
+ p->MinContext = Ppmd8_GetContext(p, p->MinContext->Suffix);
+ }
+ while (p->MinContext->NumStats == numMasked);
+
+ see = Ppmd8_MakeEscFreq(p, numMasked, &escFreq);
+ s = Ppmd8_GetStats(p, p->MinContext);
+ sum = 0;
+ i = p->MinContext->NumStats + 1;
+ do
+ {
+ int cur = s->Symbol;
+ if (cur == symbol)
+ {
+ UInt32 low = sum;
+ CPpmd_State *s1 = s;
+ do
+ {
+ sum += (s->Freq & (int)(MASK(s->Symbol)));
+ s++;
+ }
+ while (--i);
+ RangeEnc_Encode(p, low, s1->Freq, sum + escFreq);
+ Ppmd_See_Update(see);
+ p->FoundState = s1;
+ Ppmd8_Update2(p);
+ return;
+ }
+ sum += (s->Freq & (int)(MASK(cur)));
+ MASK(cur) = 0;
+ s++;
+ }
+ while (--i);
+
+ RangeEnc_Encode(p, sum, escFreq, sum + escFreq);
+ see->Summ = (UInt16)(see->Summ + sum + escFreq);
+ }
+}
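
The matching encode loop: every input byte is fed to Ppmd8_EncodeSymbol above, symbol -1 encodes the end marker (as noted in Ppmd8.h), and Ppmd8_RangeEnc_FlushData pushes out the final range-coder bytes. The FILE-backed IByteOut wrapper and the helper names are illustrative assumptions.

/* Hedged usage sketch; not part of the patch. */
#include <stdio.h>
#include "Ppmd8.h"

typedef struct { IByteOut p; FILE *f; } CFileOut;      /* hypothetical wrapper; IByteOut must be the first member */
static void FileOut_Write(void *pp, Byte b) { fputc(b, ((CFileOut *)pp)->f); }

void Ppmd8_EncodeBuf(CPpmd8 *ppmd, const Byte *data, size_t size, FILE *out)
{
  CFileOut sink = { { FileOut_Write }, out };
  size_t i;
  ppmd->Stream.Out = &sink.p;
  Ppmd8_RangeEnc_Init(ppmd);                           /* Low = 0, Range = 0xFFFFFFFF */
  for (i = 0; i < size; i++)
    Ppmd8_EncodeSymbol(ppmd, data[i]);
  Ppmd8_EncodeSymbol(ppmd, -1);                        /* end marker so the decoder knows where to stop */
  Ppmd8_RangeEnc_FlushData(ppmd);                      /* emit the last range-coder bytes */
}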
diff --git a/src/libs/7zip/unix/C/RotateDefs.h b/src/libs/7zip/unix/C/RotateDefs.h
new file mode 100644
index 000000000..c3a1385ce
--- /dev/null
+++ b/src/libs/7zip/unix/C/RotateDefs.h
@@ -0,0 +1,20 @@
+/* RotateDefs.h -- Rotate functions
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __ROTATE_DEFS_H
+#define __ROTATE_DEFS_H
+
+#ifdef _MSC_VER
+
+#include <stdlib.h>
+#define rotlFixed(x, n) _rotl((x), (n))
+#define rotrFixed(x, n) _rotr((x), (n))
+
+#else
+
+#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
+
+#endif
+
+#endif
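
Worth noting about the portable fallbacks above: they assume a 32-bit operand and a rotate count strictly between 0 and 31 (n == 0 would shift by 32, which is undefined behaviour), and they evaluate x and n more than once, so side-effecting arguments should be avoided. A tiny self-check, not part of the patch:

#include <assert.h>
#include "RotateDefs.h"
#include "Types.h"

static void RotateDefs_SelfTest(void)
{
  UInt32 x = 0x80000001u;
  assert(rotlFixed(x, 1) == 0x00000003u);   /* left rotate of a 32-bit value */
  assert(rotrFixed(x, 1) == 0xC0000000u);   /* right rotate of a 32-bit value */
}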
diff --git a/src/libs/7zip/unix/C/Sha256.c b/src/libs/7zip/unix/C/Sha256.c
new file mode 100644
index 000000000..eb4fc61fc
--- /dev/null
+++ b/src/libs/7zip/unix/C/Sha256.c
@@ -0,0 +1,204 @@
+/* Crypto/Sha256.c -- SHA-256 Hash
+2010-06-11 : Igor Pavlov : Public domain
+This code is based on public domain code from Wei Dai's Crypto++ library. */
+
+#include "RotateDefs.h"
+#include "Sha256.h"
+
+/* define it for speed optimization */
+/* #define _SHA256_UNROLL */
+/* #define _SHA256_UNROLL2 */
+
+void Sha256_Init(CSha256 *p)
+{
+ p->state[0] = 0x6a09e667;
+ p->state[1] = 0xbb67ae85;
+ p->state[2] = 0x3c6ef372;
+ p->state[3] = 0xa54ff53a;
+ p->state[4] = 0x510e527f;
+ p->state[5] = 0x9b05688c;
+ p->state[6] = 0x1f83d9ab;
+ p->state[7] = 0x5be0cd19;
+ p->count = 0;
+}
+
+#define S0(x) (rotrFixed(x, 2) ^ rotrFixed(x,13) ^ rotrFixed(x, 22))
+#define S1(x) (rotrFixed(x, 6) ^ rotrFixed(x,11) ^ rotrFixed(x, 25))
+#define s0(x) (rotrFixed(x, 7) ^ rotrFixed(x,18) ^ (x >> 3))
+#define s1(x) (rotrFixed(x,17) ^ rotrFixed(x,19) ^ (x >> 10))
+
+#define blk0(i) (W[i] = data[i])
+#define blk2(i) (W[i&15] += s1(W[(i-2)&15]) + W[(i-7)&15] + s0(W[(i-15)&15]))
+
+#define Ch(x,y,z) (z^(x&(y^z)))
+#define Maj(x,y,z) ((x&y)|(z&(x|y)))
+
+#define a(i) T[(0-(i))&7]
+#define b(i) T[(1-(i))&7]
+#define c(i) T[(2-(i))&7]
+#define d(i) T[(3-(i))&7]
+#define e(i) T[(4-(i))&7]
+#define f(i) T[(5-(i))&7]
+#define g(i) T[(6-(i))&7]
+#define h(i) T[(7-(i))&7]
+
+
+#ifdef _SHA256_UNROLL2
+
+#define R(a,b,c,d,e,f,g,h, i) h += S1(e) + Ch(e,f,g) + K[i+j] + (j?blk2(i):blk0(i));\
+ d += h; h += S0(a) + Maj(a, b, c)
+
+#define RX_8(i) \
+ R(a,b,c,d,e,f,g,h, i); \
+ R(h,a,b,c,d,e,f,g, i+1); \
+ R(g,h,a,b,c,d,e,f, i+2); \
+ R(f,g,h,a,b,c,d,e, i+3); \
+ R(e,f,g,h,a,b,c,d, i+4); \
+ R(d,e,f,g,h,a,b,c, i+5); \
+ R(c,d,e,f,g,h,a,b, i+6); \
+ R(b,c,d,e,f,g,h,a, i+7)
+
+#else
+
+#define R(i) h(i) += S1(e(i)) + Ch(e(i),f(i),g(i)) + K[i+j] + (j?blk2(i):blk0(i));\
+ d(i) += h(i); h(i) += S0(a(i)) + Maj(a(i), b(i), c(i))
+
+#ifdef _SHA256_UNROLL
+
+#define RX_8(i) R(i+0); R(i+1); R(i+2); R(i+3); R(i+4); R(i+5); R(i+6); R(i+7);
+
+#endif
+
+#endif
+
+static const UInt32 K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+static void Sha256_Transform(UInt32 *state, const UInt32 *data)
+{
+ UInt32 W[16];
+ unsigned j;
+ #ifdef _SHA256_UNROLL2
+ UInt32 a,b,c,d,e,f,g,h;
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
+ #else
+ UInt32 T[8];
+ for (j = 0; j < 8; j++)
+ T[j] = state[j];
+ #endif
+
+ for (j = 0; j < 64; j += 16)
+ {
+ #if defined(_SHA256_UNROLL) || defined(_SHA256_UNROLL2)
+ RX_8(0); RX_8(8);
+ #else
+ unsigned i;
+ for (i = 0; i < 16; i++) { R(i); }
+ #endif
+ }
+
+ #ifdef _SHA256_UNROLL2
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
+ #else
+ for (j = 0; j < 8; j++)
+ state[j] += T[j];
+ #endif
+
+ /* Wipe variables */
+ /* memset(W, 0, sizeof(W)); */
+ /* memset(T, 0, sizeof(T)); */
+}
+
+#undef S0
+#undef S1
+#undef s0
+#undef s1
+
+static void Sha256_WriteByteBlock(CSha256 *p)
+{
+ UInt32 data32[16];
+ unsigned i;
+ for (i = 0; i < 16; i++)
+ data32[i] =
+ ((UInt32)(p->buffer[i * 4 ]) << 24) +
+ ((UInt32)(p->buffer[i * 4 + 1]) << 16) +
+ ((UInt32)(p->buffer[i * 4 + 2]) << 8) +
+ ((UInt32)(p->buffer[i * 4 + 3]));
+ Sha256_Transform(p->state, data32);
+}
+
+void Sha256_Update(CSha256 *p, const Byte *data, size_t size)
+{
+ UInt32 curBufferPos = (UInt32)p->count & 0x3F;
+ while (size > 0)
+ {
+ p->buffer[curBufferPos++] = *data++;
+ p->count++;
+ size--;
+ if (curBufferPos == 64)
+ {
+ curBufferPos = 0;
+ Sha256_WriteByteBlock(p);
+ }
+ }
+}
+
+void Sha256_Final(CSha256 *p, Byte *digest)
+{
+ UInt64 lenInBits = (p->count << 3);
+ UInt32 curBufferPos = (UInt32)p->count & 0x3F;
+ unsigned i;
+ p->buffer[curBufferPos++] = 0x80;
+ while (curBufferPos != (64 - 8))
+ {
+ curBufferPos &= 0x3F;
+ if (curBufferPos == 0)
+ Sha256_WriteByteBlock(p);
+ p->buffer[curBufferPos++] = 0;
+ }
+ for (i = 0; i < 8; i++)
+ {
+ p->buffer[curBufferPos++] = (Byte)(lenInBits >> 56);
+ lenInBits <<= 8;
+ }
+ Sha256_WriteByteBlock(p);
+
+ for (i = 0; i < 8; i++)
+ {
+ *digest++ = (Byte)(p->state[i] >> 24);
+ *digest++ = (Byte)(p->state[i] >> 16);
+ *digest++ = (Byte)(p->state[i] >> 8);
+ *digest++ = (Byte)(p->state[i]);
+ }
+ Sha256_Init(p);
+}
diff --git a/src/libs/7zip/unix/C/Sha256.h b/src/libs/7zip/unix/C/Sha256.h
new file mode 100644
index 000000000..530f513ec
--- /dev/null
+++ b/src/libs/7zip/unix/C/Sha256.h
@@ -0,0 +1,26 @@
+/* Sha256.h -- SHA-256 Hash
+2010-06-11 : Igor Pavlov : Public domain */
+
+#ifndef __CRYPTO_SHA256_H
+#define __CRYPTO_SHA256_H
+
+#include "Types.h"
+
+EXTERN_C_BEGIN
+
+#define SHA256_DIGEST_SIZE 32
+
+typedef struct
+{
+ UInt32 state[8];
+ UInt64 count;
+ Byte buffer[64];
+} CSha256;
+
+void Sha256_Init(CSha256 *p);
+void Sha256_Update(CSha256 *p, const Byte *data, size_t size);
+void Sha256_Final(CSha256 *p, Byte *digest);
+
+EXTERN_C_END
+
+#endif
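
A usage sketch for the streaming interface declared above: Init once, call Update any number of times on consecutive chunks, then Final, which writes SHA256_DIGEST_SIZE bytes and re-initializes the context (see the end of Sha256.c). The helper name is illustrative.

/* Hedged usage sketch; not part of the patch. */
#include <stdio.h>
#include "Sha256.h"

static void PrintSha256(const Byte *data, size_t size)
{
  CSha256 ctx;
  Byte digest[SHA256_DIGEST_SIZE];
  unsigned i;
  Sha256_Init(&ctx);
  Sha256_Update(&ctx, data, size);     /* may be called repeatedly for chunked input */
  Sha256_Final(&ctx, digest);          /* finalizes and resets ctx for reuse */
  for (i = 0; i < SHA256_DIGEST_SIZE; i++)
    printf("%02x", digest[i]);
  printf("\n");
}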
diff --git a/src/libs/7zip/unix/C/Sort.c b/src/libs/7zip/unix/C/Sort.c
new file mode 100644
index 000000000..388d22893
--- /dev/null
+++ b/src/libs/7zip/unix/C/Sort.c
@@ -0,0 +1,93 @@
+/* Sort.c -- Sort functions
+2010-09-17 : Igor Pavlov : Public domain */
+
+#include "Sort.h"
+
+#define HeapSortDown(p, k, size, temp) \
+ { for (;;) { \
+ UInt32 s = (k << 1); \
+ if (s > size) break; \
+ if (s < size && p[s + 1] > p[s]) s++; \
+ if (temp >= p[s]) break; \
+ p[k] = p[s]; k = s; \
+ } p[k] = temp; }
+
+void HeapSort(UInt32 *p, UInt32 size)
+{
+ if (size <= 1)
+ return;
+ p--;
+ {
+ UInt32 i = size / 2;
+ do
+ {
+ UInt32 temp = p[i];
+ UInt32 k = i;
+ HeapSortDown(p, k, size, temp)
+ }
+ while (--i != 0);
+ }
+ /*
+ do
+ {
+ UInt32 k = 1;
+ UInt32 temp = p[size];
+ p[size--] = p[1];
+ HeapSortDown(p, k, size, temp)
+ }
+ while (size > 1);
+ */
+ while (size > 3)
+ {
+ UInt32 temp = p[size];
+ UInt32 k = (p[3] > p[2]) ? 3 : 2;
+ p[size--] = p[1];
+ p[1] = p[k];
+ HeapSortDown(p, k, size, temp)
+ }
+ {
+ UInt32 temp = p[size];
+ p[size] = p[1];
+ if (size > 2 && p[2] < temp)
+ {
+ p[1] = p[2];
+ p[2] = temp;
+ }
+ else
+ p[1] = temp;
+ }
+}
+
+/*
+#define HeapSortRefDown(p, vals, n, size, temp) \
+ { UInt32 k = n; UInt32 val = vals[temp]; for (;;) { \
+ UInt32 s = (k << 1); \
+ if (s > size) break; \
+ if (s < size && vals[p[s + 1]] > vals[p[s]]) s++; \
+ if (val >= vals[p[s]]) break; \
+ p[k] = p[s]; k = s; \
+ } p[k] = temp; }
+
+void HeapSortRef(UInt32 *p, UInt32 *vals, UInt32 size)
+{
+ if (size <= 1)
+ return;
+ p--;
+ {
+ UInt32 i = size / 2;
+ do
+ {
+ UInt32 temp = p[i];
+ HeapSortRefDown(p, vals, i, size, temp);
+ }
+ while (--i != 0);
+ }
+ do
+ {
+ UInt32 temp = p[size];
+ p[size--] = p[1];
+ HeapSortRefDown(p, vals, 1, size, temp);
+ }
+ while (size > 1);
+}
+*/
diff --git a/src/libs/7zip/unix/C/Sort.h b/src/libs/7zip/unix/C/Sort.h
new file mode 100644
index 000000000..65dfc6f6a
--- /dev/null
+++ b/src/libs/7zip/unix/C/Sort.h
@@ -0,0 +1,20 @@
+/* Sort.h -- Sort functions
+2009-02-07 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_SORT_H
+#define __7Z_SORT_H
+
+#include "Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void HeapSort(UInt32 *p, UInt32 size);
+/* void HeapSortRef(UInt32 *p, UInt32 *vals, UInt32 size); */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
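
HeapSort above sorts a UInt32 array in place into ascending order (max-heap sift-down, then repeated extraction to the tail). A small example, not part of the patch:

#include "Sort.h"

static void SortExample(void)
{
  UInt32 a[] = { 5, 1, 4, 1, 5, 9, 2, 6 };
  HeapSort(a, sizeof(a) / sizeof(a[0]));
  /* a is now { 1, 1, 2, 4, 5, 5, 6, 9 } */
}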
diff --git a/src/libs/7zip/unix/C/Threads.c b/src/libs/7zip/unix/C/Threads.c
new file mode 100644
index 000000000..1b8203f68
--- /dev/null
+++ b/src/libs/7zip/unix/C/Threads.c
@@ -0,0 +1,582 @@
+/* Threads.c */
+
+#include "Threads.h"
+
+#ifdef ENV_BEOS
+#include <kernel/OS.h>
+#else
+#include <pthread.h>
+#include <stdlib.h>
+#endif
+
+#include <errno.h>
+
+#if defined(__linux__)
+#define PTHREAD_MUTEX_ERRORCHECK PTHREAD_MUTEX_ERRORCHECK_NP
+#endif
+
+#ifdef ENV_BEOS
+
+/* TODO : optimize the code and verify the returned values */
+
+WRes Thread_Create(CThread *thread, THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE *startAddress)(void *), LPVOID parameter)
+{
+ thread->_tid = spawn_thread((int32 (*)(void *))startAddress, "CThread", B_LOW_PRIORITY, parameter);
+ if (thread->_tid >= B_OK) {
+ resume_thread(thread->_tid);
+ } else {
+ thread->_tid = B_BAD_THREAD_ID;
+ }
+ thread->_created = 1;
+ return 0; // SZ_OK;
+}
+
+WRes Thread_Wait(CThread *thread)
+{
+ int ret;
+
+ if (thread->_created == 0)
+ return EINVAL;
+
+ if (thread->_tid >= B_OK)
+ {
+ status_t exit_value;
+ wait_for_thread(thread->_tid, &exit_value);
+ thread->_tid = B_BAD_THREAD_ID;
+ } else {
+ return EINVAL;
+ }
+
+ thread->_created = 0;
+
+ return 0;
+}
+
+WRes Thread_Close(CThread *thread)
+{
+ if (!thread->_created) return SZ_OK;
+
+ thread->_tid = B_BAD_THREAD_ID;
+ thread->_created = 0;
+ return SZ_OK;
+}
+
+
+WRes Event_Create(CEvent *p, BOOL manualReset, int initialSignaled)
+{
+ p->_index_waiting = 0;
+ p->_manual_reset = manualReset;
+ p->_state = (initialSignaled ? TRUE : FALSE);
+ p->_created = 1;
+ p->_sem = create_sem(1,"event");
+ return 0;
+}
+
+WRes Event_Set(CEvent *p) {
+ int index;
+ acquire_sem(p->_sem);
+ p->_state = TRUE;
+ for(index = 0 ; index < p->_index_waiting ; index++)
+ {
+ send_data(p->_waiting[index], '7zCN', NULL, 0);
+ }
+ p->_index_waiting = 0;
+ release_sem(p->_sem);
+ return 0;
+}
+
+WRes Event_Reset(CEvent *p) {
+ acquire_sem(p->_sem);
+ p->_state = FALSE;
+ release_sem(p->_sem);
+ return 0;
+}
+
+WRes Event_Wait(CEvent *p) {
+ acquire_sem(p->_sem);
+ while (p->_state == FALSE)
+ {
+ thread_id sender;
+ p->_waiting[p->_index_waiting++] = find_thread(NULL);
+ release_sem(p->_sem);
+ /* int msg = */ receive_data(&sender, NULL, 0);
+ acquire_sem(p->_sem);
+ }
+ if (p->_manual_reset == FALSE)
+ {
+ p->_state = FALSE;
+ }
+ release_sem(p->_sem);
+ return 0;
+}
+
+WRes Event_Close(CEvent *p) {
+ if (p->_created)
+ {
+ p->_created = 0;
+ delete_sem(p->_sem);
+ }
+ return 0;
+}
+
+WRes Semaphore_Create(CSemaphore *p, UInt32 initiallyCount, UInt32 maxCount)
+{
+ p->_index_waiting = 0;
+ p->_count = initiallyCount;
+ p->_maxCount = maxCount;
+ p->_created = 1;
+ p->_sem = create_sem(1,"sem");
+ return 0;
+}
+
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
+{
+ UInt32 newCount;
+ int index;
+
+ if (releaseCount < 1) return EINVAL;
+
+ acquire_sem(p->_sem);
+ newCount = p->_count + releaseCount;
+ if (newCount > p->_maxCount)
+ {
+ release_sem(p->_sem);
+ return EINVAL;
+ }
+ p->_count = newCount;
+ for(index = 0 ; index < p->_index_waiting ; index++)
+ {
+ send_data(p->_waiting[index], '7zCN', NULL, 0);
+ }
+ p->_index_waiting = 0;
+ release_sem(p->_sem);
+ return 0;
+}
+
+WRes Semaphore_Wait(CSemaphore *p) {
+ acquire_sem(p->_sem);
+ while (p->_count < 1)
+ {
+ thread_id sender;
+ p->_waiting[p->_index_waiting++] = find_thread(NULL);
+ release_sem(p->_sem);
+ /* int msg = */ receive_data(&sender, NULL, 0);
+ acquire_sem(p->_sem);
+ }
+ p->_count--;
+ release_sem(p->_sem);
+ return 0;
+}
+
+WRes Semaphore_Close(CSemaphore *p) {
+ if (p->_created)
+ {
+ p->_created = 0;
+ delete_sem(p->_sem);
+ }
+ return 0;
+}
+
+WRes CriticalSection_Init(CCriticalSection * lpCriticalSection)
+{
+ lpCriticalSection->_sem = create_sem(1,"cc");
+ return 0;
+}
+
+#else /* !ENV_BEOS */
+
+WRes Thread_Create(CThread *thread, THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE *startAddress)(void *), LPVOID parameter)
+{
+ pthread_attr_t attr;
+ int ret;
+
+ thread->_created = 0;
+
+ ret = pthread_attr_init(&attr);
+ if (ret) return ret;
+
+ ret = pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_JOINABLE);
+ if (ret) return ret;
+
+ ret = pthread_create(&thread->_tid, &attr, (void * (*)(void *))startAddress, parameter);
+
+ /* ret2 = */ pthread_attr_destroy(&attr);
+
+ if (ret) return ret;
+
+ thread->_created = 1;
+
+ return 0; // SZ_OK;
+}
+
+WRes Thread_Wait(CThread *thread)
+{
+ void *thread_return;
+ int ret;
+
+ if (thread->_created == 0)
+ return EINVAL;
+
+ ret = pthread_join(thread->_tid,&thread_return);
+ thread->_created = 0;
+
+ return ret;
+}
+
+WRes Thread_Close(CThread *thread)
+{
+ if (!thread->_created) return SZ_OK;
+
+ pthread_detach(thread->_tid);
+ thread->_tid = 0;
+ thread->_created = 0;
+ return SZ_OK;
+}
+
+#ifdef DEBUG_SYNCHRO
+
+#include <stdio.h>
+#include <string.h> /* memset, strerror used in the DEBUG_SYNCHRO paths below */
+
+static void dump_error(int ligne,int ret,const char *text,void *param)
+{
+ printf("\n##T%d#ERROR2 (l=%d) %s : param=%p ret = %d (%s)##\n",(int)pthread_self(),ligne,text,param,ret,strerror(ret));
+ // abort();
+}
+
+WRes Event_Create(CEvent *p, BOOL manualReset, int initialSignaled)
+{
+ int ret;
+ pthread_mutexattr_t mutexattr;
+ memset(&mutexattr,0,sizeof(mutexattr));
+ ret = pthread_mutexattr_init(&mutexattr);
+ if (ret != 0) dump_error(__LINE__,ret,"Event_Create::pthread_mutexattr_init",&mutexattr);
+ ret = pthread_mutexattr_settype(&mutexattr,PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) dump_error(__LINE__,ret,"Event_Create::pthread_mutexattr_settype",&mutexattr);
+ ret = pthread_mutex_init(&p->_mutex,&mutexattr);
+ if (ret != 0) dump_error(__LINE__,ret,"Event_Create::pthread_mutexattr_init",&p->_mutex);
+ if (ret == 0)
+ {
+ ret = pthread_cond_init(&p->_cond,0);
+ if (ret != 0) dump_error(__LINE__,ret,"Event_Create::pthread_cond_init",&p->_cond);
+ p->_manual_reset = manualReset;
+ p->_state = (initialSignaled ? TRUE : FALSE);
+ p->_created = 1;
+ }
+ return ret;
+}
+
+WRes Event_Set(CEvent *p) {
+ int ret = pthread_mutex_lock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"ES::pthread_mutex_lock",&p->_mutex);
+ if (ret == 0)
+ {
+ p->_state = TRUE;
+ ret = pthread_cond_broadcast(&p->_cond);
+ if (ret != 0) dump_error(__LINE__,ret,"ES::pthread_cond_broadcast",&p->_cond);
+ if (ret == 0)
+ {
+ ret = pthread_mutex_unlock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"ES::pthread_mutex_unlock",&p->_mutex);
+ }
+ }
+ return ret;
+}
+
+WRes Event_Reset(CEvent *p) {
+ int ret = pthread_mutex_lock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"ER::pthread_mutex_lock",&p->_mutex);
+ if (ret == 0)
+ {
+ p->_state = FALSE;
+ ret = pthread_mutex_unlock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"ER::pthread_mutex_unlock",&p->_mutex);
+ }
+ return ret;
+}
+
+WRes Event_Wait(CEvent *p) {
+ int ret = pthread_mutex_lock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"EW::pthread_mutex_lock",&p->_mutex);
+ if (ret == 0)
+ {
+ while ((p->_state == FALSE) && (ret == 0))
+ {
+ ret = pthread_cond_wait(&p->_cond, &p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"EW::pthread_cond_wait",&p->_mutex);
+ }
+ if (ret == 0)
+ {
+ if (p->_manual_reset == FALSE)
+ {
+ p->_state = FALSE;
+ }
+ ret = pthread_mutex_unlock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"EW::pthread_mutex_unlock",&p->_mutex);
+ }
+ }
+ return ret;
+}
+
+WRes Event_Close(CEvent *p) {
+ if (p->_created)
+ {
+ int ret;
+ p->_created = 0;
+ ret = pthread_mutex_destroy(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"EC::pthread_mutex_destroy",&p->_mutex);
+ ret = pthread_cond_destroy(&p->_cond);
+ if (ret != 0) dump_error(__LINE__,ret,"EC::pthread_cond_destroy",&p->_cond);
+ }
+ return 0;
+}
+
+WRes Semaphore_Create(CSemaphore *p, UInt32 initiallyCount, UInt32 maxCount)
+{
+ int ret;
+ pthread_mutexattr_t mutexattr;
+ memset(&mutexattr,0,sizeof(mutexattr));
+ ret = pthread_mutexattr_init(&mutexattr);
+ if (ret != 0) dump_error(__LINE__,ret,"SemC::pthread_mutexattr_init",&mutexattr);
+ ret = pthread_mutexattr_settype(&mutexattr,PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) dump_error(__LINE__,ret,"SemC::pthread_mutexattr_settype",&mutexattr);
+ ret = pthread_mutex_init(&p->_mutex,&mutexattr);
+ if (ret != 0) dump_error(__LINE__,ret,"SemC::pthread_mutexattr_init",&p->_mutex);
+ if (ret == 0)
+ {
+ ret = pthread_cond_init(&p->_cond,0);
+ if (ret != 0) dump_error(__LINE__,ret,"SemC::pthread_cond_init",&p->_mutex);
+ p->_count = initiallyCount;
+ p->_maxCount = maxCount;
+ p->_created = 1;
+ }
+ return ret;
+}
+
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
+{
+ int ret;
+ if (releaseCount < 1) return EINVAL;
+
+ ret = pthread_mutex_lock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"SemR::pthread_mutex_lock",&p->_mutex);
+ if (ret == 0)
+ {
+ UInt32 newCount = p->_count + releaseCount;
+ if (newCount > p->_maxCount)
+ {
+ ret = pthread_mutex_unlock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"SemR::pthread_mutex_unlock",&p->_mutex);
+ return EINVAL;
+ }
+ p->_count = newCount;
+ ret = pthread_cond_broadcast(&p->_cond);
+ if (ret != 0) dump_error(__LINE__,ret,"SemR::pthread_cond_broadcast",&p->_cond);
+ if (ret == 0)
+ {
+ ret = pthread_mutex_unlock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"SemR::pthread_mutex_unlock",&p->_mutex);
+ }
+ }
+ return ret;
+}
+
+WRes Semaphore_Wait(CSemaphore *p) {
+ int ret = pthread_mutex_lock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"SemW::pthread_mutex_lock",&p->_mutex);
+ if (ret == 0)
+ {
+ while ((p->_count < 1) && (ret == 0))
+ {
+ ret = pthread_cond_wait(&p->_cond, &p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"SemW::pthread_cond_wait",&p->_mutex);
+ }
+ if (ret == 0)
+ {
+ p->_count--;
+ ret = pthread_mutex_unlock(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"SemW::pthread_mutex_unlock",&p->_mutex);
+ }
+ }
+ return ret;
+}
+
+WRes Semaphore_Close(CSemaphore *p) {
+ if (p->_created)
+ {
+ int ret;
+ p->_created = 0;
+ ret = pthread_mutex_destroy(&p->_mutex);
+ if (ret != 0) dump_error(__LINE__,ret,"Semc::pthread_mutex_destroy",&p->_mutex);
+ ret = pthread_cond_destroy(&p->_cond);
+ if (ret != 0) dump_error(__LINE__,ret,"Semc::pthread_cond_destroy",&p->_cond);
+ }
+ return 0;
+}
+
+WRes CriticalSection_Init(CCriticalSection * lpCriticalSection)
+{
+ if (lpCriticalSection)
+ {
+ int ret;
+ pthread_mutexattr_t mutexattr;
+ memset(&mutexattr,0,sizeof(mutexattr));
+ ret = pthread_mutexattr_init(&mutexattr);
+ if (ret != 0) dump_error(__LINE__,ret,"CS I::pthread_mutexattr_init",&mutexattr);
+ ret = pthread_mutexattr_settype(&mutexattr,PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) dump_error(__LINE__,ret,"CS I::pthread_mutexattr_settype",&mutexattr);
+ ret = pthread_mutex_init(&lpCriticalSection->_mutex,&mutexattr);
+ if (ret != 0) dump_error(__LINE__,ret,"CS I::pthread_mutexattr_init",&lpCriticalSection->_mutex);
+ return ret;
+ }
+ return EINTR;
+}
+
+void CriticalSection_Enter(CCriticalSection * lpCriticalSection)
+{
+ if (lpCriticalSection)
+ {
+ int ret = pthread_mutex_lock(&(lpCriticalSection->_mutex));
+ if (ret != 0) dump_error(__LINE__,ret,"CS::pthread_mutex_lock",&(lpCriticalSection->_mutex));
+ }
+}
+
+void CriticalSection_Leave(CCriticalSection * lpCriticalSection)
+{
+ if (lpCriticalSection)
+ {
+ int ret = pthread_mutex_unlock(&(lpCriticalSection->_mutex));
+ if (ret != 0) dump_error(__LINE__,ret,"CS::pthread_mutex_unlock",&(lpCriticalSection->_mutex));
+ }
+}
+
+void CriticalSection_Delete(CCriticalSection * lpCriticalSection)
+{
+ if (lpCriticalSection)
+ {
+ int ret = pthread_mutex_destroy(&(lpCriticalSection->_mutex));
+ if (ret != 0) dump_error(__LINE__,ret,"CS::pthread_mutex_destroy",&(lpCriticalSection->_mutex));
+ }
+}
+
+#else
+
+WRes Event_Create(CEvent *p, BOOL manualReset, int initialSignaled)
+{
+ pthread_mutex_init(&p->_mutex,0);
+ pthread_cond_init(&p->_cond,0);
+ p->_manual_reset = manualReset;
+ p->_state = (initialSignaled ? TRUE : FALSE);
+ p->_created = 1;
+ return 0;
+}
+
+WRes Event_Set(CEvent *p) {
+ pthread_mutex_lock(&p->_mutex);
+ p->_state = TRUE;
+ pthread_cond_broadcast(&p->_cond);
+ pthread_mutex_unlock(&p->_mutex);
+ return 0;
+}
+
+WRes Event_Reset(CEvent *p) {
+ pthread_mutex_lock(&p->_mutex);
+ p->_state = FALSE;
+ pthread_mutex_unlock(&p->_mutex);
+ return 0;
+}
+
+WRes Event_Wait(CEvent *p) {
+ pthread_mutex_lock(&p->_mutex);
+ while (p->_state == FALSE)
+ {
+ pthread_cond_wait(&p->_cond, &p->_mutex);
+ }
+ if (p->_manual_reset == FALSE)
+ {
+ p->_state = FALSE;
+ }
+ pthread_mutex_unlock(&p->_mutex);
+ return 0;
+}
+
+WRes Event_Close(CEvent *p) {
+ if (p->_created)
+ {
+ p->_created = 0;
+ pthread_mutex_destroy(&p->_mutex);
+ pthread_cond_destroy(&p->_cond);
+ }
+ return 0;
+}
+
+WRes Semaphore_Create(CSemaphore *p, UInt32 initiallyCount, UInt32 maxCount)
+{
+ pthread_mutex_init(&p->_mutex,0);
+ pthread_cond_init(&p->_cond,0);
+ p->_count = initiallyCount;
+ p->_maxCount = maxCount;
+ p->_created = 1;
+ return 0;
+}
+
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 releaseCount)
+{
+ UInt32 newCount;
+
+ if (releaseCount < 1) return EINVAL;
+
+ pthread_mutex_lock(&p->_mutex);
+
+ newCount = p->_count + releaseCount;
+ if (newCount > p->_maxCount)
+ {
+ pthread_mutex_unlock(&p->_mutex);
+ return EINVAL;
+ }
+ p->_count = newCount;
+ pthread_cond_broadcast(&p->_cond);
+ pthread_mutex_unlock(&p->_mutex);
+ return 0;
+}
+
+WRes Semaphore_Wait(CSemaphore *p) {
+ pthread_mutex_lock(&p->_mutex);
+ while (p->_count < 1)
+ {
+ pthread_cond_wait(&p->_cond, &p->_mutex);
+ }
+ p->_count--;
+ pthread_mutex_unlock(&p->_mutex);
+ return 0;
+}
+
+WRes Semaphore_Close(CSemaphore *p) {
+ if (p->_created)
+ {
+ p->_created = 0;
+ pthread_mutex_destroy(&p->_mutex);
+ pthread_cond_destroy(&p->_cond);
+ }
+ return 0;
+}
+
+WRes CriticalSection_Init(CCriticalSection * lpCriticalSection)
+{
+ return pthread_mutex_init(&(lpCriticalSection->_mutex),0);
+}
+
+#endif /* DEBUG_SYNCHRO */
+
+#endif /* ENV_BEOS */
+
+WRes ManualResetEvent_Create(CManualResetEvent *p, int initialSignaled)
+ { return Event_Create(p, TRUE, initialSignaled); }
+
+WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p)
+ { return ManualResetEvent_Create(p, 0); }
+
+WRes AutoResetEvent_Create(CAutoResetEvent *p, int initialSignaled)
+ { return Event_Create(p, FALSE, initialSignaled); }
+WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p)
+ { return AutoResetEvent_Create(p, 0); }
+
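
A usage sketch for the thread and event primitives implemented above (not part of the patch). It assumes the pthread build and that Threads.h plus its windows.h shim (which supplies LPVOID) are on the include path; a worker thread signals an auto-reset event that the main thread waits on.

#include <stdio.h>
#include "Threads.h"

static CAutoResetEvent g_ready;

static THREAD_FUNC_DECL Worker(void *param)
{
  (void)param;
  /* ... do some work ... */
  Event_Set(&g_ready);            /* wake the thread blocked in Event_Wait */
  return 0;
}

int main(void)
{
  CThread t;
  Thread_Construct(&t);
  Event_Construct(&g_ready);
  if (AutoResetEvent_CreateNotSignaled(&g_ready) != 0) return 1;
  if (Thread_Create(&t, Worker, NULL) != 0) return 1;
  Event_Wait(&g_ready);           /* auto-reset: _state flips back to FALSE */
  Thread_Wait(&t);
  Thread_Close(&t);
  Event_Close(&g_ready);
  printf("worker signaled and finished\n");
  return 0;
}
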
diff --git a/src/libs/7zip/unix/C/Threads.h b/src/libs/7zip/unix/C/Threads.h
new file mode 100644
index 000000000..07b05be0c
--- /dev/null
+++ b/src/libs/7zip/unix/C/Threads.h
@@ -0,0 +1,123 @@
+/* Threads.h -- multithreading library
+2008-11-22 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_THREADS_H
+#define __7Z_THREADS_H
+
+#include "Types.h"
+#include "windows.h"
+
+#ifdef ENV_BEOS
+#include <kernel/OS.h>
+#define MAX_THREAD 256
+#else
+#include <pthread.h>
+#endif
+
+/* #define DEBUG_SYNCHRO 1 */
+
+typedef struct _CThread
+{
+#ifdef ENV_BEOS
+ thread_id _tid;
+#else
+ pthread_t _tid;
+#endif
+ int _created;
+
+} CThread;
+
+#define Thread_Construct(thread) (thread)->_created = 0
+#define Thread_WasCreated(thread) ((thread)->_created != 0)
+
+typedef unsigned THREAD_FUNC_RET_TYPE;
+#define THREAD_FUNC_CALL_TYPE MY_STD_CALL
+#define THREAD_FUNC_DECL THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
+
+typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
+
+WRes Thread_Create(CThread *thread, THREAD_FUNC_TYPE startAddress, LPVOID parameter);
+WRes Thread_Wait(CThread *thread);
+WRes Thread_Close(CThread *thread);
+
+typedef struct _CEvent
+{
+ int _created;
+ int _manual_reset;
+ int _state;
+#ifdef ENV_BEOS
+ thread_id _waiting[MAX_THREAD];
+ int _index_waiting;
+ sem_id _sem;
+#else
+ pthread_mutex_t _mutex;
+ pthread_cond_t _cond;
+#endif
+} CEvent;
+
+typedef CEvent CAutoResetEvent;
+typedef CEvent CManualResetEvent;
+
+#define Event_Construct(event) (event)->_created = 0
+#define Event_IsCreated(event) ((event)->_created)
+
+WRes ManualResetEvent_Create(CManualResetEvent *event, int initialSignaled);
+WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *event);
+WRes AutoResetEvent_Create(CAutoResetEvent *event, int initialSignaled);
+WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *event);
+WRes Event_Set(CEvent *event);
+WRes Event_Reset(CEvent *event);
+WRes Event_Wait(CEvent *event);
+WRes Event_Close(CEvent *event);
+
+
+typedef struct _CSemaphore
+{
+ int _created;
+ UInt32 _count;
+ UInt32 _maxCount;
+#ifdef ENV_BEOS
+ thread_id _waiting[MAX_THREAD];
+ int _index_waiting;
+ sem_id _sem;
+#else
+ pthread_mutex_t _mutex;
+ pthread_cond_t _cond;
+#endif
+} CSemaphore;
+
+#define Semaphore_Construct(p) (p)->_created = 0
+
+WRes Semaphore_Create(CSemaphore *p, UInt32 initiallyCount, UInt32 maxCount);
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
+#define Semaphore_Release1(p) Semaphore_ReleaseN(p, 1)
+WRes Semaphore_Wait(CSemaphore *p);
+WRes Semaphore_Close(CSemaphore *p);
+
+typedef struct {
+#ifdef ENV_BEOS
+ sem_id _sem;
+#else
+ pthread_mutex_t _mutex;
+#endif
+} CCriticalSection;
+
+WRes CriticalSection_Init(CCriticalSection *p);
+#ifdef ENV_BEOS
+#define CriticalSection_Delete(p) delete_sem((p)->_sem)
+#define CriticalSection_Enter(p) acquire_sem((p)->_sem)
+#define CriticalSection_Leave(p) release_sem((p)->_sem)
+#else
+#ifdef DEBUG_SYNCHRO
+void CriticalSection_Delete(CCriticalSection *);
+void CriticalSection_Enter(CCriticalSection *);
+void CriticalSection_Leave(CCriticalSection *);
+#else
+#define CriticalSection_Delete(p) pthread_mutex_destroy(&((p)->_mutex))
+#define CriticalSection_Enter(p) pthread_mutex_lock(&((p)->_mutex))
+#define CriticalSection_Leave(p) pthread_mutex_unlock(&((p)->_mutex))
+#endif
+#endif
+
+#endif
+
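
A compilable sketch (not part of the patch) of the critical-section API declared above, guarding a shared counter. In the release pthread build the three calls are the pthread_mutex_lock/unlock/destroy macros defined at the end of this header.

#include "Threads.h"

static CCriticalSection g_lock;
static int g_counter;

/* Call once before the counter is shared between threads. */
static WRes Counter_Init(void) { return CriticalSection_Init(&g_lock); }

/* Thread-safe increment: Enter/Leave bracket the shared update. */
static int Counter_Increment(void)
{
  int value;
  CriticalSection_Enter(&g_lock);
  value = ++g_counter;
  CriticalSection_Leave(&g_lock);
  return value;
}

/* Call once after all worker threads have been joined. */
static void Counter_Done(void) { CriticalSection_Delete(&g_lock); }
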
diff --git a/src/libs/7zip/unix/C/Types.h b/src/libs/7zip/unix/C/Types.h
new file mode 100644
index 000000000..7732c240c
--- /dev/null
+++ b/src/libs/7zip/unix/C/Types.h
@@ -0,0 +1,254 @@
+/* Types.h -- Basic types
+2010-10-09 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_TYPES_H
+#define __7Z_TYPES_H
+
+#include <stddef.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#ifndef EXTERN_C_BEGIN
+#ifdef __cplusplus
+#define EXTERN_C_BEGIN extern "C" {
+#define EXTERN_C_END }
+#else
+#define EXTERN_C_BEGIN
+#define EXTERN_C_END
+#endif
+#endif
+
+EXTERN_C_BEGIN
+
+#define SZ_OK 0
+
+#define SZ_ERROR_DATA 1
+#define SZ_ERROR_MEM 2
+#define SZ_ERROR_CRC 3
+#define SZ_ERROR_UNSUPPORTED 4
+#define SZ_ERROR_PARAM 5
+#define SZ_ERROR_INPUT_EOF 6
+#define SZ_ERROR_OUTPUT_EOF 7
+#define SZ_ERROR_READ 8
+#define SZ_ERROR_WRITE 9
+#define SZ_ERROR_PROGRESS 10
+#define SZ_ERROR_FAIL 11
+#define SZ_ERROR_THREAD 12
+
+#define SZ_ERROR_ARCHIVE 16
+#define SZ_ERROR_NO_ARCHIVE 17
+
+typedef int SRes;
+
+#ifdef _WIN32
+typedef DWORD WRes;
+#else
+typedef int WRes;
+#endif
+
+#ifndef RINOK
+#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
+#endif
+
+typedef unsigned char Byte;
+typedef short Int16;
+typedef unsigned short UInt16;
+
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef long Int32;
+typedef unsigned long UInt32;
+#else
+typedef int Int32;
+typedef unsigned int UInt32;
+#endif
+
+#ifdef _SZ_NO_INT_64
+
+/* Define _SZ_NO_INT_64 if your compiler doesn't support 64-bit integers.
+   NOTE: some code will work incorrectly in that case! */
+
+typedef long Int64;
+typedef unsigned long UInt64;
+
+#else
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#define UINT64_CONST(n) n
+#else
+typedef long long int Int64;
+typedef unsigned long long int UInt64;
+#define UINT64_CONST(n) n ## ULL
+#endif
+
+#endif
+
+#ifdef _LZMA_NO_SYSTEM_SIZE_T
+typedef UInt32 SizeT;
+#else
+typedef size_t SizeT;
+#endif
+
+typedef int Bool;
+#define True 1
+#define False 0
+
+
+#ifdef _WIN32
+#define MY_STD_CALL __stdcall
+#else
+#define MY_STD_CALL
+#endif
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1300
+#define MY_NO_INLINE __declspec(noinline)
+#else
+#define MY_NO_INLINE
+#endif
+
+#define MY_CDECL __cdecl
+#define MY_FAST_CALL __fastcall
+
+#else
+
+#define MY_CDECL
+#define MY_FAST_CALL
+
+#endif
+
+
+/* The following interfaces use first parameter as pointer to structure */
+
+typedef struct
+{
+ Byte (*Read)(void *p); /* reads one byte, returns 0 in case of EOF or error */
+} IByteIn;
+
+typedef struct
+{
+ void (*Write)(void *p, Byte b);
+} IByteOut;
+
+typedef struct
+{
+ SRes (*Read)(void *p, void *buf, size_t *size);
+ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+ (output(*size) < input(*size)) is allowed */
+} ISeqInStream;
+
+/* it can return SZ_ERROR_INPUT_EOF */
+SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);
+SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);
+SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);
+
+typedef struct
+{
+ size_t (*Write)(void *p, const void *buf, size_t size);
+ /* Returns: result - the number of actually written bytes.
+ (result < size) means error */
+} ISeqOutStream;
+
+typedef enum
+{
+ SZ_SEEK_SET = 0,
+ SZ_SEEK_CUR = 1,
+ SZ_SEEK_END = 2
+} ESzSeek;
+
+typedef struct
+{
+ SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
+ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
+} ISeekInStream;
+
+typedef struct
+{
+ SRes (*Look)(void *p, const void **buf, size_t *size);
+ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+ (output(*size) > input(*size)) is not allowed
+ (output(*size) < input(*size)) is allowed */
+ SRes (*Skip)(void *p, size_t offset);
+ /* offset must be <= output(*size) of Look */
+
+ SRes (*Read)(void *p, void *buf, size_t *size);
+  /* reads directly (without buffer). It's the same as ISeqInStream::Read */
+ SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);
+} ILookInStream;
+
+SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);
+SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);
+
+/* reads via ILookInStream::Read */
+SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);
+SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);
+
+#define LookToRead_BUF_SIZE (1 << 14)
+
+typedef struct
+{
+ ILookInStream s;
+ ISeekInStream *realStream;
+ size_t pos;
+ size_t size;
+ Byte buf[LookToRead_BUF_SIZE];
+} CLookToRead;
+
+void LookToRead_CreateVTable(CLookToRead *p, int lookahead);
+void LookToRead_Init(CLookToRead *p);
+
+typedef struct
+{
+ ISeqInStream s;
+ ILookInStream *realStream;
+} CSecToLook;
+
+void SecToLook_CreateVTable(CSecToLook *p);
+
+typedef struct
+{
+ ISeqInStream s;
+ ILookInStream *realStream;
+} CSecToRead;
+
+void SecToRead_CreateVTable(CSecToRead *p);
+
+typedef struct
+{
+ SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);
+ /* Returns: result. (result != SZ_OK) means break.
+ Value (UInt64)(Int64)-1 for size means unknown value. */
+} ICompressProgress;
+
+typedef struct
+{
+ void *(*Alloc)(void *p, size_t size);
+ void (*Free)(void *p, void *address); /* address can be 0 */
+} ISzAlloc;
+
+#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)
+#define IAlloc_Free(p, a) (p)->Free((p), a)
+
+#ifdef _WIN32
+
+#define CHAR_PATH_SEPARATOR '\\'
+#define WCHAR_PATH_SEPARATOR L'\\'
+#define STRING_PATH_SEPARATOR "\\"
+#define WSTRING_PATH_SEPARATOR L"\\"
+
+#else
+
+#define CHAR_PATH_SEPARATOR '/'
+#define WCHAR_PATH_SEPARATOR L'/'
+#define STRING_PATH_SEPARATOR "/"
+#define WSTRING_PATH_SEPARATOR L"/"
+
+#endif
+
+EXTERN_C_END
+
+#endif
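
A memory-backed ISeqInStream (not part of the patch) that follows the Read contract documented above: *size is in/out, and returning fewer bytes than requested, down to zero, signals end of stream.

#include <string.h>
#include "Types.h"

typedef struct
{
  ISeqInStream s;    /* must be first: the callback casts the interface pointer back */
  const Byte *data;
  size_t rem;
} CBufInStream;

static SRes BufInStream_Read(void *pp, void *buf, size_t *size)
{
  CBufInStream *p = (CBufInStream *)pp;
  size_t cur = *size;
  if (cur > p->rem)
    cur = p->rem;                /* 0 bytes on a nonzero request => end of stream */
  memcpy(buf, p->data, cur);
  p->data += cur;
  p->rem -= cur;
  *size = cur;
  return SZ_OK;
}

static void BufInStream_Init(CBufInStream *p, const Byte *data, size_t size)
{
  p->s.Read = BufInStream_Read;
  p->data = data;
  p->rem = size;
}
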
diff --git a/src/libs/7zip/unix/C/Xz.c b/src/libs/7zip/unix/C/Xz.c
new file mode 100644
index 000000000..18caba2c1
--- /dev/null
+++ b/src/libs/7zip/unix/C/Xz.c
@@ -0,0 +1,88 @@
+/* Xz.c - Xz
+2009-04-15 : Igor Pavlov : Public domain */
+
+#include "7zCrc.h"
+#include "CpuArch.h"
+#include "Xz.h"
+#include "XzCrc64.h"
+
+Byte XZ_SIG[XZ_SIG_SIZE] = { 0xFD, '7', 'z', 'X', 'Z', 0 };
+Byte XZ_FOOTER_SIG[XZ_FOOTER_SIG_SIZE] = { 'Y', 'Z' };
+
+unsigned Xz_WriteVarInt(Byte *buf, UInt64 v)
+{
+ unsigned i = 0;
+ do
+ {
+ buf[i++] = (Byte)((v & 0x7F) | 0x80);
+ v >>= 7;
+ }
+ while (v != 0);
+ buf[i - 1] &= 0x7F;
+ return i;
+}
+
+void Xz_Construct(CXzStream *p)
+{
+ p->numBlocks = p->numBlocksAllocated = 0;
+ p->blocks = 0;
+ p->flags = 0;
+}
+
+void Xz_Free(CXzStream *p, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, p->blocks);
+ p->numBlocks = p->numBlocksAllocated = 0;
+ p->blocks = 0;
+}
+
+unsigned XzFlags_GetCheckSize(CXzStreamFlags f)
+{
+ int t = XzFlags_GetCheckType(f);
+ return (t == 0) ? 0 : (4 << ((t - 1) / 3));
+}
+
+void XzCheck_Init(CXzCheck *p, int mode)
+{
+ p->mode = mode;
+ switch (mode)
+ {
+ case XZ_CHECK_CRC32: p->crc = CRC_INIT_VAL; break;
+ case XZ_CHECK_CRC64: p->crc64 = CRC64_INIT_VAL; break;
+ case XZ_CHECK_SHA256: Sha256_Init(&p->sha); break;
+ }
+}
+
+void XzCheck_Update(CXzCheck *p, const void *data, size_t size)
+{
+ switch (p->mode)
+ {
+ case XZ_CHECK_CRC32: p->crc = CrcUpdate(p->crc, data, size); break;
+ case XZ_CHECK_CRC64: p->crc64 = Crc64Update(p->crc64, data, size); break;
+ case XZ_CHECK_SHA256: Sha256_Update(&p->sha, (const Byte *)data, size); break;
+ }
+}
+
+int XzCheck_Final(CXzCheck *p, Byte *digest)
+{
+ switch (p->mode)
+ {
+ case XZ_CHECK_CRC32:
+ SetUi32(digest, CRC_GET_DIGEST(p->crc));
+ break;
+ case XZ_CHECK_CRC64:
+ {
+ int i;
+ UInt64 v = CRC64_GET_DIGEST(p->crc64);
+ for (i = 0; i < 8; i++, v >>= 8)
+ digest[i] = (Byte)(v & 0xFF);
+ break;
+ }
+ case XZ_CHECK_SHA256:
+ Sha256_Final(&p->sha, digest);
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
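
A small self-check (not part of the patch) for the helpers above: 300 encodes as 0xAC 0x02 in the 7-bits-per-byte format produced by Xz_WriteVarInt, and XzFlags_GetCheckSize follows 4 << ((t - 1) / 3), giving 4, 8 and 32 bytes for CRC32, CRC64 and SHA-256.

#include <assert.h>
#include <stdio.h>
#include "Xz.h"

int main(void)
{
  Byte buf[9];
  unsigned len = Xz_WriteVarInt(buf, 300);
  assert(len == 2 && buf[0] == 0xAC && buf[1] == 0x02);

  assert(XzFlags_GetCheckSize(XZ_CHECK_CRC32) == 4);
  assert(XzFlags_GetCheckSize(XZ_CHECK_CRC64) == 8);
  assert(XzFlags_GetCheckSize(XZ_CHECK_SHA256) == 32);

  printf("varint and check-size helpers behave as expected\n");
  return 0;
}
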
diff --git a/src/libs/7zip/unix/C/Xz.h b/src/libs/7zip/unix/C/Xz.h
new file mode 100644
index 000000000..2cfa1b789
--- /dev/null
+++ b/src/libs/7zip/unix/C/Xz.h
@@ -0,0 +1,252 @@
+/* Xz.h - Xz interface
+2010-09-17 : Igor Pavlov : Public domain */
+
+#ifndef __XZ_H
+#define __XZ_H
+
+#include "Sha256.h"
+
+EXTERN_C_BEGIN
+
+#define XZ_ID_Subblock 1
+#define XZ_ID_Delta 3
+#define XZ_ID_X86 4
+#define XZ_ID_PPC 5
+#define XZ_ID_IA64 6
+#define XZ_ID_ARM 7
+#define XZ_ID_ARMT 8
+#define XZ_ID_SPARC 9
+#define XZ_ID_LZMA2 0x21
+
+unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value);
+unsigned Xz_WriteVarInt(Byte *buf, UInt64 v);
+
+/* ---------- xz block ---------- */
+
+#define XZ_BLOCK_HEADER_SIZE_MAX 1024
+
+#define XZ_NUM_FILTERS_MAX 4
+#define XZ_BF_NUM_FILTERS_MASK 3
+#define XZ_BF_PACK_SIZE (1 << 6)
+#define XZ_BF_UNPACK_SIZE (1 << 7)
+
+#define XZ_FILTER_PROPS_SIZE_MAX 20
+
+typedef struct
+{
+ UInt64 id;
+ UInt32 propsSize;
+ Byte props[XZ_FILTER_PROPS_SIZE_MAX];
+} CXzFilter;
+
+typedef struct
+{
+ UInt64 packSize;
+ UInt64 unpackSize;
+ Byte flags;
+ CXzFilter filters[XZ_NUM_FILTERS_MAX];
+} CXzBlock;
+
+#define XzBlock_GetNumFilters(p) (((p)->flags & XZ_BF_NUM_FILTERS_MASK) + 1)
+#define XzBlock_HasPackSize(p) (((p)->flags & XZ_BF_PACK_SIZE) != 0)
+#define XzBlock_HasUnpackSize(p) (((p)->flags & XZ_BF_UNPACK_SIZE) != 0)
+
+SRes XzBlock_Parse(CXzBlock *p, const Byte *header);
+SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, Bool *isIndex, UInt32 *headerSizeRes);
+
+/* ---------- xz stream ---------- */
+
+#define XZ_SIG_SIZE 6
+#define XZ_FOOTER_SIG_SIZE 2
+
+extern Byte XZ_SIG[XZ_SIG_SIZE];
+extern Byte XZ_FOOTER_SIG[XZ_FOOTER_SIG_SIZE];
+
+#define XZ_STREAM_FLAGS_SIZE 2
+#define XZ_STREAM_CRC_SIZE 4
+
+#define XZ_STREAM_HEADER_SIZE (XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE + XZ_STREAM_CRC_SIZE)
+#define XZ_STREAM_FOOTER_SIZE (XZ_FOOTER_SIG_SIZE + XZ_STREAM_FLAGS_SIZE + XZ_STREAM_CRC_SIZE + 4)
+
+#define XZ_CHECK_MASK 0xF
+#define XZ_CHECK_NO 0
+#define XZ_CHECK_CRC32 1
+#define XZ_CHECK_CRC64 4
+#define XZ_CHECK_SHA256 10
+
+typedef struct
+{
+ int mode;
+ UInt32 crc;
+ UInt64 crc64;
+ CSha256 sha;
+} CXzCheck;
+
+void XzCheck_Init(CXzCheck *p, int mode);
+void XzCheck_Update(CXzCheck *p, const void *data, size_t size);
+int XzCheck_Final(CXzCheck *p, Byte *digest);
+
+typedef UInt16 CXzStreamFlags;
+
+#define XzFlags_IsSupported(f) ((f) <= XZ_CHECK_MASK)
+#define XzFlags_GetCheckType(f) ((f) & XZ_CHECK_MASK)
+#define XzFlags_HasDataCrc32(f) (XzFlags_GetCheckType(f) == XZ_CHECK_CRC32)
+unsigned XzFlags_GetCheckSize(CXzStreamFlags f);
+
+SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf);
+SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream);
+
+typedef struct
+{
+ UInt64 unpackSize;
+ UInt64 totalSize;
+} CXzBlockSizes;
+
+typedef struct
+{
+ CXzStreamFlags flags;
+ size_t numBlocks;
+ size_t numBlocksAllocated;
+ CXzBlockSizes *blocks;
+ UInt64 startOffset;
+} CXzStream;
+
+void Xz_Construct(CXzStream *p);
+void Xz_Free(CXzStream *p, ISzAlloc *alloc);
+
+#define XZ_SIZE_OVERFLOW ((UInt64)(Int64)-1)
+
+UInt64 Xz_GetUnpackSize(const CXzStream *p);
+UInt64 Xz_GetPackSize(const CXzStream *p);
+
+typedef struct
+{
+ size_t num;
+ size_t numAllocated;
+ CXzStream *streams;
+} CXzs;
+
+void Xzs_Construct(CXzs *p);
+void Xzs_Free(CXzs *p, ISzAlloc *alloc);
+SRes Xzs_ReadBackward(CXzs *p, ILookInStream *inStream, Int64 *startOffset, ICompressProgress *progress, ISzAlloc *alloc);
+
+UInt64 Xzs_GetNumBlocks(const CXzs *p);
+UInt64 Xzs_GetUnpackSize(const CXzs *p);
+
+typedef enum
+{
+ CODER_STATUS_NOT_SPECIFIED, /* use main error code instead */
+ CODER_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
+ CODER_STATUS_NOT_FINISHED, /* stream was not finished */
+ CODER_STATUS_NEEDS_MORE_INPUT /* you must provide more input bytes */
+} ECoderStatus;
+
+typedef enum
+{
+ CODER_FINISH_ANY, /* finish at any point */
+ CODER_FINISH_END /* block must be finished at the end */
+} ECoderFinishMode;
+
+typedef struct _IStateCoder
+{
+ void *p;
+ void (*Free)(void *p, ISzAlloc *alloc);
+ SRes (*SetProps)(void *p, const Byte *props, size_t propSize, ISzAlloc *alloc);
+ void (*Init)(void *p);
+ SRes (*Code)(void *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ int srcWasFinished, ECoderFinishMode finishMode, int *wasFinished);
+} IStateCoder;
+
+#define MIXCODER_NUM_FILTERS_MAX 4
+
+typedef struct
+{
+ ISzAlloc *alloc;
+ Byte *buf;
+ int numCoders;
+ int finished[MIXCODER_NUM_FILTERS_MAX - 1];
+ size_t pos[MIXCODER_NUM_FILTERS_MAX - 1];
+ size_t size[MIXCODER_NUM_FILTERS_MAX - 1];
+ UInt64 ids[MIXCODER_NUM_FILTERS_MAX];
+ IStateCoder coders[MIXCODER_NUM_FILTERS_MAX];
+} CMixCoder;
+
+void MixCoder_Construct(CMixCoder *p, ISzAlloc *alloc);
+void MixCoder_Free(CMixCoder *p);
+void MixCoder_Init(CMixCoder *p);
+SRes MixCoder_SetFromMethod(CMixCoder *p, int coderIndex, UInt64 methodId);
+SRes MixCoder_Code(CMixCoder *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, int srcWasFinished,
+ ECoderFinishMode finishMode, ECoderStatus *status);
+
+typedef enum
+{
+ XZ_STATE_STREAM_HEADER,
+ XZ_STATE_STREAM_INDEX,
+ XZ_STATE_STREAM_INDEX_CRC,
+ XZ_STATE_STREAM_FOOTER,
+ XZ_STATE_STREAM_PADDING,
+ XZ_STATE_BLOCK_HEADER,
+ XZ_STATE_BLOCK,
+ XZ_STATE_BLOCK_FOOTER
+} EXzState;
+
+typedef struct
+{
+ EXzState state;
+ UInt32 pos;
+ unsigned alignPos;
+ unsigned indexPreSize;
+
+ CXzStreamFlags streamFlags;
+
+ UInt32 blockHeaderSize;
+ UInt64 packSize;
+ UInt64 unpackSize;
+
+ UInt64 numBlocks;
+ UInt64 indexSize;
+ UInt64 indexPos;
+ UInt64 padSize;
+
+ UInt64 numStreams;
+
+ UInt32 crc;
+ CMixCoder decoder;
+ CXzBlock block;
+ CXzCheck check;
+ CSha256 sha;
+ Byte shaDigest[SHA256_DIGEST_SIZE];
+ Byte buf[XZ_BLOCK_HEADER_SIZE_MAX];
+} CXzUnpacker;
+
+SRes XzUnpacker_Create(CXzUnpacker *p, ISzAlloc *alloc);
+void XzUnpacker_Free(CXzUnpacker *p);
+
+/*
+finishMode:
+  It has meaning only if the decoding reaches the output limit (*destLen).
+  CODER_FINISH_ANY - use the smallest number of input bytes
+  CODER_FINISH_END - the block must be finished at the end of the output
+
+Returns:
+ SZ_OK
+ status:
+      CODER_STATUS_FINISHED_WITH_MARK
+      CODER_STATUS_NOT_FINISHED
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+
+SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, /* int srcWasFinished, */ int finishMode,
+ ECoderStatus *status);
+
+Bool XzUnpacker_IsStreamWasFinished(CXzUnpacker *p);
+
+EXTERN_C_END
+
+#endif
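
A sketch (not part of the patch) of the streaming decode loop for XzUnpacker_Code over in-memory buffers, following the ECoderStatus values above. It assumes the CRC tables used by the check code were generated at startup (CrcGenerateTable from 7zCrc.h, and Crc64GenerateTable from XzCrc64.h when CRC64 checks appear) and uses a malloc-backed ISzAlloc.

#include <stdlib.h>
#include "Xz.h"

static void *Alloc_(void *p, size_t size) { (void)p; return malloc(size); }
static void Free_(void *p, void *addr) { (void)p; free(addr); }
static ISzAlloc g_alloc = { Alloc_, Free_ };

/* Decodes one in-memory .xz stream into dest; returns an SZ_* code. */
static SRes DecodeXzBuffer(Byte *dest, SizeT destSize, const Byte *src, SizeT srcSize)
{
  CXzUnpacker unp;
  SRes res = XzUnpacker_Create(&unp, &g_alloc);
  if (res != SZ_OK)
    return res;
  for (;;)
  {
    ECoderStatus status;
    SizeT destLen = destSize, srcLen = srcSize;
    res = XzUnpacker_Code(&unp, dest, &destLen, src, &srcLen, CODER_FINISH_ANY, &status);
    dest += destLen; destSize -= destLen;
    src += srcLen; srcSize -= srcLen;
    if (res != SZ_OK || status == CODER_STATUS_FINISHED_WITH_MARK)
      break;
    if (destLen == 0 && srcLen == 0)       /* no progress in this pass */
    {
      res = (status == CODER_STATUS_NEEDS_MORE_INPUT)
          ? SZ_ERROR_INPUT_EOF : SZ_ERROR_OUTPUT_EOF;
      break;
    }
  }
  XzUnpacker_Free(&unp);
  return res;
}
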
diff --git a/src/libs/7zip/unix/C/XzCrc64.c b/src/libs/7zip/unix/C/XzCrc64.c
new file mode 100644
index 000000000..0369554b7
--- /dev/null
+++ b/src/libs/7zip/unix/C/XzCrc64.c
@@ -0,0 +1,33 @@
+/* XzCrc64.c -- CRC64 calculation
+2010-04-16 : Igor Pavlov : Public domain */
+
+#include "XzCrc64.h"
+
+#define kCrc64Poly UINT64_CONST(0xC96C5795D7870F42)
+UInt64 g_Crc64Table[256];
+
+void MY_FAST_CALL Crc64GenerateTable(void)
+{
+ UInt32 i;
+ for (i = 0; i < 256; i++)
+ {
+ UInt64 r = i;
+ int j;
+ for (j = 0; j < 8; j++)
+ r = (r >> 1) ^ ((UInt64)kCrc64Poly & ~((r & 1) - 1));
+ g_Crc64Table[i] = r;
+ }
+}
+
+UInt64 MY_FAST_CALL Crc64Update(UInt64 v, const void *data, size_t size)
+{
+ const Byte *p = (const Byte *)data;
+ for (; size > 0 ; size--, p++)
+ v = CRC64_UPDATE_BYTE(v, *p);
+ return v;
+}
+
+UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size)
+{
+ return CRC64_GET_DIGEST(Crc64Update(CRC64_INIT_VAL, data, size));
+}
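
A tiny driver (not part of the patch) for the functions above. Crc64GenerateTable must run once before any update, otherwise g_Crc64Table is all zeros; with this polynomial, initial value and final xor the program is expected to print 995DC9BBDF1939FA, the commonly published CRC-64/XZ check value for "123456789".

#include <stdio.h>
#include "XzCrc64.h"

int main(void)
{
  static const Byte data[9] = { '1', '2', '3', '4', '5', '6', '7', '8', '9' };
  UInt64 crc;
  Crc64GenerateTable();                 /* fill g_Crc64Table once, up front */
  crc = Crc64Calc(data, sizeof(data));  /* init, update and final xor in one call */
  printf("CRC-64(\"123456789\") = %08X%08X\n",
      (unsigned)(crc >> 32), (unsigned)(crc & 0xFFFFFFFF));
  return 0;
}
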
diff --git a/src/libs/7zip/unix/C/XzCrc64.h b/src/libs/7zip/unix/C/XzCrc64.h
new file mode 100644
index 000000000..0e8efd7ea
--- /dev/null
+++ b/src/libs/7zip/unix/C/XzCrc64.h
@@ -0,0 +1,26 @@
+/* XzCrc64.h -- CRC64 calculation
+2010-04-16 : Igor Pavlov : Public domain */
+
+#ifndef __XZ_CRC64_H
+#define __XZ_CRC64_H
+
+#include <stddef.h>
+
+#include "Types.h"
+
+EXTERN_C_BEGIN
+
+extern UInt64 g_Crc64Table[];
+
+void MY_FAST_CALL Crc64GenerateTable(void);
+
+#define CRC64_INIT_VAL UINT64_CONST(0xFFFFFFFFFFFFFFFF)
+#define CRC64_GET_DIGEST(crc) ((crc) ^ CRC64_INIT_VAL)
+#define CRC64_UPDATE_BYTE(crc, b) (g_Crc64Table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
+
+UInt64 MY_FAST_CALL Crc64Update(UInt64 crc, const void *data, size_t size);
+UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size);
+
+EXTERN_C_END
+
+#endif
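
A sketch (not part of the patch) showing that chunked updates compose: feeding two slices through Crc64Update and applying CRC64_GET_DIGEST gives the same digest as a single Crc64Calc over the whole buffer.

#include <assert.h>
#include "XzCrc64.h"

int main(void)
{
  static const Byte data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  UInt64 crc;
  Crc64GenerateTable();
  crc = CRC64_INIT_VAL;
  crc = Crc64Update(crc, data, 3);      /* first chunk  */
  crc = Crc64Update(crc, data + 3, 5);  /* second chunk */
  assert(CRC64_GET_DIGEST(crc) == Crc64Calc(data, sizeof(data)));
  return 0;
}
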
diff --git a/src/libs/7zip/unix/C/XzDec.c b/src/libs/7zip/unix/C/XzDec.c
new file mode 100644
index 000000000..40f1a2a45
--- /dev/null
+++ b/src/libs/7zip/unix/C/XzDec.c
@@ -0,0 +1,875 @@
+/* XzDec.c -- Xz Decode
+2010-04-16 : Igor Pavlov : Public domain */
+
+/* #define XZ_DUMP */
+
+#ifdef XZ_DUMP
+#include <stdio.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "7zCrc.h"
+#include "Alloc.h"
+#include "Bra.h"
+#include "CpuArch.h"
+#include "Delta.h"
+#include "Lzma2Dec.h"
+
+#ifdef USE_SUBBLOCK
+#include "SbDec.h"
+#endif
+
+#include "Xz.h"
+
+#define XZ_CHECK_SIZE_MAX 64
+
+#define CODER_BUF_SIZE (1 << 17)
+
+unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value)
+{
+ int i, limit;
+ *value = 0;
+ limit = (maxSize > 9) ? 9 : (int)maxSize;
+
+ for (i = 0; i < limit;)
+ {
+ Byte b = p[i];
+ *value |= (UInt64)(b & 0x7F) << (7 * i++);
+ if ((b & 0x80) == 0)
+ return (b == 0 && i != 1) ? 0 : i;
+ }
+ return 0;
+}
+
+/* ---------- BraState ---------- */
+
+#define BRA_BUF_SIZE (1 << 14)
+
+typedef struct
+{
+ size_t bufPos;
+ size_t bufConv;
+ size_t bufTotal;
+
+ UInt32 methodId;
+ int encodeMode;
+ UInt32 delta;
+ UInt32 ip;
+ UInt32 x86State;
+ Byte deltaState[DELTA_STATE_SIZE];
+
+ Byte buf[BRA_BUF_SIZE];
+} CBraState;
+
+void BraState_Free(void *pp, ISzAlloc *alloc)
+{
+ alloc->Free(alloc, pp);
+}
+
+SRes BraState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAlloc *alloc)
+{
+ CBraState *p = ((CBraState *)pp);
+ alloc = alloc;
+ p->encodeMode = 0;
+ p->ip = 0;
+ if (p->methodId == XZ_ID_Delta)
+ {
+ if (propSize != 1)
+ return SZ_ERROR_UNSUPPORTED;
+ p->delta = (unsigned)props[0] + 1;
+ }
+ else
+ {
+ if (propSize == 4)
+ {
+ UInt32 v = GetUi32(props);
+ switch(p->methodId)
+ {
+ case XZ_ID_PPC:
+ case XZ_ID_ARM:
+ case XZ_ID_SPARC:
+ if ((v & 3) != 0)
+ return SZ_ERROR_UNSUPPORTED;
+ break;
+ case XZ_ID_ARMT:
+ if ((v & 1) != 0)
+ return SZ_ERROR_UNSUPPORTED;
+ break;
+ case XZ_ID_IA64:
+ if ((v & 0xF) != 0)
+ return SZ_ERROR_UNSUPPORTED;
+ break;
+ }
+ p->ip = v;
+ }
+ else if (propSize != 0)
+ return SZ_ERROR_UNSUPPORTED;
+ }
+ return SZ_OK;
+}
+
+void BraState_Init(void *pp)
+{
+ CBraState *p = ((CBraState *)pp);
+ p->bufPos = p->bufConv = p->bufTotal = 0;
+ x86_Convert_Init(p->x86State);
+ if (p->methodId == XZ_ID_Delta)
+ Delta_Init(p->deltaState);
+}
+
+#define CASE_BRA_CONV(isa) case XZ_ID_ ## isa: p->bufConv = isa ## _Convert(p->buf, p->bufTotal, p->ip, p->encodeMode); break;
+
+static SRes BraState_Code(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ int srcWasFinished, ECoderFinishMode finishMode, int *wasFinished)
+{
+ CBraState *p = ((CBraState *)pp);
+ SizeT destLenOrig = *destLen;
+ SizeT srcLenOrig = *srcLen;
+ *destLen = 0;
+ *srcLen = 0;
+ finishMode = finishMode;
+ *wasFinished = 0;
+ while (destLenOrig > 0)
+ {
+ if (p->bufPos != p->bufConv)
+ {
+ size_t curSize = p->bufConv - p->bufPos;
+ if (curSize > destLenOrig)
+ curSize = destLenOrig;
+ memcpy(dest, p->buf + p->bufPos, curSize);
+ p->bufPos += curSize;
+ *destLen += curSize;
+ dest += curSize;
+ destLenOrig -= curSize;
+ continue;
+ }
+ p->bufTotal -= p->bufPos;
+ memmove(p->buf, p->buf + p->bufPos, p->bufTotal);
+ p->bufPos = 0;
+ p->bufConv = 0;
+ {
+ size_t curSize = BRA_BUF_SIZE - p->bufTotal;
+ if (curSize > srcLenOrig)
+ curSize = srcLenOrig;
+ memcpy(p->buf + p->bufTotal, src, curSize);
+ *srcLen += curSize;
+ src += curSize;
+ srcLenOrig -= curSize;
+ p->bufTotal += curSize;
+ }
+ if (p->bufTotal == 0)
+ break;
+ switch(p->methodId)
+ {
+ case XZ_ID_Delta:
+ if (p->encodeMode)
+ Delta_Encode(p->deltaState, p->delta, p->buf, p->bufTotal);
+ else
+ Delta_Decode(p->deltaState, p->delta, p->buf, p->bufTotal);
+ p->bufConv = p->bufTotal;
+ break;
+ case XZ_ID_X86:
+ p->bufConv = x86_Convert(p->buf, p->bufTotal, p->ip, &p->x86State, p->encodeMode);
+ break;
+ CASE_BRA_CONV(PPC)
+ CASE_BRA_CONV(IA64)
+ CASE_BRA_CONV(ARM)
+ CASE_BRA_CONV(ARMT)
+ CASE_BRA_CONV(SPARC)
+ default:
+ return SZ_ERROR_UNSUPPORTED;
+ }
+ p->ip += (UInt32)p->bufConv;
+
+ if (p->bufConv == 0)
+ {
+ if (!srcWasFinished)
+ break;
+ p->bufConv = p->bufTotal;
+ }
+ }
+ if (p->bufTotal == p->bufPos && srcLenOrig == 0 && srcWasFinished)
+ *wasFinished = 1;
+ return SZ_OK;
+}
+
+SRes BraState_SetFromMethod(IStateCoder *p, UInt64 id, ISzAlloc *alloc)
+{
+ CBraState *decoder;
+ if (id != XZ_ID_Delta &&
+ id != XZ_ID_X86 &&
+ id != XZ_ID_PPC &&
+ id != XZ_ID_IA64 &&
+ id != XZ_ID_ARM &&
+ id != XZ_ID_ARMT &&
+ id != XZ_ID_SPARC)
+ return SZ_ERROR_UNSUPPORTED;
+ p->p = 0;
+ decoder = alloc->Alloc(alloc, sizeof(CBraState));
+ if (decoder == 0)
+ return SZ_ERROR_MEM;
+ decoder->methodId = (UInt32)id;
+ p->p = decoder;
+ p->Free = BraState_Free;
+ p->SetProps = BraState_SetProps;
+ p->Init = BraState_Init;
+ p->Code = BraState_Code;
+ return SZ_OK;
+}
+
+/* ---------- SbState ---------- */
+
+#ifdef USE_SUBBLOCK
+
+static void SbState_Free(void *pp, ISzAlloc *alloc)
+{
+ CSubblockDec *p = (CSubblockDec *)pp;
+ SubblockDec_Free(p, alloc);
+ alloc->Free(alloc, pp);
+}
+
+static SRes SbState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAlloc *alloc)
+{
+ pp = pp;
+ props = props;
+ alloc = alloc;
+ return (propSize == 0) ? SZ_OK : SZ_ERROR_UNSUPPORTED;
+}
+
+static void SbState_Init(void *pp)
+{
+ SubblockDec_Init((CSubblockDec *)pp);
+}
+
+static SRes SbState_Code(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ int srcWasFinished, ECoderFinishMode finishMode, int *wasFinished)
+{
+ ECoderStatus status;
+ SRes res = SubblockDec_Decode((CSubblockDec *)pp, dest, destLen, src, srcLen, finishMode, &status);
+ srcWasFinished = srcWasFinished;
+ *wasFinished = (status == LZMA_STATUS_FINISHED_WITH_MARK);
+ return res;
+}
+
+SRes SbState_SetFromMethod(IStateCoder *p, ISzAlloc *alloc)
+{
+ CSubblockDec *decoder;
+ p->p = 0;
+ decoder = alloc->Alloc(alloc, sizeof(CSubblockDec));
+ if (decoder == 0)
+ return SZ_ERROR_MEM;
+ p->p = decoder;
+ p->Free = SbState_Free;
+ p->SetProps = SbState_SetProps;
+ p->Init = SbState_Init;
+ p->Code = SbState_Code;
+ SubblockDec_Construct(decoder);
+ return SZ_OK;
+}
+#endif
+
+/* ---------- Lzma2State ---------- */
+
+static void Lzma2State_Free(void *pp, ISzAlloc *alloc)
+{
+ Lzma2Dec_Free((CLzma2Dec *)pp, alloc);
+ alloc->Free(alloc, pp);
+}
+
+static SRes Lzma2State_SetProps(void *pp, const Byte *props, size_t propSize, ISzAlloc *alloc)
+{
+ if (propSize != 1)
+ return SZ_ERROR_UNSUPPORTED;
+ return Lzma2Dec_Allocate((CLzma2Dec *)pp, props[0], alloc);
+}
+
+static void Lzma2State_Init(void *pp)
+{
+ Lzma2Dec_Init((CLzma2Dec *)pp);
+}
+
+static SRes Lzma2State_Code(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ int srcWasFinished, ECoderFinishMode finishMode, int *wasFinished)
+{
+ ELzmaStatus status;
+ /* ELzmaFinishMode fm = (finishMode == LZMA_FINISH_ANY) ? LZMA_FINISH_ANY : LZMA_FINISH_END; */
+ SRes res = Lzma2Dec_DecodeToBuf((CLzma2Dec *)pp, dest, destLen, src, srcLen, finishMode, &status);
+ srcWasFinished = srcWasFinished;
+ *wasFinished = (status == LZMA_STATUS_FINISHED_WITH_MARK);
+ return res;
+}
+
+static SRes Lzma2State_SetFromMethod(IStateCoder *p, ISzAlloc *alloc)
+{
+ CLzma2Dec *decoder = alloc->Alloc(alloc, sizeof(CLzma2Dec));
+ p->p = decoder;
+ if (decoder == 0)
+ return SZ_ERROR_MEM;
+ p->Free = Lzma2State_Free;
+ p->SetProps = Lzma2State_SetProps;
+ p->Init = Lzma2State_Init;
+ p->Code = Lzma2State_Code;
+ Lzma2Dec_Construct(decoder);
+ return SZ_OK;
+}
+
+
+void MixCoder_Construct(CMixCoder *p, ISzAlloc *alloc)
+{
+ int i;
+ p->alloc = alloc;
+ p->buf = 0;
+ p->numCoders = 0;
+ for (i = 0; i < MIXCODER_NUM_FILTERS_MAX; i++)
+ p->coders[i].p = NULL;
+}
+
+void MixCoder_Free(CMixCoder *p)
+{
+ int i;
+ for (i = 0; i < p->numCoders; i++)
+ {
+ IStateCoder *sc = &p->coders[i];
+ if (p->alloc && sc->p)
+ sc->Free(sc->p, p->alloc);
+ }
+ p->numCoders = 0;
+ if (p->buf)
+ p->alloc->Free(p->alloc, p->buf);
+}
+
+void MixCoder_Init(CMixCoder *p)
+{
+ int i;
+ for (i = 0; i < p->numCoders - 1; i++)
+ {
+ p->size[i] = 0;
+ p->pos[i] = 0;
+ p->finished[i] = 0;
+ }
+ for (i = 0; i < p->numCoders; i++)
+ {
+ IStateCoder *coder = &p->coders[i];
+ coder->Init(coder->p);
+ }
+}
+
+SRes MixCoder_SetFromMethod(CMixCoder *p, int coderIndex, UInt64 methodId)
+{
+ IStateCoder *sc = &p->coders[coderIndex];
+ p->ids[coderIndex] = methodId;
+ switch(methodId)
+ {
+ case XZ_ID_LZMA2: return Lzma2State_SetFromMethod(sc, p->alloc);
+ #ifdef USE_SUBBLOCK
+ case XZ_ID_Subblock: return SbState_SetFromMethod(sc, p->alloc);
+ #endif
+ }
+ if (coderIndex == 0)
+ return SZ_ERROR_UNSUPPORTED;
+ return BraState_SetFromMethod(sc, methodId, p->alloc);
+}
+
+SRes MixCoder_Code(CMixCoder *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, int srcWasFinished,
+ ECoderFinishMode finishMode, ECoderStatus *status)
+{
+ SizeT destLenOrig = *destLen;
+ SizeT srcLenOrig = *srcLen;
+ Bool allFinished = True;
+ *destLen = 0;
+ *srcLen = 0;
+ *status = CODER_STATUS_NOT_FINISHED;
+
+ if (p->buf == 0)
+ {
+ p->buf = p->alloc->Alloc(p->alloc, CODER_BUF_SIZE * (MIXCODER_NUM_FILTERS_MAX - 1));
+ if (p->buf == 0)
+ return SZ_ERROR_MEM;
+ }
+
+ if (p->numCoders != 1)
+ finishMode = CODER_FINISH_ANY;
+
+ for (;;)
+ {
+ Bool processed = False;
+ int i;
+ /*
+ if (p->numCoders == 1 && *destLen == destLenOrig && finishMode == LZMA_FINISH_ANY)
+ break;
+ */
+
+ for (i = 0; i < p->numCoders; i++)
+ {
+ SRes res;
+ IStateCoder *coder = &p->coders[i];
+ Byte *destCur;
+ SizeT destLenCur, srcLenCur;
+ const Byte *srcCur;
+ int srcFinishedCur;
+ int encodingWasFinished;
+
+ if (i == 0)
+ {
+ srcCur = src;
+ srcLenCur = srcLenOrig - *srcLen;
+ srcFinishedCur = srcWasFinished;
+ }
+ else
+ {
+ srcCur = p->buf + (CODER_BUF_SIZE * (i - 1)) + p->pos[i - 1];
+ srcLenCur = p->size[i - 1] - p->pos[i - 1];
+ srcFinishedCur = p->finished[i - 1];
+ }
+
+ if (i == p->numCoders - 1)
+ {
+ destCur = dest;
+ destLenCur = destLenOrig - *destLen;
+ }
+ else
+ {
+ if (p->pos[i] != p->size[i])
+ continue;
+ destCur = p->buf + (CODER_BUF_SIZE * i);
+ destLenCur = CODER_BUF_SIZE;
+ }
+
+ res = coder->Code(coder->p, destCur, &destLenCur, srcCur, &srcLenCur, srcFinishedCur, finishMode, &encodingWasFinished);
+
+ if (!encodingWasFinished)
+ allFinished = False;
+
+ if (i == 0)
+ {
+ *srcLen += srcLenCur;
+ src += srcLenCur;
+ }
+ else
+ {
+ p->pos[i - 1] += srcLenCur;
+ }
+
+ if (i == p->numCoders - 1)
+ {
+ *destLen += destLenCur;
+ dest += destLenCur;
+ }
+ else
+ {
+ p->size[i] = destLenCur;
+ p->pos[i] = 0;
+ p->finished[i] = encodingWasFinished;
+ }
+
+ if (res != SZ_OK)
+ return res;
+
+ if (destLenCur != 0 || srcLenCur != 0)
+ processed = True;
+ }
+ if (!processed)
+ break;
+ }
+ if (allFinished)
+ *status = CODER_STATUS_FINISHED_WITH_MARK;
+ return SZ_OK;
+}
+
+SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf)
+{
+ *p = (CXzStreamFlags)GetBe16(buf + XZ_SIG_SIZE);
+ if (CrcCalc(buf + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE) !=
+ GetUi32(buf + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE))
+ return SZ_ERROR_NO_ARCHIVE;
+ return XzFlags_IsSupported(*p) ? SZ_OK : SZ_ERROR_UNSUPPORTED;
+}
+
+static Bool Xz_CheckFooter(CXzStreamFlags flags, UInt64 indexSize, const Byte *buf)
+{
+ return
+ indexSize == (((UInt64)GetUi32(buf + 4) + 1) << 2) &&
+ (GetUi32(buf) == CrcCalc(buf + 4, 6) &&
+ flags == GetBe16(buf + 8) &&
+ memcmp(buf + 10, XZ_FOOTER_SIG, XZ_FOOTER_SIG_SIZE) == 0);
+}
+
+#define READ_VARINT_AND_CHECK(buf, pos, size, res) \
+ { unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \
+ if (s == 0) return SZ_ERROR_ARCHIVE; pos += s; }
+
+
+SRes XzBlock_Parse(CXzBlock *p, const Byte *header)
+{
+ unsigned pos;
+ int numFilters, i;
+ UInt32 headerSize = (UInt32)header[0] << 2;
+
+ if (CrcCalc(header, headerSize) != GetUi32(header + headerSize))
+ return SZ_ERROR_ARCHIVE;
+
+ pos = 1;
+ if (pos == headerSize)
+ return SZ_ERROR_ARCHIVE;
+ p->flags = header[pos++];
+
+ if (XzBlock_HasPackSize(p))
+ {
+ READ_VARINT_AND_CHECK(header, pos, headerSize, &p->packSize);
+ if (p->packSize == 0 || p->packSize + headerSize >= (UInt64)1 << 63)
+ return SZ_ERROR_ARCHIVE;
+ }
+
+ if (XzBlock_HasUnpackSize(p))
+ READ_VARINT_AND_CHECK(header, pos, headerSize, &p->unpackSize);
+
+ numFilters = XzBlock_GetNumFilters(p);
+ for (i = 0; i < numFilters; i++)
+ {
+ CXzFilter *filter = p->filters + i;
+ UInt64 size;
+ READ_VARINT_AND_CHECK(header, pos, headerSize, &filter->id);
+ READ_VARINT_AND_CHECK(header, pos, headerSize, &size);
+ if (size > headerSize - pos || size > XZ_FILTER_PROPS_SIZE_MAX)
+ return SZ_ERROR_ARCHIVE;
+ filter->propsSize = (UInt32)size;
+ memcpy(filter->props, header + pos, (size_t)size);
+ pos += (unsigned)size;
+
+ #ifdef XZ_DUMP
+ printf("\nf[%d] = %2X: ", i, filter->id);
+ {
+ int i;
+ for (i = 0; i < size; i++)
+ printf(" %2X", filter->props[i]);
+ }
+ #endif
+ }
+
+ while (pos < headerSize)
+ if (header[pos++] != 0)
+ return SZ_ERROR_ARCHIVE;
+ return SZ_OK;
+}
+
+SRes XzDec_Init(CMixCoder *p, const CXzBlock *block)
+{
+ int i;
+ Bool needReInit = True;
+ int numFilters = XzBlock_GetNumFilters(block);
+ if (numFilters == p->numCoders)
+ {
+ for (i = 0; i < numFilters; i++)
+ if (p->ids[i] != block->filters[numFilters - 1 - i].id)
+ break;
+ needReInit = (i != numFilters);
+ }
+ if (needReInit)
+ {
+ MixCoder_Free(p);
+ p->numCoders = numFilters;
+ for (i = 0; i < numFilters; i++)
+ {
+ const CXzFilter *f = &block->filters[numFilters - 1 - i];
+ RINOK(MixCoder_SetFromMethod(p, i, f->id));
+ }
+ }
+ for (i = 0; i < numFilters; i++)
+ {
+ const CXzFilter *f = &block->filters[numFilters - 1 - i];
+ IStateCoder *sc = &p->coders[i];
+ RINOK(sc->SetProps(sc->p, f->props, f->propsSize, p->alloc));
+ }
+ MixCoder_Init(p);
+ return SZ_OK;
+}
+
+SRes XzUnpacker_Create(CXzUnpacker *p, ISzAlloc *alloc)
+{
+ MixCoder_Construct(&p->decoder, alloc);
+ p->state = XZ_STATE_STREAM_HEADER;
+ p->pos = 0;
+ p->numStreams = 0;
+ return SZ_OK;
+}
+
+void XzUnpacker_Free(CXzUnpacker *p)
+{
+ MixCoder_Free(&p->decoder);
+}
+
+SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, int finishMode, ECoderStatus *status)
+{
+ SizeT destLenOrig = *destLen;
+ SizeT srcLenOrig = *srcLen;
+ *destLen = 0;
+ *srcLen = 0;
+ *status = CODER_STATUS_NOT_SPECIFIED;
+ for (;;)
+ {
+ SizeT srcRem = srcLenOrig - *srcLen;
+
+ if (p->state == XZ_STATE_BLOCK)
+ {
+ SizeT destLen2 = destLenOrig - *destLen;
+ SizeT srcLen2 = srcLenOrig - *srcLen;
+ SRes res;
+ if (srcLen2 == 0 && destLen2 == 0)
+ {
+ *status = CODER_STATUS_NOT_FINISHED;
+ return SZ_OK;
+ }
+
+ res = MixCoder_Code(&p->decoder, dest, &destLen2, src, &srcLen2, False, finishMode, status);
+ XzCheck_Update(&p->check, dest, destLen2);
+
+ (*srcLen) += srcLen2;
+ src += srcLen2;
+ p->packSize += srcLen2;
+
+ (*destLen) += destLen2;
+ dest += destLen2;
+ p->unpackSize += destLen2;
+
+ RINOK(res);
+
+ if (*status == CODER_STATUS_FINISHED_WITH_MARK)
+ {
+ Byte temp[32];
+ unsigned num = Xz_WriteVarInt(temp, p->packSize + p->blockHeaderSize + XzFlags_GetCheckSize(p->streamFlags));
+ num += Xz_WriteVarInt(temp + num, p->unpackSize);
+ Sha256_Update(&p->sha, temp, num);
+ p->indexSize += num;
+ p->numBlocks++;
+
+ p->state = XZ_STATE_BLOCK_FOOTER;
+ p->pos = 0;
+ p->alignPos = 0;
+ }
+ else if (srcLen2 == 0 && destLen2 == 0)
+ return SZ_OK;
+
+ continue;
+ }
+
+ if (srcRem == 0)
+ {
+ *status = CODER_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+
+ switch(p->state)
+ {
+ case XZ_STATE_STREAM_HEADER:
+ {
+ if (p->pos < XZ_STREAM_HEADER_SIZE)
+ {
+ if (p->pos < XZ_SIG_SIZE && *src != XZ_SIG[p->pos])
+ return SZ_ERROR_NO_ARCHIVE;
+ p->buf[p->pos++] = *src++;
+ (*srcLen)++;
+ }
+ else
+ {
+ RINOK(Xz_ParseHeader(&p->streamFlags, p->buf));
+ p->state = XZ_STATE_BLOCK_HEADER;
+ Sha256_Init(&p->sha);
+ p->indexSize = 0;
+ p->numBlocks = 0;
+ p->pos = 0;
+ }
+ break;
+ }
+
+ case XZ_STATE_BLOCK_HEADER:
+ {
+ if (p->pos == 0)
+ {
+ p->buf[p->pos++] = *src++;
+ (*srcLen)++;
+ if (p->buf[0] == 0)
+ {
+ p->indexPreSize = 1 + Xz_WriteVarInt(p->buf + 1, p->numBlocks);
+ p->indexPos = p->indexPreSize;
+ p->indexSize += p->indexPreSize;
+ Sha256_Final(&p->sha, p->shaDigest);
+ Sha256_Init(&p->sha);
+ p->crc = CrcUpdate(CRC_INIT_VAL, p->buf, p->indexPreSize);
+ p->state = XZ_STATE_STREAM_INDEX;
+ }
+ p->blockHeaderSize = ((UInt32)p->buf[0] << 2) + 4;
+ }
+ else if (p->pos != p->blockHeaderSize)
+ {
+ UInt32 cur = p->blockHeaderSize - p->pos;
+ if (cur > srcRem)
+ cur = (UInt32)srcRem;
+ memcpy(p->buf + p->pos, src, cur);
+ p->pos += cur;
+ (*srcLen) += cur;
+ src += cur;
+ }
+ else
+ {
+ RINOK(XzBlock_Parse(&p->block, p->buf));
+ p->state = XZ_STATE_BLOCK;
+ p->packSize = 0;
+ p->unpackSize = 0;
+ XzCheck_Init(&p->check, XzFlags_GetCheckType(p->streamFlags));
+ RINOK(XzDec_Init(&p->decoder, &p->block));
+ }
+ break;
+ }
+
+ case XZ_STATE_BLOCK_FOOTER:
+ {
+ if (((p->packSize + p->alignPos) & 3) != 0)
+ {
+ (*srcLen)++;
+ p->alignPos++;
+ if (*src++ != 0)
+ return SZ_ERROR_CRC;
+ }
+ else
+ {
+ UInt32 checkSize = XzFlags_GetCheckSize(p->streamFlags);
+ UInt32 cur = checkSize - p->pos;
+ if (cur != 0)
+ {
+ if (cur > srcRem)
+ cur = (UInt32)srcRem;
+ memcpy(p->buf + p->pos, src, cur);
+ p->pos += cur;
+ (*srcLen) += cur;
+ src += cur;
+ }
+ else
+ {
+ Byte digest[XZ_CHECK_SIZE_MAX];
+ p->state = XZ_STATE_BLOCK_HEADER;
+ p->pos = 0;
+ if (XzCheck_Final(&p->check, digest) && memcmp(digest, p->buf, checkSize) != 0)
+ return SZ_ERROR_CRC;
+ }
+ }
+ break;
+ }
+
+ case XZ_STATE_STREAM_INDEX:
+ {
+ if (p->pos < p->indexPreSize)
+ {
+ (*srcLen)++;
+ if (*src++ != p->buf[p->pos++])
+ return SZ_ERROR_CRC;
+ }
+ else
+ {
+ if (p->indexPos < p->indexSize)
+ {
+ UInt64 cur = p->indexSize - p->indexPos;
+ if (srcRem > cur)
+ srcRem = (SizeT)cur;
+ p->crc = CrcUpdate(p->crc, src, srcRem);
+ Sha256_Update(&p->sha, src, srcRem);
+ (*srcLen) += srcRem;
+ src += srcRem;
+ p->indexPos += srcRem;
+ }
+ else if ((p->indexPos & 3) != 0)
+ {
+ Byte b = *src++;
+ p->crc = CRC_UPDATE_BYTE(p->crc, b);
+ (*srcLen)++;
+ p->indexPos++;
+ p->indexSize++;
+ if (b != 0)
+ return SZ_ERROR_CRC;
+ }
+ else
+ {
+ Byte digest[SHA256_DIGEST_SIZE];
+ p->state = XZ_STATE_STREAM_INDEX_CRC;
+ p->indexSize += 4;
+ p->pos = 0;
+ Sha256_Final(&p->sha, digest);
+ if (memcmp(digest, p->shaDigest, SHA256_DIGEST_SIZE) != 0)
+ return SZ_ERROR_CRC;
+ }
+ }
+ break;
+ }
+
+ case XZ_STATE_STREAM_INDEX_CRC:
+ {
+ if (p->pos < 4)
+ {
+ (*srcLen)++;
+ p->buf[p->pos++] = *src++;
+ }
+ else
+ {
+ p->state = XZ_STATE_STREAM_FOOTER;
+ p->pos = 0;
+ if (CRC_GET_DIGEST(p->crc) != GetUi32(p->buf))
+ return SZ_ERROR_CRC;
+ }
+ break;
+ }
+
+ case XZ_STATE_STREAM_FOOTER:
+ {
+ UInt32 cur = XZ_STREAM_FOOTER_SIZE - p->pos;
+ if (cur > srcRem)
+ cur = (UInt32)srcRem;
+ memcpy(p->buf + p->pos, src, cur);
+ p->pos += cur;
+ (*srcLen) += cur;
+ src += cur;
+ if (p->pos == XZ_STREAM_FOOTER_SIZE)
+ {
+ p->state = XZ_STATE_STREAM_PADDING;
+ p->numStreams++;
+ p->padSize = 0;
+ if (!Xz_CheckFooter(p->streamFlags, p->indexSize, p->buf))
+ return SZ_ERROR_CRC;
+ }
+ break;
+ }
+
+ case XZ_STATE_STREAM_PADDING:
+ {
+ if (*src != 0)
+ {
+ if (((UInt32)p->padSize & 3) != 0)
+ return SZ_ERROR_NO_ARCHIVE;
+ p->pos = 0;
+ p->state = XZ_STATE_STREAM_HEADER;
+ }
+ else
+ {
+ (*srcLen)++;
+ src++;
+ p->padSize++;
+ }
+ break;
+ }
+
+ case XZ_STATE_BLOCK: break; /* to disable GCC warning */
+ }
+ }
+ /*
+ if (p->state == XZ_STATE_FINISHED)
+ *status = CODER_STATUS_FINISHED_WITH_MARK;
+ return SZ_OK;
+ */
+}
+
+Bool XzUnpacker_IsStreamWasFinished(CXzUnpacker *p)
+{
+ return (p->state == XZ_STATE_STREAM_PADDING) && (((UInt32)p->padSize & 3) == 0);
+}
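
A round-trip self-check (not part of the patch) pairing Xz_ReadVarInt above with Xz_WriteVarInt from Xz.c: the variable-length encoding stores 7 bits per byte and never needs more than nine bytes for the 63-bit values the format allows.

#include <assert.h>
#include "Xz.h"

static void VarIntRoundTrip(UInt64 v)
{
  Byte buf[9];
  UInt64 back = 0;
  unsigned written = Xz_WriteVarInt(buf, v);
  unsigned got = Xz_ReadVarInt(buf, written, &back);
  assert(written == got && back == v);   /* encoder and decoder agree */
}

int main(void)
{
  VarIntRoundTrip(0);                    /* 1 byte  */
  VarIntRoundTrip(127);                  /* 1 byte  */
  VarIntRoundTrip(128);                  /* 2 bytes */
  VarIntRoundTrip((UInt64)1 << 62);      /* 9 bytes */
  return 0;
}
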
diff --git a/src/libs/7zip/unix/C/XzEnc.c b/src/libs/7zip/unix/C/XzEnc.c
new file mode 100644
index 000000000..721b4e765
--- /dev/null
+++ b/src/libs/7zip/unix/C/XzEnc.c
@@ -0,0 +1,497 @@
+/* XzEnc.c -- Xz Encode
+2009-06-04 : Igor Pavlov : Public domain */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "7zCrc.h"
+#include "Alloc.h"
+#include "Bra.h"
+#include "CpuArch.h"
+#ifdef USE_SUBBLOCK
+#include "SbEnc.h"
+#endif
+
+#include "XzEnc.h"
+
+static void *SzBigAlloc(void *p, size_t size) { p = p; return BigAlloc(size); }
+static void SzBigFree(void *p, void *address) { p = p; BigFree(address); }
+static ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
+
+static void *SzAlloc(void *p, size_t size) { p = p; return MyAlloc(size); }
+static void SzFree(void *p, void *address) { p = p; MyFree(address); }
+static ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
+#define XzBlock_ClearFlags(p) (p)->flags = 0;
+#define XzBlock_SetNumFilters(p, n) (p)->flags |= ((n) - 1);
+#define XzBlock_SetHasPackSize(p) (p)->flags |= XZ_BF_PACK_SIZE;
+#define XzBlock_SetHasUnpackSize(p) (p)->flags |= XZ_BF_UNPACK_SIZE;
+
+static SRes WriteBytes(ISeqOutStream *s, const void *buf, UInt32 size)
+{
+ return (s->Write(s, buf, size) == size) ? SZ_OK : SZ_ERROR_WRITE;
+}
+
+static SRes WriteBytesAndCrc(ISeqOutStream *s, const void *buf, UInt32 size, UInt32 *crc)
+{
+ *crc = CrcUpdate(*crc, buf, size);
+ return WriteBytes(s, buf, size);
+}
+
+SRes Xz_WriteHeader(CXzStreamFlags f, ISeqOutStream *s)
+{
+ UInt32 crc;
+ Byte header[XZ_STREAM_HEADER_SIZE];
+ memcpy(header, XZ_SIG, XZ_SIG_SIZE);
+ header[XZ_SIG_SIZE] = (Byte)(f >> 8);
+ header[XZ_SIG_SIZE + 1] = (Byte)(f & 0xFF);
+ crc = CrcCalc(header + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE);
+ SetUi32(header + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE, crc);
+ return WriteBytes(s, header, XZ_STREAM_HEADER_SIZE);
+}
+
+SRes XzBlock_WriteHeader(const CXzBlock *p, ISeqOutStream *s)
+{
+ Byte header[XZ_BLOCK_HEADER_SIZE_MAX];
+
+ unsigned pos = 1;
+ int numFilters, i;
+ header[pos++] = p->flags;
+
+ if (XzBlock_HasPackSize(p)) pos += Xz_WriteVarInt(header + pos, p->packSize);
+ if (XzBlock_HasUnpackSize(p)) pos += Xz_WriteVarInt(header + pos, p->unpackSize);
+ numFilters = XzBlock_GetNumFilters(p);
+ for (i = 0; i < numFilters; i++)
+ {
+ const CXzFilter *f = &p->filters[i];
+ pos += Xz_WriteVarInt(header + pos, f->id);
+ pos += Xz_WriteVarInt(header + pos, f->propsSize);
+ memcpy(header + pos, f->props, f->propsSize);
+ pos += f->propsSize;
+ }
+ while((pos & 3) != 0)
+ header[pos++] = 0;
+ header[0] = (Byte)(pos >> 2);
+ SetUi32(header + pos, CrcCalc(header, pos));
+ return WriteBytes(s, header, pos + 4);
+}
+
+SRes Xz_WriteFooter(CXzStream *p, ISeqOutStream *s)
+{
+ Byte buf[32];
+ UInt64 globalPos;
+ {
+ UInt32 crc = CRC_INIT_VAL;
+ unsigned pos = 1 + Xz_WriteVarInt(buf + 1, p->numBlocks);
+ size_t i;
+
+ globalPos = pos;
+ buf[0] = 0;
+ RINOK(WriteBytesAndCrc(s, buf, pos, &crc));
+ for (i = 0; i < p->numBlocks; i++)
+ {
+ const CXzBlockSizes *block = &p->blocks[i];
+ pos = Xz_WriteVarInt(buf, block->totalSize);
+ pos += Xz_WriteVarInt(buf + pos, block->unpackSize);
+ globalPos += pos;
+ RINOK(WriteBytesAndCrc(s, buf, pos, &crc));
+ }
+ pos = ((unsigned)globalPos & 3);
+ if (pos != 0)
+ {
+ buf[0] = buf[1] = buf[2] = 0;
+ RINOK(WriteBytesAndCrc(s, buf, 4 - pos, &crc));
+ globalPos += 4 - pos;
+ }
+ {
+ SetUi32(buf, CRC_GET_DIGEST(crc));
+ RINOK(WriteBytes(s, buf, 4));
+ globalPos += 4;
+ }
+ }
+
+ {
+ UInt32 indexSize = (UInt32)((globalPos >> 2) - 1);
+ SetUi32(buf + 4, indexSize);
+ buf[8] = (Byte)(p->flags >> 8);
+ buf[9] = (Byte)(p->flags & 0xFF);
+ SetUi32(buf, CrcCalc(buf + 4, 6));
+ memcpy(buf + 10, XZ_FOOTER_SIG, XZ_FOOTER_SIG_SIZE);
+ return WriteBytes(s, buf, 12);
+ }
+}
+
+SRes Xz_AddIndexRecord(CXzStream *p, UInt64 unpackSize, UInt64 totalSize, ISzAlloc *alloc)
+{
+ if (p->blocks == 0 || p->numBlocksAllocated == p->numBlocks)
+ {
+ size_t num = (p->numBlocks + 1) * 2;
+ size_t newSize = sizeof(CXzBlockSizes) * num;
+ CXzBlockSizes *blocks;
+ if (newSize / sizeof(CXzBlockSizes) != num)
+ return SZ_ERROR_MEM;
+ blocks = alloc->Alloc(alloc, newSize);
+ if (blocks == 0)
+ return SZ_ERROR_MEM;
+ if (p->numBlocks != 0)
+ {
+ memcpy(blocks, p->blocks, p->numBlocks * sizeof(CXzBlockSizes));
+ Xz_Free(p, alloc);
+ }
+ p->blocks = blocks;
+ p->numBlocksAllocated = num;
+ }
+ {
+ CXzBlockSizes *block = &p->blocks[p->numBlocks++];
+ block->totalSize = totalSize;
+ block->unpackSize = unpackSize;
+ }
+ return SZ_OK;
+}
+
+/* ---------- CSeqCheckInStream ---------- */
+
+typedef struct
+{
+ ISeqInStream p;
+ ISeqInStream *realStream;
+ UInt64 processed;
+ CXzCheck check;
+} CSeqCheckInStream;
+
+void SeqCheckInStream_Init(CSeqCheckInStream *p, int mode)
+{
+ p->processed = 0;
+ XzCheck_Init(&p->check, mode);
+}
+
+void SeqCheckInStream_GetDigest(CSeqCheckInStream *p, Byte *digest)
+{
+ XzCheck_Final(&p->check, digest);
+}
+
+static SRes SeqCheckInStream_Read(void *pp, void *data, size_t *size)
+{
+ CSeqCheckInStream *p = (CSeqCheckInStream *)pp;
+ SRes res = p->realStream->Read(p->realStream, data, size);
+ XzCheck_Update(&p->check, data, *size);
+ p->processed += *size;
+ return res;
+}
+
+/* ---------- CSeqSizeOutStream ---------- */
+
+typedef struct
+{
+ ISeqOutStream p;
+ ISeqOutStream *realStream;
+ UInt64 processed;
+} CSeqSizeOutStream;
+
+static size_t MyWrite(void *pp, const void *data, size_t size)
+{
+ CSeqSizeOutStream *p = (CSeqSizeOutStream *)pp;
+ size = p->realStream->Write(p->realStream, data, size);
+ p->processed += size;
+ return size;
+}
+
+/* ---------- CSeqInFilter ---------- */
+
+/*
+typedef struct _IFilter
+{
+ void *p;
+ void (*Free)(void *p, ISzAlloc *alloc);
+ SRes (*SetProps)(void *p, const Byte *props, size_t propSize, ISzAlloc *alloc);
+ void (*Init)(void *p);
+ size_t (*Filter)(void *p, Byte *data, SizeT destLen);
+} IFilter;
+
+#define FILT_BUF_SIZE (1 << 19)
+
+typedef struct
+{
+ ISeqInStream p;
+ ISeqInStream *realStream;
+ UInt32 x86State;
+ UInt32 ip;
+ UInt64 processed;
+ CXzCheck check;
+ Byte buf[FILT_BUF_SIZE];
+ UInt32 bufferPos;
+ UInt32 convertedPosBegin;
+ UInt32 convertedPosEnd;
+ IFilter *filter;
+} CSeqInFilter;
+
+static SRes SeqInFilter_Read(void *pp, void *data, size_t *size)
+{
+ CSeqInFilter *p = (CSeqInFilter *)pp;
+ size_t remSize = *size;
+ *size = 0;
+
+ while (remSize > 0)
+ {
+ int i;
+ if (p->convertedPosBegin != p->convertedPosEnd)
+ {
+ UInt32 sizeTemp = p->convertedPosEnd - p->convertedPosBegin;
+ if (remSize < sizeTemp)
+ sizeTemp = (UInt32)remSize;
+ memmove(data, p->buf + p->convertedPosBegin, sizeTemp);
+ p->convertedPosBegin += sizeTemp;
+ data = (void *)((Byte *)data + sizeTemp);
+ remSize -= sizeTemp;
+ *size += sizeTemp;
+ break;
+ }
+ for (i = 0; p->convertedPosEnd + i < p->bufferPos; i++)
+ p->buf[i] = p->buf[i + p->convertedPosEnd];
+ p->bufferPos = i;
+ p->convertedPosBegin = p->convertedPosEnd = 0;
+ {
+ size_t processedSizeTemp = FILT_BUF_SIZE - p->bufferPos;
+ RINOK(p->realStream->Read(p->realStream, p->buf + p->bufferPos, &processedSizeTemp));
+ p->bufferPos = p->bufferPos + (UInt32)processedSizeTemp;
+ }
+ p->convertedPosEnd = (UInt32)p->filter->Filter(p->filter->p, p->buf, p->bufferPos);
+ if (p->convertedPosEnd == 0)
+ {
+ if (p->bufferPos == 0)
+ break;
+ else
+ {
+ p->convertedPosEnd = p->bufferPos;
+ continue;
+ }
+ }
+ if (p->convertedPosEnd > p->bufferPos)
+ {
+ for (; p->bufferPos < p->convertedPosEnd; p->bufferPos++)
+ p->buf[p->bufferPos] = 0;
+ p->convertedPosEnd = (UInt32)p->filter->Filter(p->filter->p, p->buf, p->bufferPos);
+ }
+ }
+ return SZ_OK;
+}
+*/
+
+/*
+typedef struct
+{
+ ISeqInStream p;
+ ISeqInStream *realStream;
+ CMixCoder mixCoder;
+ Byte buf[FILT_BUF_SIZE];
+ UInt32 bufPos;
+ UInt32 bufSize;
+} CMixCoderSeqInStream;
+
+static SRes CMixCoderSeqInStream_Read(void *pp, void *data, size_t *size)
+{
+ CMixCoderSeqInStream *p = (CMixCoderSeqInStream *)pp;
+ SRes res = SZ_OK;
+ size_t remSize = *size;
+ *size = 0;
+ while (remSize > 0)
+ {
+ if (p->bufPos == p->bufSize)
+ {
+ size_t curSize;
+ p->bufPos = p->bufSize = 0;
+ if (*size != 0)
+ break;
+ curSize = FILT_BUF_SIZE;
+ RINOK(p->realStream->Read(p->realStream, p->buf, &curSize));
+ p->bufSize = (UInt32)curSize;
+ }
+ {
+ SizeT destLen = remSize;
+ SizeT srcLen = p->bufSize - p->bufPos;
+ res = MixCoder_Code(&p->mixCoder, data, &destLen, p->buf + p->bufPos, &srcLen, 0);
+ data = (void *)((Byte *)data + destLen);
+ remSize -= destLen;
+ *size += destLen;
+ p->bufPos += srcLen;
+ }
+ }
+ return res;
+}
+*/
+
+#ifdef USE_SUBBLOCK
+typedef struct
+{
+ ISeqInStream p;
+ CSubblockEnc sb;
+ UInt64 processed;
+} CSbEncInStream;
+
+void SbEncInStream_Init(CSbEncInStream *p)
+{
+ p->processed = 0;
+ SubblockEnc_Init(&p->sb);
+}
+
+static SRes SbEncInStream_Read(void *pp, void *data, size_t *size)
+{
+ CSbEncInStream *p = (CSbEncInStream *)pp;
+ SRes res = SubblockEnc_Read(&p->sb, data, size);
+ p->processed += *size;
+ return res;
+}
+#endif
+
+typedef struct
+{
+ /* CMixCoderSeqInStream inStream; */
+ CLzma2EncHandle lzma2;
+ #ifdef USE_SUBBLOCK
+ CSbEncInStream sb;
+ #endif
+ ISzAlloc *alloc;
+ ISzAlloc *bigAlloc;
+} CLzma2WithFilters;
+
+
+static void Lzma2WithFilters_Construct(CLzma2WithFilters *p, ISzAlloc *alloc, ISzAlloc *bigAlloc)
+{
+ p->alloc = alloc;
+ p->bigAlloc = bigAlloc;
+ p->lzma2 = NULL;
+ #ifdef USE_SUBBLOCK
+ p->sb.p.Read = SbEncInStream_Read;
+ SubblockEnc_Construct(&p->sb.sb, p->alloc);
+ #endif
+}
+
+static SRes Lzma2WithFilters_Create(CLzma2WithFilters *p)
+{
+ p->lzma2 = Lzma2Enc_Create(p->alloc, p->bigAlloc);
+ if (p->lzma2 == 0)
+ return SZ_ERROR_MEM;
+ return SZ_OK;
+}
+
+static void Lzma2WithFilters_Free(CLzma2WithFilters *p)
+{
+ #ifdef USE_SUBBLOCK
+ SubblockEnc_Free(&p->sb.sb);
+ #endif
+ if (p->lzma2)
+ {
+ Lzma2Enc_Destroy(p->lzma2);
+ p->lzma2 = NULL;
+ }
+}
+
+static SRes Xz_Compress(CXzStream *xz,
+ CLzma2WithFilters *lzmaf,
+ ISeqOutStream *outStream,
+ ISeqInStream *inStream,
+ const CLzma2EncProps *lzma2Props,
+ Bool useSubblock,
+ ICompressProgress *progress)
+{
+ xz->flags = XZ_CHECK_CRC32;
+
+ RINOK(Lzma2Enc_SetProps(lzmaf->lzma2, lzma2Props));
+ RINOK(Xz_WriteHeader(xz->flags, outStream));
+
+ {
+ CSeqCheckInStream checkInStream;
+ CSeqSizeOutStream seqSizeOutStream;
+ CXzBlock block;
+ int filterIndex = 0;
+
+ XzBlock_ClearFlags(&block);
+ XzBlock_SetNumFilters(&block, 1 + (useSubblock ? 1 : 0));
+
+ if (useSubblock)
+ {
+ CXzFilter *f = &block.filters[filterIndex++];
+ f->id = XZ_ID_Subblock;
+ f->propsSize = 0;
+ }
+
+ {
+ CXzFilter *f = &block.filters[filterIndex++];
+ f->id = XZ_ID_LZMA2;
+ f->propsSize = 1;
+ f->props[0] = Lzma2Enc_WriteProperties(lzmaf->lzma2);
+ }
+
+ seqSizeOutStream.p.Write = MyWrite;
+ seqSizeOutStream.realStream = outStream;
+ seqSizeOutStream.processed = 0;
+
+ RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.p));
+
+ checkInStream.p.Read = SeqCheckInStream_Read;
+ checkInStream.realStream = inStream;
+ SeqCheckInStream_Init(&checkInStream, XzFlags_GetCheckType(xz->flags));
+
+ #ifdef USE_SUBBLOCK
+ if (useSubblock)
+ {
+ lzmaf->sb.sb.inStream = &checkInStream.p;
+ SubblockEnc_Init(&lzmaf->sb.sb);
+ }
+ #endif
+
+ {
+ UInt64 packPos = seqSizeOutStream.processed;
+ SRes res = Lzma2Enc_Encode(lzmaf->lzma2, &seqSizeOutStream.p,
+ #ifdef USE_SUBBLOCK
+ useSubblock ? &lzmaf->sb.p:
+ #endif
+ &checkInStream.p,
+ progress);
+ RINOK(res);
+ block.unpackSize = checkInStream.processed;
+ block.packSize = seqSizeOutStream.processed - packPos;
+ }
+
+ {
+ unsigned padSize = 0;
+ Byte buf[128];
+ while((((unsigned)block.packSize + padSize) & 3) != 0)
+ buf[padSize++] = 0;
+ SeqCheckInStream_GetDigest(&checkInStream, buf + padSize);
+ RINOK(WriteBytes(&seqSizeOutStream.p, buf, padSize + XzFlags_GetCheckSize(xz->flags)));
+ RINOK(Xz_AddIndexRecord(xz, block.unpackSize, seqSizeOutStream.processed - padSize, &g_Alloc));
+ }
+ }
+ return Xz_WriteFooter(xz, outStream);
+}
+
+SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream,
+ const CLzma2EncProps *lzma2Props, Bool useSubblock,
+ ICompressProgress *progress)
+{
+ SRes res;
+ CXzStream xz;
+ CLzma2WithFilters lzmaf;
+ Xz_Construct(&xz);
+ Lzma2WithFilters_Construct(&lzmaf, &g_Alloc, &g_BigAlloc);
+ res = Lzma2WithFilters_Create(&lzmaf);
+ if (res == SZ_OK)
+ res = Xz_Compress(&xz, &lzmaf, outStream, inStream,
+ lzma2Props, useSubblock, progress);
+ Lzma2WithFilters_Free(&lzmaf);
+ Xz_Free(&xz, &g_Alloc);
+ return res;
+}
+
+SRes Xz_EncodeEmpty(ISeqOutStream *outStream)
+{
+ SRes res;
+ CXzStream xz;
+ Xz_Construct(&xz);
+ res = Xz_WriteHeader(xz.flags, outStream);
+ if (res == SZ_OK)
+ res = Xz_WriteFooter(&xz, outStream);
+ Xz_Free(&xz, &g_Alloc);
+ return res;
+}
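
A sketch (not part of the patch) that drives Xz_Encode with FILE-backed stream adapters. It assumes Lzma2EncProps_Init() from Lzma2Enc.h (not shown in this patch) for default LZMA2 settings, that a NULL progress callback is accepted, and that the 7zCrc table was generated at startup; CFileInStream_ and CFileOutStream_ are local helper names, not SDK types.

#include <stdio.h>
#include "XzEnc.h"

typedef struct { ISeqInStream s; FILE *f; } CFileInStream_;
typedef struct { ISeqOutStream s; FILE *f; } CFileOutStream_;

static SRes FileIn_Read(void *pp, void *buf, size_t *size)
{
  CFileInStream_ *p = (CFileInStream_ *)pp;
  *size = fread(buf, 1, *size, p->f);     /* 0 bytes read signals end of stream */
  return ferror(p->f) ? SZ_ERROR_READ : SZ_OK;
}

static size_t FileOut_Write(void *pp, const void *buf, size_t size)
{
  CFileOutStream_ *p = (CFileOutStream_ *)pp;
  return fwrite(buf, 1, size, p->f);      /* caller treats result < size as an error */
}

static SRes CompressFile(FILE *in, FILE *out)
{
  CLzma2EncProps props;
  CFileInStream_ inStream;
  CFileOutStream_ outStream;

  Lzma2EncProps_Init(&props);             /* assumed helper from Lzma2Enc.h */
  inStream.s.Read = FileIn_Read;
  inStream.f = in;
  outStream.s.Write = FileOut_Write;
  outStream.f = out;

  /* CRC32 check, no Subblock filter, no progress callback */
  return Xz_Encode(&outStream.s, &inStream.s, &props, False, NULL);
}
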
diff --git a/src/libs/7zip/unix/C/XzEnc.h b/src/libs/7zip/unix/C/XzEnc.h
new file mode 100644
index 000000000..13390df8b
--- /dev/null
+++ b/src/libs/7zip/unix/C/XzEnc.h
@@ -0,0 +1,25 @@
+/* XzEnc.h -- Xz Encode
+2009-04-15 : Igor Pavlov : Public domain */
+
+#ifndef __XZ_ENC_H
+#define __XZ_ENC_H
+
+#include "Lzma2Enc.h"
+
+#include "Xz.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream,
+ const CLzma2EncProps *lzma2Props, Bool useSubblock,
+ ICompressProgress *progress);
+
+SRes Xz_EncodeEmpty(ISeqOutStream *outStream);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/libs/7zip/unix/C/XzIn.c b/src/libs/7zip/unix/C/XzIn.c
new file mode 100644
index 000000000..f8ea86315
--- /dev/null
+++ b/src/libs/7zip/unix/C/XzIn.c
@@ -0,0 +1,306 @@
+/* XzIn.c - Xz input
+2009-06-19 : Igor Pavlov : Public domain */
+
+#include <string.h>
+
+#include "7zCrc.h"
+#include "CpuArch.h"
+#include "Xz.h"
+
+SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream)
+{
+ Byte sig[XZ_STREAM_HEADER_SIZE];
+ RINOK(SeqInStream_Read2(inStream, sig, XZ_STREAM_HEADER_SIZE, SZ_ERROR_NO_ARCHIVE));
+ if (memcmp(sig, XZ_SIG, XZ_SIG_SIZE) != 0)
+ return SZ_ERROR_NO_ARCHIVE;
+ return Xz_ParseHeader(p, sig);
+}
+
+#define READ_VARINT_AND_CHECK(buf, pos, size, res) \
+ { unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \
+ if (s == 0) return SZ_ERROR_ARCHIVE; pos += s; }
+
+SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, Bool *isIndex, UInt32 *headerSizeRes)
+{
+ Byte header[XZ_BLOCK_HEADER_SIZE_MAX];
+ unsigned headerSize;
+ *headerSizeRes = 0;
+ RINOK(SeqInStream_ReadByte(inStream, &header[0]));
+ headerSize = ((unsigned)header[0] << 2) + 4;
+ if (headerSize == 0)
+ {
+ *headerSizeRes = 1;
+ *isIndex = True;
+ return SZ_OK;
+ }
+
+ *isIndex = False;
+ *headerSizeRes = headerSize;
+ RINOK(SeqInStream_Read(inStream, header + 1, headerSize - 1));
+ return XzBlock_Parse(p, header);
+}
+
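
A sketch (not part of the patch) that probes a stream with the two readers above: it parses the stream header, then the first block header, and reports whether the index marker appears immediately (an empty stream). Any ISeqInStream works, for example the memory-backed one sketched after Types.h, and the CRC table is assumed to be generated already.

#include "Xz.h"

static SRes Xz_ProbeFirstBlock(ISeqInStream *in, Bool *isEmpty, CXzBlock *block)
{
  CXzStreamFlags flags;
  Bool isIndex;
  UInt32 headerSize;
  RINOK(Xz_ReadHeader(&flags, in));                 /* signature + flags + CRC32 */
  RINOK(XzBlock_ReadHeader(block, in, &isIndex, &headerSize));
  *isEmpty = isIndex;                               /* 0x00 marker byte => no blocks */
  return SZ_OK;
}
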
+#define ADD_SIZE_CHECH(size, val) \
+ { UInt64 newSize = size + (val); if (newSize < size) return XZ_SIZE_OVERFLOW; size = newSize; }
+
+UInt64 Xz_GetUnpackSize(const CXzStream *p)
+{
+ UInt64 size = 0;
+ size_t i;
+ for (i = 0; i < p->numBlocks; i++)
+ ADD_SIZE_CHECH(size, p->blocks[i].unpackSize);
+ return size;
+}
+
+UInt64 Xz_GetPackSize(const CXzStream *p)
+{
+ UInt64 size = 0;
+ size_t i;
+ for (i = 0; i < p->numBlocks; i++)
+ ADD_SIZE_CHECH(size, (p->blocks[i].totalSize + 3) & ~(UInt64)3);
+ return size;
+}
+
+/*
+SRes XzBlock_ReadFooter(CXzBlock *p, CXzStreamFlags f, ISeqInStream *inStream)
+{
+ return SeqInStream_Read(inStream, p->check, XzFlags_GetCheckSize(f));
+}
+*/
+
+static SRes Xz_ReadIndex2(CXzStream *p, const Byte *buf, size_t size, ISzAlloc *alloc)
+{
+ size_t i, numBlocks, crcStartPos, pos = 1;
+ UInt32 crc;
+
+ if (size < 5 || buf[0] != 0)
+ return SZ_ERROR_ARCHIVE;
+
+ size -= 4;
+ crc = CrcCalc(buf, size);
+ if (crc != GetUi32(buf + size))
+ return SZ_ERROR_ARCHIVE;
+
+ {
+ UInt64 numBlocks64;
+ READ_VARINT_AND_CHECK(buf, pos, size, &numBlocks64);
+ numBlocks = (size_t)numBlocks64;
+ if (numBlocks != numBlocks64 || numBlocks * 2 > size)
+ return SZ_ERROR_ARCHIVE;
+ }
+
+ crcStartPos = pos;
+ Xz_Free(p, alloc);
+ if (numBlocks != 0)
+ {
+ p->numBlocks = numBlocks;
+ p->numBlocksAllocated = numBlocks;
+ p->blocks = alloc->Alloc(alloc, sizeof(CXzBlockSizes) * numBlocks);
+ if (p->blocks == 0)
+ return SZ_ERROR_MEM;
+ for (i = 0; i < numBlocks; i++)
+ {
+ CXzBlockSizes *block = &p->blocks[i];
+ READ_VARINT_AND_CHECK(buf, pos, size, &block->totalSize);
+ READ_VARINT_AND_CHECK(buf, pos, size, &block->unpackSize);
+ if (block->totalSize == 0)
+ return SZ_ERROR_ARCHIVE;
+ }
+ }
+ while ((pos & 3) != 0)
+ if (buf[pos++] != 0)
+ return SZ_ERROR_ARCHIVE;
+ return (pos == size) ? SZ_OK : SZ_ERROR_ARCHIVE;
+}
+
+static SRes Xz_ReadIndex(CXzStream *p, ILookInStream *stream, UInt64 indexSize, ISzAlloc *alloc)
+{
+ SRes res;
+ size_t size;
+ Byte *buf;
+ if (indexSize > ((UInt32)1 << 31))
+ return SZ_ERROR_UNSUPPORTED;
+ size = (size_t)indexSize;
+ if (size != indexSize)
+ return SZ_ERROR_UNSUPPORTED;
+ buf = alloc->Alloc(alloc, size);
+ if (buf == 0)
+ return SZ_ERROR_MEM;
+ res = LookInStream_Read2(stream, buf, size, SZ_ERROR_UNSUPPORTED);
+ if (res == SZ_OK)
+ res = Xz_ReadIndex2(p, buf, size, alloc);
+ alloc->Free(alloc, buf);
+ return res;
+}
+
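+/* Seek relative to the current position; on return *res holds the new
+   absolute position. */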
+static SRes SeekFromCur(ILookInStream *inStream, Int64 *res)
+{
+ return inStream->Seek(inStream, res, SZ_SEEK_CUR);
+}
+
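+/* Locate and read one stream, scanning backward from *startOffset: skip
+   trailing zero padding (in 1 KiB chunks, giving up after ~64 KiB), validate
+   the footer flags and CRC, read the index, then seek to the stream header
+   and check that its flags match the footer. On success *startOffset is left
+   at the first byte of the stream. */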
+static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOffset, ISzAlloc *alloc)
+{
+ UInt64 indexSize;
+ Byte buf[XZ_STREAM_FOOTER_SIZE];
+
+ if ((*startOffset & 3) != 0 || *startOffset < XZ_STREAM_FOOTER_SIZE)
+ return SZ_ERROR_NO_ARCHIVE;
+ *startOffset = -XZ_STREAM_FOOTER_SIZE;
+ RINOK(SeekFromCur(stream, startOffset));
+
+ RINOK(LookInStream_Read2(stream, buf, XZ_STREAM_FOOTER_SIZE, SZ_ERROR_NO_ARCHIVE));
+
+ if (memcmp(buf + 10, XZ_FOOTER_SIG, XZ_FOOTER_SIG_SIZE) != 0)
+ {
+ Int64 i = 0;
+ *startOffset += XZ_STREAM_FOOTER_SIZE;
+ for (;;)
+ {
+ int j;
+ size_t processedSize;
+ #define TEMP_BUF_SIZE (1 << 10)
+ Byte tempBuf[TEMP_BUF_SIZE];
+ if (*startOffset < XZ_STREAM_FOOTER_SIZE || i > (1 << 16))
+ return SZ_ERROR_NO_ARCHIVE;
+ processedSize = (*startOffset > TEMP_BUF_SIZE) ? TEMP_BUF_SIZE : (size_t)*startOffset;
+ i += processedSize;
+ *startOffset = -(Int64)processedSize;
+ RINOK(SeekFromCur(stream, startOffset));
+ RINOK(LookInStream_Read2(stream, tempBuf, processedSize, SZ_ERROR_NO_ARCHIVE));
+      for (j = (int)processedSize; j >= 1; j--) /* j >= 1 keeps tempBuf[j - 1] in range */
+        if (tempBuf[j - 1] != 0)
+ break;
+ if (j != 0)
+ {
+ if ((j & 3) != 0)
+ return SZ_ERROR_NO_ARCHIVE;
+ *startOffset += j;
+ if (*startOffset < XZ_STREAM_FOOTER_SIZE)
+ return SZ_ERROR_NO_ARCHIVE;
+ *startOffset -= XZ_STREAM_FOOTER_SIZE;
+ RINOK(stream->Seek(stream, startOffset, SZ_SEEK_SET));
+ RINOK(LookInStream_Read2(stream, buf, XZ_STREAM_FOOTER_SIZE, SZ_ERROR_NO_ARCHIVE));
+ if (memcmp(buf + 10, XZ_FOOTER_SIG, XZ_FOOTER_SIG_SIZE) != 0)
+ return SZ_ERROR_NO_ARCHIVE;
+ break;
+ }
+ }
+ }
+
+ p->flags = (CXzStreamFlags)GetBe16(buf + 8);
+
+ if (!XzFlags_IsSupported(p->flags))
+ return SZ_ERROR_UNSUPPORTED;
+
+ if (GetUi32(buf) != CrcCalc(buf + 4, 6))
+ return SZ_ERROR_ARCHIVE;
+
+ indexSize = ((UInt64)GetUi32(buf + 4) + 1) << 2;
+
+ *startOffset = -(Int64)(indexSize + XZ_STREAM_FOOTER_SIZE);
+ RINOK(SeekFromCur(stream, startOffset));
+
+ RINOK(Xz_ReadIndex(p, stream, indexSize, alloc));
+
+ {
+ UInt64 totalSize = Xz_GetPackSize(p);
+ UInt64 sum = XZ_STREAM_HEADER_SIZE + totalSize + indexSize;
+ if (totalSize == XZ_SIZE_OVERFLOW ||
+ sum >= ((UInt64)1 << 63) ||
+ totalSize >= ((UInt64)1 << 63))
+ return SZ_ERROR_ARCHIVE;
+ *startOffset = -(Int64)sum;
+ RINOK(SeekFromCur(stream, startOffset));
+ }
+ {
+ CXzStreamFlags headerFlags;
+ CSecToRead secToRead;
+ SecToRead_CreateVTable(&secToRead);
+ secToRead.realStream = stream;
+
+ RINOK(Xz_ReadHeader(&headerFlags, &secToRead.s));
+ return (p->flags == headerFlags) ? SZ_OK : SZ_ERROR_ARCHIVE;
+ }
+}
+
+
+/* ---------- Xz Streams ---------- */
+
+void Xzs_Construct(CXzs *p)
+{
+ p->num = p->numAllocated = 0;
+ p->streams = 0;
+}
+
+void Xzs_Free(CXzs *p, ISzAlloc *alloc)
+{
+ size_t i;
+ for (i = 0; i < p->num; i++)
+ Xz_Free(&p->streams[i], alloc);
+ alloc->Free(alloc, p->streams);
+ p->num = p->numAllocated = 0;
+ p->streams = 0;
+}
+
+UInt64 Xzs_GetNumBlocks(const CXzs *p)
+{
+ UInt64 num = 0;
+ size_t i;
+ for (i = 0; i < p->num; i++)
+ num += p->streams[i].numBlocks;
+ return num;
+}
+
+UInt64 Xzs_GetUnpackSize(const CXzs *p)
+{
+ UInt64 size = 0;
+ size_t i;
+ for (i = 0; i < p->num; i++)
+    ADD_SIZE_CHECK(size, Xz_GetUnpackSize(&p->streams[i]));
+ return size;
+}
+
+/*
+UInt64 Xzs_GetPackSize(const CXzs *p)
+{
+ UInt64 size = 0;
+ size_t i;
+ for (i = 0; i < p->num; i++)
+    ADD_SIZE_CHECK(size, Xz_GetTotalSize(&p->streams[i]));
+ return size;
+}
+*/
+
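+/* Parse a (possibly concatenated) set of xz streams from the end of the file
+   toward the start, appending each stream to p and growing the array
+   geometrically (about 25% per step) when it is full. */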
+SRes Xzs_ReadBackward(CXzs *p, ILookInStream *stream, Int64 *startOffset, ICompressProgress *progress, ISzAlloc *alloc)
+{
+ Int64 endOffset = 0;
+ RINOK(stream->Seek(stream, &endOffset, SZ_SEEK_END));
+ *startOffset = endOffset;
+ for (;;)
+ {
+ CXzStream st;
+ SRes res;
+ Xz_Construct(&st);
+ res = Xz_ReadBackward(&st, stream, startOffset, alloc);
+ st.startOffset = *startOffset;
+ RINOK(res);
+ if (p->num == p->numAllocated)
+ {
+ size_t newNum = p->num + p->num / 4 + 1;
+ Byte *data = (Byte *)alloc->Alloc(alloc, newNum * sizeof(CXzStream));
+ if (data == 0)
+ return SZ_ERROR_MEM;
+ p->numAllocated = newNum;
+ memcpy(data, p->streams, p->num * sizeof(CXzStream));
+ alloc->Free(alloc, p->streams);
+ p->streams = (CXzStream *)data;
+ }
+ p->streams[p->num++] = st;
+ if (*startOffset == 0)
+ break;
+ RINOK(stream->Seek(stream, startOffset, SZ_SEEK_SET));
+ if (progress && progress->Progress(progress, endOffset - *startOffset, (UInt64)(Int64)-1) != SZ_OK)
+ return SZ_ERROR_PROGRESS;
+ }
+ return SZ_OK;
+}
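+
+/*
+Usage sketch (illustration only, not part of the library): given an
+ILookInStream and an ISzAlloc, enumerate the streams of an .xz file.
+PrintXzInfo, ExampleAlloc and ExampleFree are names made up for this sketch;
+any ISzAlloc-compatible allocator pair works. It would additionally need
+<stdio.h> and <stdlib.h>, and CrcGenerateTable() from 7zCrc.h must run once
+before the CRC checks in this file can succeed.
+
+static void *ExampleAlloc(void *p, size_t size) { (void)p; return malloc(size); }
+static void ExampleFree(void *p, void *address) { (void)p; free(address); }
+
+static SRes PrintXzInfo(ILookInStream *stream)
+{
+  ISzAlloc alloc = { ExampleAlloc, ExampleFree };
+  CXzs xzs;
+  Int64 startOffset;
+  SRes res;
+  CrcGenerateTable();
+  Xzs_Construct(&xzs);
+  res = Xzs_ReadBackward(&xzs, stream, &startOffset, NULL, &alloc);
+  if (res == SZ_OK)
+    printf("streams: %u  blocks: %llu  unpacked bytes: %llu\n",
+        (unsigned)xzs.num,
+        (unsigned long long)Xzs_GetNumBlocks(&xzs),
+        (unsigned long long)Xzs_GetUnpackSize(&xzs));
+  Xzs_Free(&xzs, &alloc);
+  return res;
+}
+*/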