-rw-r--r--  Changes      |   4
-rw-r--r--  Makefile.in  |   2
-rw-r--r--  lzfP.h       |   4
-rw-r--r--  lzf_c.c      | 179
4 files changed, 98 insertions, 91 deletions
diff --git a/Changes b/Changes
index b2e9891..6835086 100644
--- a/Changes
+++ b/Changes
@@ -1,3 +1,7 @@
+1.2 Mon Dec 29 13:47:28 CET 2003
+ - avoid spurious memory accesses after the to-be-compressed
+ memory region. originally reported by Michal Zalewski.
+
1.1 Tue Dec 23 05:48:32 CET 2003
- removed #warn directive, it's not worth the hassle.
- add LZF_STACK_ARG and AVOID_ERRNO configurations
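The 1.2 fix works by restructuring the main compression loop so the three-byte hash/match probe only runs while at least three input bytes remain readable; the final byte or two fall through to the plain literal path, so nothing past in_end is ever dereferenced. The following is a minimal, hypothetical illustration of that bounds-guard pattern, not the liblzf code itself (count_triples and pat are made-up names; the real loop does hashing and match emission in place of the comparison shown here):

#include <stdio.h>
#include <string.h>

/* Toy example: only touch ip[0..2] while all three bytes lie inside
 * [in, in_end); trailing bytes are stepped over as literals, mirroring
 * the "if (ip < in_end - 2) ... else if (ip == in_end) break;" shape
 * the 1.2 loop adopts. */
static size_t count_triples (const unsigned char *in, size_t in_len,
                             const unsigned char *pat)
{
    const unsigned char *ip     = in;
    const unsigned char *in_end = in + in_len;
    size_t matches = 0;

    for (;;)
    {
        if (ip < in_end - 2)                 /* >= 3 readable bytes */
        {
            if (ip[0] == pat[0] && ip[1] == pat[1] && ip[2] == pat[2])
                matches++;
        }
        else if (ip == in_end)               /* input fully consumed */
            break;

        ip++;                                /* literal step */
    }

    return matches;
}

int main (void)
{
    const unsigned char buf[] = "abcabcab";
    printf ("%zu\n", count_triples (buf, sizeof (buf) - 1,
                                    (const unsigned char *) "abc"));
    return 0;
}

The pre-1.2 do/while structure performed the hash and the ref[2] == ip[2] style comparisons unconditionally on every iteration, which is what allowed reads a few bytes beyond the end of the input buffer.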
diff --git a/Makefile.in b/Makefile.in
index ef5b2d3..4aca98c 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1,4 +1,4 @@
-VERSION = 1.1
+VERSION = 1.2
prefix = @prefix@
exec_prefix = @exec_prefix@
diff --git a/lzfP.h b/lzfP.h
index c007d83..8ca97b0 100644
--- a/lzfP.h
+++ b/lzfP.h
@@ -91,11 +91,11 @@
/*
* Wether to pass the LZF_STATE variable as argument, or allocate it
- * on the stack. For small-stack environments, define this to zero.
+ * on the stack. For small-stack environments, define this to 1.
* NOTE: this breaks the prototype in lzf.h.
*/
#ifndef LZF_STATE_ARG
-# define LZF_STATE_ARG 1
+# define LZF_STATE_ARG 0
#endif
/*****************************************************************************/
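Flipping the LZF_STATE_ARG default to 0 means the hash table is once again allocated on lzf_compress's own stack; defining it to 1 shifts that allocation to the caller and changes the public prototype, as the lzf_c.c hunk below reflects. Roughly, the two declarations differ as sketched here (the real declaration lives in lzf.h and LZF_STATE is defined in lzfP.h):

/* Sketch of the prototype difference controlled by LZF_STATE_ARG. */
#if LZF_STATE_ARG
unsigned int lzf_compress (const void *const in_data, unsigned int in_len,
                           void *out_data, unsigned int out_len,
                           LZF_STATE *htab);   /* caller supplies the hash table */
#else
unsigned int lzf_compress (const void *const in_data, unsigned int in_len,
                           void *out_data, unsigned int out_len);
#endif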
diff --git a/lzf_c.c b/lzf_c.c
index 0c94baa..f96a9a6 100644
--- a/lzf_c.c
+++ b/lzf_c.c
@@ -69,12 +69,12 @@
unsigned int
lzf_compress (const void *const in_data, unsigned int in_len,
void *out_data, unsigned int out_len
-#if !LZF_STATE_ARG
+#if LZF_STATE_ARG
, LZF_STATE *htab
#endif
)
{
-#if LZF_STATE_ARG
+#if !LZF_STATE_ARG
LZF_STATE htab;
#endif
const u8 **hslot;
@@ -97,107 +97,110 @@ lzf_compress (const void *const in_data, unsigned int in_len,
# endif
#endif
- do
+ for (;;)
{
- hval = NEXT (hval, ip);
- hslot = htab + IDX (hval);
- ref = *hslot; *hslot = ip;
+ if (ip < in_end - 2)
+ {
+ hval = NEXT (hval, ip);
+ hslot = htab + IDX (hval);
+ ref = *hslot; *hslot = ip;
- if (1
+ if (1
#if INIT_HTAB && !USE_MEMCPY
- && ref < ip /* the next test will actually take care of this, but it is faster */
+ && ref < ip /* the next test will actually take care of this, but this is faster */
#endif
- && (off = ip - ref - 1) < MAX_OFF
- && ip + 4 < in_end
- && ref > (u8 *)in_data
+ && (off = ip - ref - 1) < MAX_OFF
+ && ip + 4 < in_end
+ && ref > (u8 *)in_data
#if STRICT_ALIGN
- && ref[0] == ip[0]
- && ref[1] == ip[1]
- && ref[2] == ip[2]
+ && ref[0] == ip[0]
+ && ref[1] == ip[1]
+ && ref[2] == ip[2]
#else
- && *(u16 *)ref == *(u16 *)ip
- && ref[2] == ip[2]
+ && *(u16 *)ref == *(u16 *)ip
+ && ref[2] == ip[2]
#endif
- )
- {
- /* match found at *ref++ */
- unsigned int len = 2;
- unsigned int maxlen = in_end - ip - len;
- maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
-
- do
- len++;
- while (len < maxlen && ref[len] == ip[len]);
-
- if (op + lit + 1 + 3 >= out_end)
- return 0;
-
- if (lit)
- {
- *op++ = lit - 1;
- lit = -lit;
- do
- *op++ = ip[lit];
- while (++lit);
- }
-
- len -= 2;
- ip++;
-
- if (len < 7)
- {
- *op++ = (off >> 8) + (len << 5);
- }
- else
- {
- *op++ = (off >> 8) + ( 7 << 5);
- *op++ = len - 7;
- }
-
- *op++ = off;
+ )
+ {
+ /* match found at *ref++ */
+ unsigned int len = 2;
+ unsigned int maxlen = in_end - ip - len;
+ maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
+
+ do
+ len++;
+ while (len < maxlen && ref[len] == ip[len]);
+
+ if (op + lit + 1 + 3 >= out_end)
+ return 0;
+
+ if (lit)
+ {
+ *op++ = lit - 1;
+ lit = -lit;
+ do
+ *op++ = ip[lit];
+ while (++lit);
+ }
+
+ len -= 2;
+ ip++;
+
+ if (len < 7)
+ {
+ *op++ = (off >> 8) + (len << 5);
+ }
+ else
+ {
+ *op++ = (off >> 8) + ( 7 << 5);
+ *op++ = len - 7;
+ }
+
+ *op++ = off;
#if ULTRA_FAST
- ip += len;
- hval = FRST (ip);
- hval = NEXT (hval, ip);
- htab[IDX (hval)] = ip;
- ip++;
+ ip += len;
+ hval = FRST (ip);
+ hval = NEXT (hval, ip);
+ htab[IDX (hval)] = ip;
+ ip++;
#else
- do
- {
- hval = NEXT (hval, ip);
- htab[IDX (hval)] = ip;
- ip++;
- }
- while (len--);
+ do
+ {
+ hval = NEXT (hval, ip);
+ htab[IDX (hval)] = ip;
+ ip++;
+ }
+ while (len--);
#endif
- }
- else
- {
- /* one more literal byte we must copy */
- lit++;
- ip++;
-
- if (lit == MAX_LIT)
- {
- if (op + 1 + MAX_LIT >= out_end)
- return 0;
-
- *op++ = MAX_LIT - 1;
+ continue;
+ }
+ }
+ else if (ip == in_end)
+ break;
+
+ /* one more literal byte we must copy */
+ lit++;
+ ip++;
+
+ if (lit == MAX_LIT)
+ {
+ if (op + 1 + MAX_LIT >= out_end)
+ return 0;
+
+ *op++ = MAX_LIT - 1;
#if USE_MEMCPY
- memcpy (op, ip - MAX_LIT, MAX_LIT);
- op += MAX_LIT;
- lit = 0;
+ memcpy (op, ip - MAX_LIT, MAX_LIT);
+ op += MAX_LIT;
+ lit = 0;
#else
- lit = -lit;
- do
- *op++ = ip[lit];
- while (++lit);
+ lit = -lit;
+ do
+ *op++ = ip[lit];
+ while (++lit);
#endif
- }
- }
+ }
}
- while (ip < in_end);
if (lit)
{