diff options
author | root <root> | 2007-11-13 11:41:46 +0000 |
---|---|---|---
committer | root <root> | 2007-11-13 11:41:46 +0000 |
commit | fd9bb318a921bb5c4d79eb7d199f52eb67054451 (patch) | |
tree | 7c8b84c2ccdc970e0c1d2a9c48b98b203fab304e | |
parent | 110fdc6e441d5e2ef11229486534db0f5e36e87d (diff) |
*** empty log message ***
-rw-r--r-- | Changes | 6 | ||||
-rw-r--r-- | bench.c | 4 |
2 files changed, 5 insertions, 5 deletions
@@ -1,15 +1,15 @@ 2.2 - - switched to GPL v2 or any later version. + - switched to 2-clause bsd with "GPL v2 or any later version" option. - speed up compression by ~10-15% in common cases by some manual unrolling. - - import some compiler tricks from JSON::XS. + - import some compiler tricks from JSON::XS, for further speed-ups. - tune hash functions depending on ULTRA_FAST or VERY_FAST settings. - for typical binary data (e.g. /bin/bash, memory dumps, canterbury corpus etc.), speed is now comparable to fastlz, but with better compression ratio. with ULTRA_FAST, it's typically 3-15% faster than fastlz while still maintaining a similar ratio. (amd64 and core 2 duo, ymmv). thanks a lot for the competition :) - - undo inline assembly, it is no longer helpful. + - undo inline assembly in compressor, it is no longer helpful. - no changes to the decompressor. - use a HLOG of 16 by default now (formerly 15). @@ -42,11 +42,11 @@ int main(void) fclose (f); for (lp = 0; lp < 1000; lp++) { - l = lzf_compress (data, DSIZE, data2, DSIZE*2); s=stamp(); + l = lzf_compress (data, DSIZE, data2, DSIZE*2); //l = fastlz_compress_level (1, data, DSIZE, data2); - j = lzf_decompress (data2, l, data3, DSIZE*2); si[0]=measure(s); + j = lzf_decompress (data2, l, data3, DSIZE*2); printf ("\r%10d (%d) ", si[0], l); if (si[0] < min && si[0] > 0) |