lib/compression/lzx-plain: relax size requirements on long file
Author: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
Wed, 23 Nov 2022 00:06:41 +0000 (13:06 +1300)
Committer: Joseph Sutton <jsutton@samba.org>
Thu, 1 Dec 2022 22:56:39 +0000 (22:56 +0000)
We are going to change from a slow exact match algorithm to a fast
heuristic search that will not always get the same results as the
exhaustive search.

To be precise, a million zeros will compress to 112 rather than 93 bytes.

We don't insist on an exact size, because the precise result depends on
the heuristics and is not what this test is checking; we only require the
compressed size to fall within a plausible range.

Signed-off-by: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
Reviewed-by: Joseph Sutton <josephsutton@catalyst.net.nz>
lib/compression/tests/test_lzxpress_plain.c

index 8ce3a7715d0ffc1b1dc621adabeb35f10af4769d..5d2a51eb4662cfa21440f9dbcf7037546854f22b 100644 (file)
@@ -309,7 +309,8 @@ static void test_lzxpress_many_zeros(void **state)
        TALLOC_CTX *tmp_ctx = talloc_new(NULL);
        const size_t N_ZEROS = 1000000;
        const uint8_t *zeros = talloc_zero_size(tmp_ctx, N_ZEROS);
-       const ssize_t expected_c_size = 93;
+       const ssize_t expected_c_size_max = 120;
+       const ssize_t expected_c_size_min = 93;
        ssize_t c_size;
        uint8_t *comp, *decomp;
        static struct timespec t_start, t_end;
@@ -327,8 +328,13 @@ static void test_lzxpress_many_zeros(void **state)
                                   N_ZEROS,
                                   comp,
                                   talloc_get_size(comp));
+       /*
+        * Because our compression depends on heuristics, we don't insist on
+        * an exact size in this case.
+        */
 
-       assert_int_equal(c_size, expected_c_size);
+       assert_true(c_size <= expected_c_size_max);
+       assert_true(c_size >= expected_c_size_min);
 
        decomp = talloc_size(tmp_ctx, N_ZEROS * 2);
        c_size = lzxpress_decompress(comp,