brotli/enc/
find_stride.rs

use core::cmp::{max, min};
use core::ops::{Index, IndexMut, Range};

use super::super::alloc;
use super::super::alloc::{SliceWrapper, SliceWrapperMut};
use super::input_pair::{InputPair, InputReference};
use super::interface;
use super::util::FastLog2;
use crate::enc::combined_alloc::alloc_if;
// f32 doesn't have enough resolution for blocks of data larger than ~3.5 MB
pub type floatY = f64;
// the cost of storing a particular population of data, including the
// approximate cost of a Huffman table describing the frequency of each symbol
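// In closed form the return value is
//   16 * buckets + sum * log2(sum) - sum_i pop[i] * log2(pop[i])
// i.e. the Shannon cost of the samples plus a flat ~16-bit charge per nonzero
// bucket standing in for the table itself. For example, a population with a
// single symbol occurring n times costs exactly 16 bits, since the two
// entropy terms cancel.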
pub fn HuffmanCost(population: &[u32]) -> floatY {
    assert_eq!(population.len(), 256 * 256);
    let mut cost: floatY = 0.0 as floatY;
    let mut sum: floatY = 0.0 as floatY;
    let mut buckets: floatY = 0.0 as floatY;
    for pop in population.iter() {
        if *pop == 0 {
            continue;
        }
        cost -= *pop as floatY * FastLog2(*pop as u64) as floatY;
        sum += *pop as floatY;
        buckets += 1.0 as floatY;
    }

    //println!("Observed {} nonzero buckets with a sum of {}, hc={}", buckets, sum, cost);

    16.0 as floatY * buckets + cost + sum * FastLog2(sum as u64) as floatY
}

// this holds the population of data given one byte of prior context;
// bucket_populations is therefore a 65536-entry dynamically allocated buffer
pub struct EntropyBucketPopulation<AllocU32: alloc::Allocator<u32>> {
    pub bucket_populations: AllocU32::AllocatedMemory,
    pub cached_bit_entropy: floatY,
}

impl<AllocU32: alloc::Allocator<u32>> EntropyBucketPopulation<AllocU32> {
    pub fn new(m32: &mut AllocU32) -> Self {
        let size = 256 * 256;
        EntropyBucketPopulation::<AllocU32> {
            cached_bit_entropy: 0.0,
            bucket_populations: m32.alloc_cell(size),
        }
    }
    pub fn free(&mut self, m32: &mut AllocU32) {
        m32.free_cell(core::mem::take(&mut self.bucket_populations));
    }
    fn clone_from(&mut self, other: &EntropyBucketPopulation<AllocU32>) {
        self.bucket_populations
            .slice_mut()
            .clone_from_slice(other.bucket_populations.slice());
    }
    fn add_assign(&mut self, other: &EntropyBucketPopulation<AllocU32>) {
        assert_eq!(
            self.bucket_populations.slice().len(),
            other.bucket_populations.slice().len()
        );
        for (item, other_item) in self
            .bucket_populations
            .slice_mut()
            .iter_mut()
            .zip(other.bucket_populations.slice().iter())
        {
            *item += *other_item;
        }
        self.cached_bit_entropy = HuffmanCost(self.bucket_populations.slice());
    }
    // clear the allocated memory and reset the literal population to zero
    fn bzero(&mut self) {
        self.cached_bit_entropy = 0.0;
        for bp in self.bucket_populations.slice_mut().iter_mut() {
            *bp = 0;
        }
    }
    // Set this population to the sum of the populations in the given rows whose
    // stride matches. With do_clear the buffer is overwritten (or zeroed if no
    // row matches); otherwise the matching rows are accumulated into it.
    fn initiate_from(
        &mut self,
        rows: [&[Self]; 2],
        rows_stride: [&[u8]; 2],
        stride: u8,
        do_clear: bool,
    ) {
        self.cached_bit_entropy = 0.0;
        let mut found_any = false;
        for (sub_row, sub_stride) in rows.iter().zip(rows_stride.iter()) {
            for (item, istride) in sub_row.iter().zip(sub_stride.iter()) {
                if *istride != stride {
                    continue; // if we chain, then optional was already filtered by stride
                }
                if do_clear && !found_any {
                    self.bucket_populations
                        .slice_mut()
                        .clone_from_slice(item.bucket_populations.slice());
                    found_any = true;
                } else {
                    for (dst, src) in self
                        .bucket_populations
                        .slice_mut()
                        .iter_mut()
                        .zip(item.bucket_populations.slice().iter())
                    {
                        *dst += *src;
                    }
                }
            }
        }
        if do_clear && !found_any {
            self.bzero();
        } else {
            self.cached_bit_entropy = HuffmanCost(self.bucket_populations.slice());
        }
    }
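    // Estimate the bit cost of coding data0 under this node's context model:
    // subtract each sample from a scratch copy of the population and take the
    // difference of the two Huffman costs; bytes whose bucket is already empty
    // ("strays") are charged a flat 8 bits each.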
    fn bit_cost_of_data_subset(
        &mut self,
        data0: &[u8],
        mut stride: u8,
        mut prev_bytes: [u8; NUM_STRIDES],
        scratch: &mut EntropyBucketPopulation<AllocU32>,
    ) -> floatY {
        prev_bytes.reverse();
        stride = max(1, stride); // we return stride=1 to mean 1 away
        scratch
            .bucket_populations
            .slice_mut()
            .clone_from_slice(self.bucket_populations.slice());
        scratch.bucket_populations.slice_mut()[65535] += 1; // touch the last element to
        scratch.bucket_populations.slice_mut()[65535] -= 1; // verify write access to the full 64K buffer
        let mut stray_count = 0.0 as floatY;
        assert_eq!((NUM_STRIDES - 1) & NUM_STRIDES, 0); // must be power of two
        for (index, val) in data0.iter().enumerate() {
            let prior_byte =
                prev_bytes[(index + (NUM_STRIDES - stride as usize)) & (NUM_STRIDES - 1)];
            let loc = &mut scratch.bucket_populations.slice_mut()
                [prior_byte as usize * 256 + *val as usize];
            if *loc == 0 {
                stray_count += 1.0;
            } else {
                *loc -= 1;
            }
            prev_bytes[index & (NUM_STRIDES - 1)] = *val;
        }
        if self.cached_bit_entropy == 0.0 as floatY {
            self.cached_bit_entropy = HuffmanCost(self.bucket_populations.slice());
        }
        debug_assert_eq!(
            HuffmanCost(self.bucket_populations.slice()),
            self.cached_bit_entropy
        );

        scratch.cached_bit_entropy = HuffmanCost(scratch.bucket_populations.slice());
        self.cached_bit_entropy - scratch.cached_bit_entropy + stray_count * 8.0
    }
}

const NUM_STRIDES: usize = 8;
#[derive(Copy, Clone)]
pub struct BucketPopIndex {
    pub val: u8,
    pub six_bits: u8,
    pub stride: u8,
}

impl<AllocU32: alloc::Allocator<u32>> Index<BucketPopIndex> for EntropyBucketPopulation<AllocU32> {
    type Output = u32;
    fn index(&self, index: BucketPopIndex) -> &u32 {
        &self.bucket_populations.slice()
            [index.val as usize + index.six_bits as usize * 256 + index.stride as usize * 256 * 64]
    }
}
impl<AllocU32: alloc::Allocator<u32>> IndexMut<BucketPopIndex>
    for EntropyBucketPopulation<AllocU32>
{
    fn index_mut(&mut self, index: BucketPopIndex) -> &mut u32 {
        &mut self.bucket_populations.slice_mut()
            [index.val as usize + index.six_bits as usize * 256 + index.stride as usize * 256 * 64]
    }
}

pub struct EntropyTally<AllocU32: alloc::Allocator<u32>> {
    pop: [EntropyBucketPopulation<AllocU32>; NUM_STRIDES],
}

const NUM_LEVELS: usize = 4;
const NUM_NODES: usize = (1 << (NUM_LEVELS)) - 1;
pub const NUM_LEAF_NODES: usize = (NUM_NODES + 1) >> 1;
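// With NUM_LEVELS = 4 the pyramid is a complete binary tree of 15 nodes,
// 8 of which are leaves.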

pub struct EntropyPyramid<AllocU32: alloc::Allocator<u32>> {
    pop: [EntropyBucketPopulation<AllocU32>; NUM_NODES],
    stride: [u8; NUM_NODES],
}

impl<AllocU32: alloc::Allocator<u32>> EntropyPyramid<AllocU32> {
    pub fn last_level_range(&self) -> Range<usize> {
        (NUM_NODES - (1 << (NUM_LEVELS - 1)))..NUM_NODES
    }
    pub fn byte_index_to_pyramid_index(&self, byte_index: usize, metablock_size: usize) -> usize {
        let range = self.last_level_range();
        min(
            range.start + (range.end - range.start) * byte_index / metablock_size,
            range.end - 1,
        ) // since we tally after the end of the literal block, it could be after the pyramid
    }
    pub fn reset_scratch_to_deepest_level(&self, output: &mut EntropyTally<AllocU32>) {
        let mut has_modified = [false; NUM_STRIDES];
        //println!("Last level range {:?}", self.last_level_range());
        for index in self.last_level_range() {
            if has_modified[self.stride[index] as usize] {
                output.pop[self.stride[index] as usize].add_assign(&self.pop[index]);
            } else {
                output.pop[self.stride[index] as usize].clone_from(&self.pop[index]);
                has_modified[self.stride[index] as usize] = true;
            }
        }
        for stride in 0..NUM_STRIDES {
            if !has_modified[stride] {
                output.pop[stride].bzero();
                output.pop[stride].cached_bit_entropy = 0.0;
            } else {
                output.pop[stride].cached_bit_entropy =
                    HuffmanCost(output.pop[stride].bucket_populations.slice());
            }
            //println!("BASE PYRAMID {} = {}", stride,output.pop[stride].cached_bit_entropy);
        }
    }
    pub fn stride_last_level_range(&self) -> [u8; NUM_LEAF_NODES] {
        let mut ret = [0u8; NUM_LEAF_NODES];
        ret.clone_from_slice(self.stride.split_at(self.stride.len() - NUM_LEAF_NODES).1);
        ret
    }
    pub fn free(&mut self, m32: &mut AllocU32) {
        for item in self.pop.iter_mut() {
            item.free(m32);
        }
    }
    pub fn disabled_placeholder(_m32: &mut AllocU32) -> Self {
        EntropyPyramid::<AllocU32> {
            pop: core::array::from_fn(|_| EntropyBucketPopulation {
                cached_bit_entropy: 0.0,
                bucket_populations: AllocU32::AllocatedMemory::default(),
            }),
            stride: [0; NUM_NODES],
        }
    }
    pub fn new(m32: &mut AllocU32) -> Self {
        let size = 256 * 256;
        EntropyPyramid::<AllocU32> {
            pop: core::array::from_fn(|_| EntropyBucketPopulation::<AllocU32> {
                cached_bit_entropy: 0.0,
                bucket_populations: m32.alloc_cell(size),
            }),
            stride: [0; NUM_NODES],
        }
    }
    pub fn bit_cost_of_literals(
        &mut self,
        data0: &[u8],
        start_index: u32,
        metablock_len: usize,
        stride: u8,
        previous_bytes: [u8; NUM_STRIDES],
        scratch: &mut EntropyTally<AllocU32>,
    ) -> floatY {
        assert!(stride as usize <= NUM_STRIDES);

        self.pop[self.byte_index_to_pyramid_index(start_index as usize, metablock_len)]
            .bit_cost_of_data_subset(data0, stride, previous_bytes, &mut scratch.pop[0])
    }
    fn populate_entry_stride1(&mut self, input: InputPair, index: u32) {
        let mut prev_val = 0;
        let pyr_item = &mut self.pop[index as usize];
        pyr_item.bzero();
        assert_eq!(pyr_item.bucket_populations.slice()[65535], 0);
        for val in input.0.slice().iter().chain(input.1.slice().iter()) {
            pyr_item.bucket_populations.slice_mut()[prev_val as usize * 256 + *val as usize] += 1;
            prev_val = *val;
        }
        pyr_item.cached_bit_entropy = HuffmanCost(pyr_item.bucket_populations.slice());
        self.stride[index as usize] = 0;
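        // a stored stride of 0 encodes an actual stride of 1; pick_best_stride
        // adds 1 when reading these values back out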
    }
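    // Pick the best stride for this node: seed scratch populations for each
    // candidate stride from already-populated pyramid nodes (mirror_range and
    // prev_range), observe the new input, and keep the stride whose Huffman
    // cost grew the least relative to its seed.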
    fn populate_entry(
        &mut self,
        input: InputPair,
        scratch: &mut EntropyTally<AllocU32>,
        index: u32,
        mirror_range: Option<Range<usize>>,
        prev_range: Option<Range<usize>>,
    ) {
        let mut initial_entropies = [0.0 as floatY; NUM_STRIDES];
        let nothing: &[EntropyBucketPopulation<AllocU32>] = &[];
        let nothing_u8: &[u8] = &[];
        {
            let pop_ranges = [
                match mirror_range {
                    None => nothing,
                    Some(ref ir) => &self.pop[ir.clone()],
                },
                match prev_range {
                    None => nothing,
                    Some(ref pr) => &self.pop[pr.clone()],
                },
            ];
            let stride_ranges = [
                match mirror_range {
                    None => nothing_u8,
                    Some(ref ir) => &self.stride[ir.clone()],
                },
                match prev_range {
                    None => nothing_u8,
                    Some(ref pr) => &self.stride[pr.clone()],
                },
            ];
            for stride in 0..NUM_STRIDES {
                scratch.pop[stride].initiate_from(pop_ranges, stride_ranges, stride as u8, true);
                initial_entropies[stride] = scratch.pop[stride].cached_bit_entropy;
            }
        }
        scratch.observe_input_stream(input.0.slice(), input.1.slice());
        let mut best_entropy_index = 0;
        let mut min_entropy_value = scratch.pop[0].cached_bit_entropy - initial_entropies[0];
        //println!("{} OLD ENTROPY {:} NEW_ENTROPY {:}", best_entropy_index, scratch.pop[0].cached_bit_entropy, initial_entropies[0]);
        for stride in 1..NUM_STRIDES {
            let entropy_value = scratch.pop[stride].cached_bit_entropy - initial_entropies[stride];
            //println!("{} OLD ENTROPY {:} NEW_ENTROPY {:}", stride, scratch.pop[stride].cached_bit_entropy, initial_entropies[stride]);
            if entropy_value < min_entropy_value {
                best_entropy_index = stride;
                min_entropy_value = entropy_value;
            }
        }
        self.pop[index as usize].clone_from(&scratch.pop[best_entropy_index]);
        self.stride[index as usize] = best_entropy_index as u8;
    }
    pub fn populate_stride1(&mut self, input0: &[u8], input1: &[u8]) {
        let input = InputPair(
            InputReference {
                data: input0,
                orig_offset: 0,
            },
            InputReference {
                data: input1,
                orig_offset: input0.len(),
            },
        );
        for i in 0..2 {
            let first_range = if i == 0 {
                input.split_at(input.len() >> 1).0
            } else {
                input.split_at(input.len() >> 1).1
            };
            for j in 0..2 {
                let second_range = if j == 0 {
                    first_range.split_at(input.len() >> 2).0
                } else {
                    first_range.split_at(input.len() >> 2).1
                };
                if NUM_LEVELS == 4 {
                    for k in 0..2 {
                        let third_range = if k == 0 {
                            second_range.split_at(input.len() >> 3).0
                        } else {
                            second_range.split_at(input.len() >> 3).1
                        };
                        self.populate_entry_stride1(third_range, 7 + ((i << 2) + (j << 1) + k));
                    }
                } else {
                    assert_eq!(NUM_LEVELS, 3); // levels are hard-coded for now; we could add more later or make this recursive
                    self.populate_entry_stride1(second_range, 3 + ((i << 1) + j));
                }
            }
        }
    }
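    // Populate the whole pyramid top-down: the root sees the full input, each
    // deeper node sees its half, quarter, or eighth of it, and every node after
    // the root is seeded from ranges of already-populated nodes via the
    // mirror_range/prev_range arguments of populate_entry.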
    pub fn populate(&mut self, input0: &[u8], input1: &[u8], scratch: &mut EntropyTally<AllocU32>) {
        let input = InputPair(
            InputReference {
                data: input0,
                orig_offset: 0,
            },
            InputReference {
                data: input1,
                orig_offset: input0.len(),
            },
        );
        self.populate_entry(input, scratch, 0, None, None); // BASE

        // LEVEL 1
        self.populate_entry(
            input.split_at(input.len() >> 1).0,
            scratch,
            1,
            Some(0..1),
            None,
        );
        self.populate_entry(
            input.split_at(input.len() >> 1).1,
            scratch,
            2,
            None,
            Some(1..2),
        ); // should we use the range from 0..1??

        // LEVEL 2
        self.populate_entry(
            input.split_at(input.len() >> 2).0,
            scratch,
            3,
            Some(1..3),
            None,
        );
        self.populate_entry(
            input
                .split_at(input.len() >> 1)
                .0
                .split_at(input.len() >> 2)
                .1,
            scratch,
            4,
            Some(2..3),
            Some(3..4),
        );
        self.populate_entry(
            input
                .split_at(input.len() >> 1)
                .1
                .split_at(input.len() >> 2)
                .0,
            scratch,
            5,
            Some(3..5),
            None,
        );
        self.populate_entry(
            input
                .split_at(input.len() >> 1)
                .1
                .split_at(input.len() >> 2)
                .1,
            scratch,
            6,
            Some(3..6),
            None,
        );
        if NUM_LEVELS == 4 {
            // LEVEL 3
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .0
                    .split_at(input.len() >> 2)
                    .0
                    .split_at(input.len() >> 3)
                    .0,
                scratch,
                7,
                Some(4..7),
                None,
            );
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .0
                    .split_at(input.len() >> 2)
                    .0
                    .split_at(input.len() >> 3)
                    .1,
                scratch,
                8,
                Some(4..7),
                Some(7..8),
            );
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .0
                    .split_at(input.len() >> 2)
                    .1
                    .split_at(input.len() >> 3)
                    .0,
                scratch,
                9,
                Some(5..7),
                Some(7..9),
            );
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .0
                    .split_at(input.len() >> 2)
                    .1
                    .split_at(input.len() >> 3)
                    .1,
                scratch,
                0x0a,
                Some(5..7),
                Some(7..0xa),
            );

            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .1
                    .split_at(input.len() >> 2)
                    .0
                    .split_at(input.len() >> 3)
                    .0,
                scratch,
                0xb,
                Some(6..7),
                Some(7..0xb),
            );
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .1
                    .split_at(input.len() >> 2)
                    .0
                    .split_at(input.len() >> 3)
                    .1,
                scratch,
                0xc,
                Some(6..7),
                Some(7..0xc),
            );
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .1
                    .split_at(input.len() >> 2)
                    .1
                    .split_at(input.len() >> 3)
                    .0,
                scratch,
                0xd,
                None,
                Some(7..0xd),
            );
            self.populate_entry(
                input
                    .split_at(input.len() >> 1)
                    .1
                    .split_at(input.len() >> 2)
                    .1
                    .split_at(input.len() >> 3)
                    .1,
                scratch,
                0xe,
                None,
                Some(7..0xe),
            );
        } else {
            assert_eq!(NUM_LEVELS, 3); // levels are hard-coded for now; we could add more later or make this recursive
        }
    }
}

impl<AllocU32: alloc::Allocator<u32>> EntropyTally<AllocU32> {
    pub fn new(m32: &mut AllocU32, max_stride_arg: Option<u8>) -> EntropyTally<AllocU32> {
        let size = 256 * 256;
        let max_stride = max_stride_arg.unwrap_or(NUM_STRIDES as u8);
        EntropyTally::<AllocU32> {
            pop: core::array::from_fn(|i| EntropyBucketPopulation {
                cached_bit_entropy: 0.0,
                bucket_populations: alloc_if(max_stride > i as u8, m32, size),
            }),
        }
    }
    pub fn disabled_placeholder(m32: &mut AllocU32) -> EntropyTally<AllocU32> {
        Self::new(m32, Some(0))
    }
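    // Tally each input byte into all NUM_STRIDES populations at once; for
    // stride index s the prior is priors[s], the byte s + 1 positions back.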
    fn observe_input_stream(&mut self, input0: &[u8], input1: &[u8]) {
        let mut priors = [0u8; NUM_STRIDES];
        for val in input0.iter().chain(input1.iter()) {
            for stride in 0..NUM_STRIDES {
                self.pop[stride].bucket_populations.slice_mut()
                    [priors[stride] as usize * 256 + (*val as usize)] += 1;
            }
            {
                let mut tmp = [0u8; NUM_STRIDES - 1];
                tmp.clone_from_slice(&priors[..(NUM_STRIDES - 1)]);
                priors[1..].clone_from_slice(&tmp[..]);
                priors[0] = *val;
            }
        }
        for stride in 0..NUM_STRIDES {
            self.pop[stride].cached_bit_entropy =
                HuffmanCost(self.pop[stride].bucket_populations.slice());
        }
    }
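    // Return the stride whose population grew the least in Huffman cost since
    // the cached snapshot; strides whose cached entropy is zero (unallocated
    // or empty) are skipped.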
    fn identify_best_population_and_update_cache(&mut self) -> u8 {
        let mut old_bit_entropy: [floatY; NUM_STRIDES] = [0.0; NUM_STRIDES];
        for (obe, be) in old_bit_entropy.iter_mut().zip(self.pop.iter_mut()) {
            *obe = be.cached_bit_entropy;
            if *obe != 0.0 {
                be.cached_bit_entropy = HuffmanCost(be.bucket_populations.slice());
            }
        }
        let mut best_stride = 0u8;
        let mut best_entropy = self.pop[0].cached_bit_entropy - old_bit_entropy[0];
        //println!("Weighing {} as {}", best_stride, best_entropy);
        for index in 1..NUM_STRIDES {
            let cur = self.pop[index].cached_bit_entropy - old_bit_entropy[index];
            //println!("Weighing {} as {} = [{} - {}]", index, cur, self.pop[index].cached_bit_entropy, old_bit_entropy[index]);
            if (best_entropy == 0.0 || cur < best_entropy) && old_bit_entropy[index] > 0.0 {
                best_stride = index as u8;
                best_entropy = cur;
            }
        }
        best_stride
    }
    pub fn peek(&mut self) -> &mut EntropyBucketPopulation<AllocU32> {
        &mut self.pop[0]
    }
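    // Collect the NUM_STRIDES bytes preceding bytes_processed, newest first
    // (retval[k] is the byte k + 1 positions back), spanning the boundary
    // between input0 and input1; positions before the start stay zero.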
    pub fn get_previous_bytes(
        &self,
        input0: &[u8],
        input1: &[u8],
        bytes_processed: usize,
    ) -> [u8; NUM_STRIDES] {
        let mut retval = [0u8; NUM_STRIDES];
        for index in 0..NUM_STRIDES {
            let bp_offset = index + 1;
            if bp_offset <= bytes_processed {
                let offset = bytes_processed - bp_offset;
                if offset >= input0.len() {
                    retval[index] = input1[offset - input0.len()];
                } else {
                    retval[index] = input0[offset];
                }
            }
        }
        retval
    }
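    // Walk the command list, tallying the bytes of each literal into every
    // stride population (seeded from the pyramid's deepest level when
    // stride_detection_quality > 1), and return the winning stride as a
    // 1-based value; at quality 1 the pyramid's precomputed stride for the
    // enclosing block is returned instead, and quality 0 disables detection.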
    pub fn pick_best_stride<InputReference: SliceWrapper<u8>>(
        &mut self,
        commands: &[interface::Command<InputReference>],
        input0: &[u8],
        input1: &[u8],
        bytes_processed: &mut usize,
        entropy_pyramid: &EntropyPyramid<AllocU32>,
        stride_detection_quality: u8,
    ) -> u8 {
        if stride_detection_quality == 0 {
            return 0;
        }
        //println!("ENTROPY PYRAMID {:?}", entropy_pyramid.stride);
        if stride_detection_quality > 1 {
            entropy_pyramid.reset_scratch_to_deepest_level(self);
        }
        let mut pyramid_byte_index: usize = 0;
        for cmd in commands.iter() {
            match *cmd {
                interface::Command::Copy(ref copy) => {
                    *bytes_processed += copy.num_bytes as usize;
                }
                interface::Command::Dict(ref dict) => {
                    *bytes_processed += dict.final_size as usize;
                }
                interface::Command::Literal(ref lit) => {
                    if stride_detection_quality > 1 {
                        let mut priors = self.get_previous_bytes(input0, input1, *bytes_processed);
                        for (lindex, val) in lit.data.slice().iter().enumerate() {
                            if lindex == NUM_STRIDES {
                                let vpriors = self.get_previous_bytes(
                                    input0,
                                    input1,
                                    NUM_STRIDES + *bytes_processed,
                                );
                                assert_eq!(vpriors, priors);
                            }
                            for (index, prior) in priors.iter().enumerate() {
                                self.pop[index].bucket_populations.slice_mut()
                                    [256 * (*prior as usize) + *val as usize] += 1;
                                // increment the population value of this literal
                                // for the respective prior for the stride index
                            }
                            {
                                // shift the prior values over for the next item
                                let mut tmp = [0u8; NUM_STRIDES - 1];
                                tmp.clone_from_slice(&priors[..(NUM_STRIDES - 1)]);
                                priors[1..].clone_from_slice(&tmp[..]);
                                priors[0] = *val;
                            }
                        }
                    }
                    *bytes_processed += lit.data.slice().len();
                    pyramid_byte_index = *bytes_processed;
                }
                interface::Command::BlockSwitchCommand(_)
                | interface::Command::BlockSwitchLiteral(_)
                | interface::Command::BlockSwitchDistance(_)
                | interface::Command::PredictionMode(_) => {}
            }
        }

        //println!("ENTROPY PYRAMID {:?} selected {}", entropy_pyramid.stride, best_stride);

        if stride_detection_quality > 1 {
            self.identify_best_population_and_update_cache() + 1
        } else {
            entropy_pyramid.stride[entropy_pyramid
                .byte_index_to_pyramid_index(pyramid_byte_index, input0.len() + input1.len())]
                + 1
        }
    }
    pub fn free(&mut self, m32: &mut AllocU32) {
        for item in self.pop.iter_mut() {
            m32.free_cell(core::mem::take(&mut item.bucket_populations))
        }
    }
    pub fn is_free(&mut self) -> bool {
        self.pop[0].bucket_populations.slice().is_empty()
    }
}
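
// A minimal sanity sketch for HuffmanCost; the module name, test name, and
// tolerance are illustrative choices rather than part of the original file,
// and the test assumes FastLog2 is essentially exact at powers of two and
// that test builds allow a 256 KiB stack array.
#[cfg(test)]
mod huffman_cost_test {
    use super::HuffmanCost;

    #[test]
    fn single_bucket_costs_only_the_table_charge() {
        // With one nonzero bucket, the -n*log2(n) and sum*log2(sum) terms
        // cancel, leaving just the flat 16-bit-per-bucket table estimate.
        let mut pop = [0u32; 256 * 256];
        pop[0] = 1024;
        let cost = HuffmanCost(&pop[..]);
        assert!(cost > 16.0 - 1e-3 && cost < 16.0 + 1e-3);
    }
}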