// brotli/enc/prior_eval.rs

1use core;
2use core::cmp::min;
3#[cfg(feature = "simd")]
4use core::simd::prelude::SimdPartialOrd;
5
6use super::super::alloc;
7use super::super::alloc::{Allocator, SliceWrapper, SliceWrapperMut};
8use super::backward_references::BrotliEncoderParams;
9use super::input_pair::{InputPair, InputReference, InputReferenceMut};
10use super::ir_interpret::{push_base, IRInterpreter};
11use super::util::{floatX, FastLog2u16};
12use super::{find_stride, interface, s16, v8};
13use crate::enc::combined_alloc::{alloc_default, alloc_if};
14
// the high nibble, followed by the low nibbles
// CM prior: 256 contexts x 17 CDFs (1 high-nibble CDF + 16 low-nibble CDFs keyed on it).
pub const CONTEXT_MAP_PRIOR_SIZE: usize = 256 * 17;
// Stride priors: 2 CDFs (high/low nibble) per (context, stride-byte) pair.
pub const STRIDE_PRIOR_SIZE: usize = 256 * 256 * 2;
// Adv prior: 65536 high-nibble CDFs, then low-nibble CDFs keyed on stride byte + nibble.
pub const ADV_PRIOR_SIZE: usize = 65536 + (20 << 16);
// (increment, renormalization limit) used when no speed is configured.
pub const DEFAULT_SPEED: (u16, u16) = (8, 8192);
20
/// Identifies each prior model; the discriminant doubles as the lane index
/// into the per-context score vectors (`v8`) and into the popularity array
/// in `choose_bitmask`.
pub enum WhichPrior {
    CM = 0,
    ADV = 1,
    SLOW_CM = 2,
    FAST_CM = 3,
    STRIDE1 = 4,
    STRIDE2 = 5,
    STRIDE3 = 6,
    STRIDE4 = 7,
    //    STRIDE8 = 8,
    NUM_PRIORS = 8,
    // future ideas
}
34
/// A nibble-prediction model: maps a (stride byte, context) pair to the
/// linear index of its CDF in the model's table, and to its per-prior slot
/// in the score array.
pub trait Prior {
    /// Linear CDF index for this context. `high_nibble` is `Some(h)` when the
    /// low nibble is being coded (the CDF is then keyed on the already-coded
    /// high nibble `h`), and `None` when coding the high nibble itself.
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize;
    /// Mutable CDF view (for cost + adaptation).
    #[inline(always)]
    fn lookup_mut(
        data: &mut [s16],
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> CDF {
        let index = Self::lookup_lin(stride_byte, selected_context, actual_context, high_nibble);
        CDF::from(&mut data[index])
    }
    /// Read-only CDF lookup.
    #[inline(always)]
    fn lookup(
        data: &[s16],
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> &s16 {
        let index = Self::lookup_lin(stride_byte, selected_context, actual_context, high_nibble);
        &data[index]
    }
    /// Interleaved score index: slots for the NUM_PRIORS priors are adjacent;
    /// low-nibble scores live in the 4096.. region keyed by the high nibble,
    /// high-nibble scores are bucketed by the stride byte's top nibble
    /// (mirrors `upper_score_index` / `lower_score_index` below).
    #[allow(unused_variables)]
    #[inline(always)]
    fn score_index(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        let which = Self::which();
        assert!(which < WhichPrior::NUM_PRIORS as usize);
        assert!(actual_context < 256);
        if let Some(nibble) = high_nibble {
            WhichPrior::NUM_PRIORS as usize * (actual_context + 4096 + 256 * nibble as usize)
                + which
        } else {
            WhichPrior::NUM_PRIORS as usize * (actual_context + 256 * (stride_byte >> 4) as usize)
                + which
        }
    }
    /// This prior's `WhichPrior` discriminant (score-lane index).
    fn which() -> usize;
}
85
/// Score-row index for a high-nibble coding event: 16 buckets (by the stride
/// byte's top nibble) of 256 contexts each.
#[inline(always)]
fn upper_score_index(stride_byte: u8, _selected_context: u8, actual_context: usize) -> usize {
    let bucket = usize::from(stride_byte >> 4);
    (bucket << 8) + actual_context
}
/// Score-row index for a low-nibble coding event: rows 4096.. hold 16 buckets
/// (keyed by the already-coded high nibble) of 256 contexts each.
#[inline(always)]
fn lower_score_index(
    _stride_byte: u8,
    _selected_context: u8,
    actual_context: usize,
    high_nibble: u8,
) -> usize {
    debug_assert!(actual_context < 256);
    debug_assert!(high_nibble < 16);
    4096 + (usize::from(high_nibble) << 8) + actual_context
}
101
/// Shared CDF-index computation for all stride priors. Each composite context
/// owns a pair of adjacent CDFs: the even slot codes the high nibble, the odd
/// slot codes the low nibble (keyed on stride low-nibble + high nibble).
#[allow(unused_variables)]
#[inline(always)]
fn stride_lookup_lin(
    stride_byte: u8,
    selected_context: u8,
    actual_context: usize,
    high_nibble: Option<u8>,
) -> usize {
    match high_nibble {
        Some(nibble) => {
            let ctx = actual_context
                | ((stride_byte as usize & 0x0f) << 8)
                | ((nibble as usize) << 12);
            2 * ctx + 1
        }
        None => (actual_context | ((stride_byte as usize) << 8)) * 2,
    }
}
/// Stride-1 prior: conditions on the byte immediately before the literal.
pub struct Stride1Prior {}
impl Stride1Prior {
    // Distance back (minus one) into the stride ring buffer for this prior.
    #[inline(always)]
    pub fn offset() -> usize {
        0
    }
}

impl Prior for Stride1Prior {
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::STRIDE1 as usize
    }
}
139/*impl StridePrior for Stride1Prior {
140    const STRIDE_OFFSET:usize = 0;
141}*/
142pub struct Stride2Prior {}
143impl Stride2Prior {
144    #[inline(always)]
145    pub fn offset() -> usize {
146        1
147    }
148}
149
150impl Prior for Stride2Prior {
151    #[inline(always)]
152    fn lookup_lin(
153        stride_byte: u8,
154        selected_context: u8,
155        actual_context: usize,
156        high_nibble: Option<u8>,
157    ) -> usize {
158        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
159    }
160    #[inline]
161    fn which() -> usize {
162        WhichPrior::STRIDE2 as usize
163    }
164}
165/*impl StridePrior for Stride2Prior {
166    const STRIDE_OFFSET:usize = 1;
167}*/
/// Stride-3 prior: conditions on the byte three positions before the literal.
pub struct Stride3Prior {}
impl Stride3Prior {
    // Distance back (minus one) into the stride ring buffer for this prior.
    #[inline(always)]
    pub fn offset() -> usize {
        2
    }
}

impl Prior for Stride3Prior {
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::STRIDE3 as usize
    }
}
191
192/*impl StridePrior for Stride3Prior {
193    const STRIDE_OFFSET:usize = 2;
194}*/
195pub struct Stride4Prior {}
196impl Stride4Prior {
197    #[inline(always)]
198    pub fn offset() -> usize {
199        3
200    }
201}
202impl Prior for Stride4Prior {
203    #[inline(always)]
204    fn lookup_lin(
205        stride_byte: u8,
206        selected_context: u8,
207        actual_context: usize,
208        high_nibble: Option<u8>,
209    ) -> usize {
210        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
211    }
212    #[inline]
213    fn which() -> usize {
214        WhichPrior::STRIDE4 as usize
215    }
216}
217
218/*impl StridePrior for Stride4Prior {
219    const STRIDE_OFFSET:usize = 3;
220}*/
221/*pub struct Stride8Prior{
222}
223impl StridePrior for Stride8Prior {
224    const STRIDE_OFFSET:usize = 7;
225}
226impl Stride8Prior {
227    #[inline(always)]
228    pub fn offset() -> usize{
229        7
230    }
231}
232impl Prior for Stride8Prior {
233    fn lookup_lin(stride_byte:u8, selected_context:u8, actual_context:usize, high_nibble: Option<u8>) -> usize {
234        stride_lookup_lin(stride_byte, selected_context, actual_context, high_nibble)
235    }
236    #[inline]
237    fn which() -> usize {
238      WhichPrior::STRIDE8 as usize
239    }
240}
241*/
/// Context-map prior: 17 CDFs per context — slot 0 codes the high nibble,
/// slots 1..=16 code the low nibble keyed on the high nibble.
pub struct CMPrior {}
impl Prior for CMPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            (nibble as usize + 1) + 17 * actual_context
        } else {
            17 * actual_context
        }
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::CM as usize
    }
}
/// Fast-adapting context-map prior with only two CDFs per context:
/// slot `2*ctx` serves every low-nibble coding (shared regardless of the
/// high nibble), slot `2*ctx + 1` serves the high nibble.
pub struct FastCMPrior {}
impl Prior for FastCMPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            // NOTE(review): the nibble value itself is deliberately unused
            // here — low nibbles share one CDF per context.
            2 * actual_context
        } else {
            2 * actual_context + 1
        }
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::FAST_CM as usize
    }
}
284
285pub struct SlowCMPrior {}
286impl Prior for SlowCMPrior {
287    #[allow(unused_variables)]
288    #[inline(always)]
289    fn lookup_lin(
290        stride_byte: u8,
291        selected_context: u8,
292        actual_context: usize,
293        high_nibble: Option<u8>,
294    ) -> usize {
295        if let Some(nibble) = high_nibble {
296            (nibble as usize + 1) + 17 * actual_context
297        } else {
298            17 * actual_context
299        }
300    }
301    #[inline]
302    fn which() -> usize {
303        WhichPrior::SLOW_CM as usize
304    }
305}
306
/// "Advanced" prior mixing the context byte with the stride byte.
/// High-nibble CDFs occupy indices < 65536 (context + stride byte's top
/// nibble); low-nibble CDFs start at 65536, keyed on context, full stride
/// byte, and the coded high nibble — consistent with `ADV_PRIOR_SIZE`.
pub struct AdvPrior {}
impl Prior for AdvPrior {
    #[allow(unused_variables)]
    #[inline(always)]
    fn lookup_lin(
        stride_byte: u8,
        selected_context: u8,
        actual_context: usize,
        high_nibble: Option<u8>,
    ) -> usize {
        if let Some(nibble) = high_nibble {
            65536
                + (actual_context | ((stride_byte as usize) << 8) | ((nibble as usize & 0xf) << 16))
        } else {
            actual_context | ((stride_byte as usize & 0xf0) << 8)
        }
    }
    #[inline(always)]
    fn which() -> usize {
        WhichPrior::ADV as usize
    }
}
329
/// Mutable view over one 16-lane cumulative-count vector (`s16`): lane `i`
/// holds the running total through nibble `i`; lane 15 is the grand total.
pub struct CDF<'a> {
    cdf: &'a mut s16,
}
333
impl<'a> CDF<'a> {
    /// Bit cost of coding `nibble_u8` under this CDF:
    /// log2(total) - log2(pdf of the nibble).
    /// Precondition: callers pass values < 16 (always `literal >> 4` or
    /// `literal & 0xf` in this file); a value like 16 would make `nibble`
    /// mask to 0 while taking the `nibble - 1` branch and underflow.
    #[inline(always)]
    pub fn cost(&self, nibble_u8: u8) -> floatX {
        let nibble = nibble_u8 as usize & 0xf;
        let mut pdf = self.cdf[nibble];
        if nibble_u8 != 0 {
            // pdf = cdf[n] - cdf[n-1]
            pdf -= self.cdf[(nibble - 1)];
        }
        FastLog2u16(self.cdf[15] as u16) - FastLog2u16(pdf as u16)
    }
    /// Adapt toward `nibble_u8`: add `speed.0` to every lane strictly above
    /// the coded nibble, then renormalize once the total reaches `speed.1`.
    #[inline(always)]
    pub fn update(&mut self, nibble_u8: u8, speed: (u16, u16)) {
        let mut cdf = *self.cdf;
        let increment_v = s16::splat(speed.0 as i16);
        let one_to_16 = s16::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
        // Lane mask: all-ones where lane index (1..=16) > nibble, else zero,
        // so the increment lands only on lanes at/above the coded symbol.
        let mask_v: s16 = one_to_16.simd_gt(s16::splat(i16::from(nibble_u8))).to_int();
        cdf = cdf + (increment_v & mask_v);
        if cdf[15] >= speed.1 as i16 {
            // Bias by 1..=16 around the >>2 so lanes remain strictly
            // increasing (no zero-probability nibble) after renormalization.
            let cdf_bias = one_to_16;
            cdf = cdf + cdf_bias - ((cdf + cdf_bias) >> 2);
        }
        *self.cdf = cdf;
    }
}
358
// Wrap a raw cumulative-count vector in the CDF accessor.
impl<'a> From<&'a mut s16> for CDF<'a> {
    #[inline(always)]
    fn from(cdf: &'a mut s16) -> CDF<'a> {
        CDF { cdf }
    }
}
365
366pub fn init_cdfs(cdfs: &mut [s16]) {
367    for item in cdfs.iter_mut() {
368        *item = s16::from([4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64]);
369    }
370}
371
/// Replays the command stream (via `IRInterpreter`) through every prior model
/// at once, accumulating per-context bit costs so `choose_bitmask` can pick
/// the cheapest prior for each mixing slot.
pub struct PriorEval<
    'a,
    Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>,
> {
    input: InputPair<'a>,
    context_map: interface::PredictionModeContextMap<InputReferenceMut<'a>>,
    block_type: u8,
    local_byte_offset: usize,
    _nop: <Alloc as Allocator<u32>>::AllocatedMemory,
    // CDF tables, one per prior family (sizes: CONTEXT_MAP_PRIOR_SIZE,
    // STRIDE_PRIOR_SIZE x4, ADV_PRIOR_SIZE).
    cm_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    slow_cm_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    fast_cm_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    stride_priors: [<Alloc as Allocator<s16>>::AllocatedMemory; 4],
    adv_priors: <Alloc as Allocator<s16>>::AllocatedMemory,
    _stride_pyramid_leaves: [u8; find_stride::NUM_LEAF_NODES],
    // Accumulated bit cost per (context bucket, prior lane); 8192 rows of v8.
    score: <Alloc as Allocator<v8>>::AllocatedMemory,
    // (increment, limit) adaptation speeds: index 0 = low nibble, 1 = high.
    cm_speed: [(u16, u16); 2],
    stride_speed: [(u16, u16); 2],
    cur_stride: u8,
}
392
393impl<'a, Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>>
394    PriorEval<'a, Alloc>
395{
    /// Build a `PriorEval` over `input`. Tables are allocated only when
    /// `params.prior_bitmask_detection != 0` (otherwise empty cells are used),
    /// and every CDF is initialized to the uniform distribution.
    pub fn new(
        alloc: &mut Alloc,
        input: InputPair<'a>,
        stride: [u8; find_stride::NUM_LEAF_NODES],
        prediction_mode: interface::PredictionModeContextMap<InputReferenceMut<'a>>,
        params: &BrotliEncoderParams,
    ) -> Self {
        let do_alloc = params.prior_bitmask_detection != 0;
        // Speed selection falls back in stages: value from the prediction
        // mode, then params.literal_adaptation, then DEFAULT_SPEED (slot 0)
        // or slot 0's resolved value (slot 1). (0, 0) means "unset".
        let mut cm_speed = prediction_mode.context_map_speed();
        let mut stride_speed = prediction_mode.stride_context_speed();
        if cm_speed[0] == (0, 0) {
            cm_speed[0] = params.literal_adaptation[2]
        }
        if cm_speed[0] == (0, 0) {
            cm_speed[0] = DEFAULT_SPEED;
        }
        if cm_speed[1] == (0, 0) {
            cm_speed[1] = params.literal_adaptation[3]
        }
        if cm_speed[1] == (0, 0) {
            cm_speed[1] = cm_speed[0];
        }
        if stride_speed[0] == (0, 0) {
            stride_speed[0] = params.literal_adaptation[0]
        }
        if stride_speed[0] == (0, 0) {
            stride_speed[0] = DEFAULT_SPEED;
        }
        if stride_speed[1] == (0, 0) {
            stride_speed[1] = params.literal_adaptation[1]
        }
        if stride_speed[1] == (0, 0) {
            stride_speed[1] = stride_speed[0];
        }
        let mut ret = PriorEval::<Alloc> {
            input,
            context_map: prediction_mode,
            block_type: 0,
            cur_stride: 1,
            local_byte_offset: 0,
            _nop: alloc_default::<u32, Alloc>(),
            cm_priors: alloc_if::<s16, _>(do_alloc, alloc, CONTEXT_MAP_PRIOR_SIZE),
            slow_cm_priors: alloc_if::<s16, _>(do_alloc, alloc, CONTEXT_MAP_PRIOR_SIZE),
            fast_cm_priors: alloc_if::<s16, _>(do_alloc, alloc, CONTEXT_MAP_PRIOR_SIZE),
            stride_priors: [
                alloc_if::<s16, _>(do_alloc, alloc, STRIDE_PRIOR_SIZE),
                alloc_if::<s16, _>(do_alloc, alloc, STRIDE_PRIOR_SIZE),
                alloc_if::<s16, _>(do_alloc, alloc, STRIDE_PRIOR_SIZE),
                alloc_if::<s16, _>(do_alloc, alloc, STRIDE_PRIOR_SIZE),
                /*if do_alloc {m16x16.alloc_cell(STRIDE_PRIOR_SIZE)} else {
                Alloc16x16::AllocatedMemory::default()},*/
            ],
            adv_priors: alloc_if::<s16, _>(do_alloc, alloc, ADV_PRIOR_SIZE),
            _stride_pyramid_leaves: stride,
            score: alloc_if::<v8, _>(do_alloc, alloc, 8192),
            cm_speed,
            stride_speed,
        };
        // Start every model from the uniform CDF.
        init_cdfs(ret.cm_priors.slice_mut());
        init_cdfs(ret.slow_cm_priors.slice_mut());
        init_cdfs(ret.fast_cm_priors.slice_mut());
        init_cdfs(ret.stride_priors[0].slice_mut());
        init_cdfs(ret.stride_priors[1].slice_mut());
        init_cdfs(ret.stride_priors[2].slice_mut());
        init_cdfs(ret.stride_priors[3].slice_mut());
        //init_cdfs(ret.stride_priors[4].slice_mut());
        init_cdfs(ret.adv_priors.slice_mut());
        ret
    }
    /// For every score row, pick the prior with the lowest accumulated bit
    /// cost (a candidate must win by `epsilon` bits) and write the choices
    /// into the context map's mixing values. Rows with no data inherit the
    /// most popular choice seen so far.
    pub fn choose_bitmask(&mut self) {
        let epsilon = 6.0;
        let mut max_popularity = 0u32;
        let mut max_popularity_index = 0u8;
        assert_eq!(WhichPrior::NUM_PRIORS as usize, 8);
        let mut popularity = [0u32; 8];
        let mut bitmask = [0u8; super::interface::NUM_MIXING_VALUES];
        for (i, score) in self.score.slice().iter().enumerate() {
            let cm_score = score[WhichPrior::CM as usize];
            let slow_cm_score = score[WhichPrior::SLOW_CM as usize];
            // NOTE(review): the +16.0 terms look like hand-tuned handicaps on
            // these two priors — confirm against upstream tuning before changing.
            let fast_cm_score = score[WhichPrior::FAST_CM as usize] + 16.0;
            let stride1_score = score[WhichPrior::STRIDE1 as usize];
            let stride2_score = score[WhichPrior::STRIDE2 as usize];
            let stride3_score = score[WhichPrior::STRIDE3 as usize] + 16.0;
            let stride4_score = score[WhichPrior::STRIDE4 as usize];
            //let stride8_score = score[WhichPrior::STRIDE8] * 1.125 + 16.0;
            let stride8_score = stride4_score + 1.0; // FIXME: never lowest -- ignore stride 8
            // Best stride score, compared as integer bit counts.
            let stride_score = min(
                stride1_score as u64,
                min(
                    stride2_score as u64,
                    min(
                        stride3_score as u64,
                        min(stride4_score as u64, stride8_score as u64),
                    ),
                ),
            );

            let adv_score = score[WhichPrior::ADV as usize];
            // Preference order when scores tie within epsilon:
            // ADV, then slow CM, then fast CM, then strides, then CM.
            if adv_score + epsilon < (stride_score as floatX)
                && adv_score + epsilon < cm_score
                && adv_score + epsilon < slow_cm_score
                && adv_score + epsilon < fast_cm_score
            {
                bitmask[i] = 1;
            } else if slow_cm_score + epsilon < (stride_score as floatX)
                && slow_cm_score + epsilon < cm_score
                && slow_cm_score + epsilon < fast_cm_score
            {
                bitmask[i] = 2;
            } else if fast_cm_score + epsilon < (stride_score as floatX)
                && fast_cm_score + epsilon < cm_score
            {
                bitmask[i] = 3;
            } else if epsilon + (stride_score as floatX) < cm_score {
                // Resolve which stride won; later checks override earlier
                // ones, so shorter strides win ties.
                bitmask[i] = WhichPrior::STRIDE1 as u8;
                if stride_score == stride8_score as u64 {
                    //bitmask[i] = WhichPrior::STRIDE8 as u8;
                }
                if stride_score == stride4_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE4 as u8;
                }
                if stride_score == stride3_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE3 as u8;
                }
                if stride_score == stride2_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE2 as u8;
                }
                if stride_score == stride1_score as u64 {
                    bitmask[i] = WhichPrior::STRIDE1 as u8;
                }
            } else {
                bitmask[i] = 0;
            }
            if stride_score == 0 {
                // No evidence for this row: copy the current consensus.
                bitmask[i] = max_popularity_index;
                //eprintln!("Miss {}[{}] ~ {}", bitmask[i], i, max_popularity_index);
            } else {
                popularity[bitmask[i] as usize] += 1;
                if popularity[bitmask[i] as usize] > max_popularity {
                    max_popularity = popularity[bitmask[i] as usize];
                    max_popularity_index = bitmask[i];
                }
                //eprintln!("Score {} {} {} {} {}: {}[{}] max={},{}", cm_score, adv_score, slow_cm_score, fast_cm_score, stride_score, bitmask[i], i, max_popularity, max_popularity_index);
            }
        }
        self.context_map.set_mixing_values(&bitmask);
    }
543    pub fn free(&mut self, alloc: &mut Alloc) {
544        <Alloc as Allocator<v8>>::free_cell(alloc, core::mem::take(&mut self.score));
545        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.cm_priors));
546        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.slow_cm_priors));
547        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.fast_cm_priors));
548        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[0]));
549        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[1]));
550        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[2]));
551        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.stride_priors[3]));
552        //<Alloc as Allocator<s16>>::free_cell(alloc, core::mem::replace(&mut self.stride_priors[4], alloc_default::<s16, Alloc>()));
553        <Alloc as Allocator<s16>>::free_cell(alloc, core::mem::take(&mut self.adv_priors));
554    }
555
556    pub fn take_prediction_mode(
557        &mut self,
558    ) -> interface::PredictionModeContextMap<InputReferenceMut<'a>> {
559        core::mem::replace(
560            &mut self.context_map,
561            interface::PredictionModeContextMap::<InputReferenceMut<'a>> {
562                literal_context_map: InputReferenceMut::default(),
563                predmode_speed_and_distance_context_map: InputReferenceMut::default(),
564            },
565        )
566    }
    /// Cost-and-adapt every prior for one literal: first the high nibble
    /// (`high_nibble = None`), then the low nibble keyed on the high nibble.
    /// Each prior's bit cost lands in its `which()` lane of h_score/l_score,
    /// which are then accumulated into the shared score table.
    fn update_cost_base(
        &mut self,
        stride_prior: [u8; 8],
        stride_prior_offset: usize,
        selected_bits: u8,
        cm_prior: usize,
        literal: u8,
    ) {
        let mut l_score = v8::splat(0.0);
        let mut h_score = v8::splat(0.0);
        // Byte `cur_stride` positions back in the 8-entry ring buffer.
        let base_stride_prior =
            stride_prior[stride_prior_offset.wrapping_sub(self.cur_stride as usize) & 7];
        let hscore_index = upper_score_index(base_stride_prior, selected_bits, cm_prior);
        let lscore_index =
            lower_score_index(base_stride_prior, selected_bits, cm_prior, literal >> 4);
        // CM prior: high nibble (speed slot 1), then low nibble (slot 0).
        {
            type CurPrior = CMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.cm_speed[1]);
        }
        {
            type CurPrior = CMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.cm_speed[0]);
        }
        // Slow CM prior: fixed (0, 1024) speed — counts never increment, only
        // renormalization applies per the hard-coded speed pair.
        {
            type CurPrior = SlowCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.slow_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, (0, 1024));
        }
        {
            type CurPrior = SlowCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.slow_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, (0, 1024));
        }
        // Fast CM prior: cm_speed[0] for both nibbles.
        {
            type CurPrior = FastCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.fast_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.cm_speed[0]);
        }
        {
            type CurPrior = FastCMPrior;
            let mut cdf = CurPrior::lookup_mut(
                self.fast_cm_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.cm_speed[0]);
        }
        // Stride priors 1-4: each conditions on the byte `offset()+1` back,
        // using stride_speed[1] for the high nibble and [0] for the low.
        {
            type CurPrior = Stride1Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[0].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride1Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[0].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        {
            type CurPrior = Stride2Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[1].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride2Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[1].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        {
            type CurPrior = Stride3Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[2].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride3Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[2].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        {
            type CurPrior = Stride4Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[3].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            type CurPrior = Stride4Prior;
            let mut cdf = CurPrior::lookup_mut(
                self.stride_priors[3].slice_mut(),
                stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        /*       {
                   type CurPrior = Stride8Prior;
                   let mut cdf = CurPrior::lookup_mut(self.stride_priors[4].slice_mut(),
                                                      stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset())&7], selected_bits, cm_prior, None);
                   h_score[CurPrior::which()] = cdf.cost(literal>>4);
                   cdf.update(literal >> 4, self.stride_speed[1]);
               }
               {
                   type CurPrior = Stride8Prior;
                   let mut cdf = CurPrior::lookup_mut(self.stride_priors[4].slice_mut(),
                                                      stride_prior[stride_prior_offset.wrapping_sub(CurPrior::offset()) & 7],
                                                      selected_bits,
                                                      cm_prior,
                                                      Some(literal >> 4));
                   l_score[CurPrior::which()] = cdf.cost(literal&0xf);
                   cdf.update(literal&0xf, self.stride_speed[0]);
               }
        */
        // Adv prior: mixes stride byte and context; uses the stride speeds.
        type CurPrior = AdvPrior;
        {
            let mut cdf = CurPrior::lookup_mut(
                self.adv_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                None,
            );
            h_score[CurPrior::which()] = cdf.cost(literal >> 4);
            cdf.update(literal >> 4, self.stride_speed[1]);
        }
        {
            let mut cdf = CurPrior::lookup_mut(
                self.adv_priors.slice_mut(),
                base_stride_prior,
                selected_bits,
                cm_prior,
                Some(literal >> 4),
            );
            l_score[CurPrior::which()] = cdf.cost(literal & 0xf);
            cdf.update(literal & 0xf, self.stride_speed[0]);
        }
        // Accumulate this literal's costs into the running score table.
        self.score.slice_mut()[lscore_index] += l_score;
        self.score.slice_mut()[hscore_index] += h_score;
    }
794}
// Adapter so the IR interpreter can replay the command stream through this
// evaluator; every literal funnels into update_cost_base above.
impl<'a, Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>> IRInterpreter
    for PriorEval<'a, Alloc>
{
    #[inline]
    fn inc_local_byte_offset(&mut self, inc: usize) {
        self.local_byte_offset += inc;
    }
    #[inline]
    fn local_byte_offset(&self) -> usize {
        self.local_byte_offset
    }
    #[inline]
    fn update_block_type(&mut self, new_type: u8, stride: u8) {
        self.block_type = new_type;
        self.cur_stride = stride;
    }
    #[inline]
    fn block_type(&self) -> u8 {
        self.block_type
    }
    #[inline]
    fn literal_data_at_offset(&self, index: usize) -> u8 {
        self.input[index]
    }
    #[inline]
    fn literal_context_map(&self) -> &[u8] {
        self.context_map.literal_context_map.slice()
    }
    #[inline]
    fn prediction_mode(&self) -> crate::interface::LiteralPredictionModeNibble {
        self.context_map.literal_prediction_mode()
    }
    #[inline]
    fn update_cost(
        &mut self,
        stride_prior: [u8; 8],
        stride_prior_offset: usize,
        selected_bits: u8,
        cm_prior: usize,
        literal: u8,
    ) {
        //let stride = self.cur_stride as usize;
        self.update_cost_base(
            stride_prior,
            stride_prior_offset,
            selected_bits,
            cm_prior,
            literal,
        )
    }
}
846
// Feed commands into the shared IR-interpretation machinery (push_base walks
// each command and calls back into the IRInterpreter impl above).
impl<'a, 'b, Alloc: alloc::Allocator<s16> + alloc::Allocator<u32> + alloc::Allocator<v8>>
    interface::CommandProcessor<'b> for PriorEval<'a, Alloc>
{
    #[inline]
    fn push(&mut self, val: interface::Command<InputReference<'b>>) {
        push_base(self, val)
    }
}