use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;

use differential_dataflow::consolidation::ConsolidatingContainerBuilder;
use differential_dataflow::containers::{Columnation, TimelyStack};
use differential_dataflow::difference::{Multiply, Semigroup};
use differential_dataflow::lattice::Lattice;
use differential_dataflow::trace::{Batcher, Builder, Description};
use differential_dataflow::{AsCollection, Collection, Hashable, VecCollection};
use timely::container::{DrainContainer, PushInto};
use timely::dataflow::channels::pact::{Exchange, ParallelizationContract, Pipeline};
use timely::dataflow::operators::Capability;
use timely::dataflow::operators::generic::builder_rc::{
    OperatorBuilder, OperatorBuilder as OperatorBuilderRc,
};
use timely::dataflow::operators::generic::operator::{self, Operator};
use timely::dataflow::operators::generic::{
    InputHandleCore, OperatorInfo, OutputBuilder, OutputBuilderSession,
};
use timely::dataflow::{Scope, Stream, StreamCore};
use timely::progress::{Antichain, Timestamp};
use timely::{Container, ContainerBuilder, Data, PartialOrder};

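/// Extension methods for timely [`StreamCore`]s.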
pub trait StreamExt<G, C1>
where
    C1: Container + DrainContainer,
    G: Scope,
{
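    /// Like [`Operator::unary`], but the logic writes to two outputs: the first
    /// carries successfully processed data, the second carries errors.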
    fn unary_fallible<DCB, ECB, B, P>(
        &self,
        pact: P,
        name: &str,
        constructor: B,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder,
        ECB: ContainerBuilder,
        B: FnOnce(
            Capability<G::Timestamp>,
            OperatorInfo,
        ) -> Box<
            dyn FnMut(
                &mut InputHandleCore<G::Timestamp, C1, P::Puller>,
                &mut OutputBuilderSession<'_, G::Timestamp, DCB>,
                &mut OutputBuilderSession<'_, G::Timestamp, ECB>,
            ) + 'static,
        >,
        P: ParallelizationContract<G::Timestamp, C1>;

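    /// Like [`timely::dataflow::operators::Map::flat_map`], but `logic` may fail.
    /// The first returned stream contains the successful applications of `logic`,
    /// the second the failed ones.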
    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        logic: L,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<D2>,
        ECB: ContainerBuilder + PushInto<E>,
        I: IntoIterator<Item = Result<D2, E>>,
        L: for<'a> FnMut(C1::Item<'a>) -> I + 'static;

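    /// Holds back a capability for `expiration`, preventing downstream frontiers
    /// from advancing past that time, and logs a warning once if the input
    /// frontier reaches `expiration`. Data passes through unchanged.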
    fn expire_stream_at(&self, name: &str, expiration: G::Timestamp) -> StreamCore<G, C1>;
}

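/// Extension methods for differential [`Collection`]s.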
pub trait CollectionExt<G, D1, R>
where
    G: Scope,
    R: Semigroup,
{
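    /// Creates a new empty collection in `scope`.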
    fn empty(scope: &G) -> VecCollection<G, D1, R>;

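    /// Like [`Collection::map`], but `logic` may fail. The first returned collection
    /// contains the successful applications of `logic`, the second the failed ones.
    ///
    /// A usage sketch (not compiled as a doctest); `CapacityContainerBuilder` stands
    /// in for any builder satisfying the bounds:
    ///
    /// ```ignore
    /// use timely::container::CapacityContainerBuilder as CB;
    ///
    /// let (oks, errs) = texts.map_fallible::<CB<_>, CB<_>, _, _, _>(
    ///     "ParseInts",
    ///     |text: String| text.parse::<i64>().map_err(|e| e.to_string()),
    /// );
    /// ```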
    fn map_fallible<DCB, ECB, D2, E, L>(
        &self,
        name: &str,
        mut logic: L,
    ) -> (VecCollection<G, D2, R>, VecCollection<G, E, R>)
    where
        DCB: ContainerBuilder<Container = Vec<(D2, G::Timestamp, R)>>
            + PushInto<(D2, G::Timestamp, R)>,
        ECB: ContainerBuilder<Container = Vec<(E, G::Timestamp, R)>>
            + PushInto<(E, G::Timestamp, R)>,
        D2: Data,
        E: Data,
        L: FnMut(D1) -> Result<D2, E> + 'static,
    {
        self.flat_map_fallible::<DCB, ECB, _, _, _, _>(name, move |record| Some(logic(record)))
    }

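    /// Like [`Collection::flat_map`], but `logic` may fail. The first returned
    /// collection contains the successful applications of `logic`, the second the
    /// failed ones.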
    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        logic: L,
    ) -> (Collection<G, DCB::Container>, Collection<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<(D2, G::Timestamp, R)>,
        ECB: ContainerBuilder + PushInto<(E, G::Timestamp, R)>,
        D2: Data,
        E: Data,
        I: IntoIterator<Item = Result<D2, E>>,
        L: FnMut(D1) -> I + 'static;

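    /// Like [`StreamExt::expire_stream_at`], but for collections.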
    fn expire_collection_at(&self, name: &str, expiration: G::Timestamp)
        -> VecCollection<G, D1, R>;

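    /// Replaces each record with another, with a new difference type.
    ///
    /// This is most commonly used to move aggregatable data from the record into
    /// the difference component, so that it accumulates under consolidation.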
    fn explode_one<D2, R2, L>(&self, logic: L) -> VecCollection<G, D2, <R2 as Multiply<R>>::Output>
    where
        D2: differential_dataflow::Data,
        R2: Semigroup + Multiply<R>,
        <R2 as Multiply<R>>::Output: Data + Semigroup,
        L: FnMut(D1) -> (D2, R2) + 'static,
        G::Timestamp: Lattice;

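    /// Partitions the input into a collection of updates with positive
    /// multiplicities and a collection of errors, one per update with a
    /// non-positive multiplicity, as produced by `into_err`.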
    fn ensure_monotonic<E, IE>(
        &self,
        into_err: IE,
    ) -> (VecCollection<G, D1, R>, VecCollection<G, E, R>)
    where
        E: Data,
        IE: Fn(D1, R) -> (E, R) + 'static,
        R: num_traits::sign::Signed;

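    /// Consolidates the collection if `must_consolidate` is true, naming the
    /// operator `name`; otherwise returns the collection unchanged.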
    fn consolidate_named_if<Ba>(self, must_consolidate: bool, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Columnation,
        Ba: Batcher<
            Input = Vec<((D1, ()), G::Timestamp, R)>,
            Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
            Time = G::Timestamp,
        > + 'static;

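    /// Consolidates the collection, naming the operator `name`.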
    fn consolidate_named<Ba>(self, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Columnation,
        Ba: Batcher<
            Input = Vec<((D1, ()), G::Timestamp, R)>,
            Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
            Time = G::Timestamp,
        > + 'static;
}

impl<G, C1> StreamExt<G, C1> for StreamCore<G, C1>
where
    C1: Container + DrainContainer,
    G: Scope,
{
    fn unary_fallible<DCB, ECB, B, P>(
        &self,
        pact: P,
        name: &str,
        constructor: B,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder,
        ECB: ContainerBuilder,
        B: FnOnce(
            Capability<G::Timestamp>,
            OperatorInfo,
        ) -> Box<
            dyn FnMut(
                &mut InputHandleCore<G::Timestamp, C1, P::Puller>,
                &mut OutputBuilderSession<'_, G::Timestamp, DCB>,
                &mut OutputBuilderSession<'_, G::Timestamp, ECB>,
            ) + 'static,
        >,
        P: ParallelizationContract<G::Timestamp, C1>,
    {
        let mut builder = OperatorBuilderRc::new(name.into(), self.scope());
        builder.set_notify(false);

        let operator_info = builder.operator_info();

        let mut input = builder.new_input(self, pact);
        let (ok_output, ok_stream) = builder.new_output();
        let mut ok_output = OutputBuilder::from(ok_output);
        let (err_output, err_stream) = builder.new_output();
        let mut err_output = OutputBuilder::from(err_output);

        builder.build(move |mut capabilities| {
            let capability = capabilities.pop().unwrap();
            let mut logic = constructor(capability, operator_info);
            move |_frontiers| {
                let mut ok_output_handle = ok_output.activate();
                let mut err_output_handle = err_output.activate();
                logic(&mut input, &mut ok_output_handle, &mut err_output_handle);
            }
        });

        (ok_stream, err_stream)
    }

    #[allow(clippy::redundant_closure)]
    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        mut logic: L,
    ) -> (StreamCore<G, DCB::Container>, StreamCore<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<D2>,
        ECB: ContainerBuilder + PushInto<E>,
        I: IntoIterator<Item = Result<D2, E>>,
        L: for<'a> FnMut(C1::Item<'a>) -> I + 'static,
    {
        self.unary_fallible::<DCB, ECB, _, _>(Pipeline, name, move |_, _| {
            Box::new(move |input, ok_output, err_output| {
                input.for_each_time(|time, data| {
                    let mut ok_session = ok_output.session_with_builder(&time);
                    let mut err_session = err_output.session_with_builder(&time);
                    for r in data
                        .flat_map(DrainContainer::drain)
                        .flat_map(|d1| logic(d1))
                    {
                        match r {
                            Ok(d2) => ok_session.give(d2),
                            Err(e) => err_session.give(e),
                        }
                    }
                })
            })
        })
    }

    fn expire_stream_at(&self, name: &str, expiration: G::Timestamp) -> StreamCore<G, C1> {
        let name = format!("expire_stream_at({name})");
        self.unary_frontier(Pipeline, &name.clone(), move |cap, _| {
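            // Retain a capability for the expiration time. It is never downgraded
            // or dropped, so downstream frontiers cannot advance past `expiration`.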
            let cap = Some(cap.delayed(&expiration));
            let mut warned = false;
            move |(input, frontier), output| {
                let _ = &cap;
                let frontier = frontier.frontier();
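                // Warn (once) when the input frontier reaches the expiration time.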
                if !frontier.less_than(&expiration) && !warned {
                    tracing::warn!(
                        name = name,
                        frontier = ?frontier,
                        expiration = ?expiration,
                        "frontier not less than expiration"
                    );
                    warned = true;
                }
                input.for_each(|time, data| {
                    let mut session = output.session(&time);
                    session.give_container(data);
                });
            }
        })
    }
}

impl<G, D1, R> CollectionExt<G, D1, R> for VecCollection<G, D1, R>
where
    G: Scope,
    G::Timestamp: Data,
    D1: Data,
    R: Semigroup + 'static,
{
    fn empty(scope: &G) -> VecCollection<G, D1, R> {
        operator::empty(scope).as_collection()
    }

    fn flat_map_fallible<DCB, ECB, D2, E, I, L>(
        &self,
        name: &str,
        mut logic: L,
    ) -> (Collection<G, DCB::Container>, Collection<G, ECB::Container>)
    where
        DCB: ContainerBuilder + PushInto<(D2, G::Timestamp, R)>,
        ECB: ContainerBuilder + PushInto<(E, G::Timestamp, R)>,
        D2: Data,
        E: Data,
        I: IntoIterator<Item = Result<D2, E>>,
        L: FnMut(D1) -> I + 'static,
    {
        let (ok_stream, err_stream) =
            self.inner
                .flat_map_fallible::<DCB, ECB, _, _, _, _>(name, move |(d1, t, r)| {
                    logic(d1).into_iter().map(move |res| match res {
                        Ok(d2) => Ok((d2, t.clone(), r.clone())),
                        Err(e) => Err((e, t.clone(), r.clone())),
                    })
                });
        (ok_stream.as_collection(), err_stream.as_collection())
    }

    fn expire_collection_at(
        &self,
        name: &str,
        expiration: G::Timestamp,
    ) -> VecCollection<G, D1, R> {
        self.inner
            .expire_stream_at(name, expiration)
            .as_collection()
    }

    fn explode_one<D2, R2, L>(
        &self,
        mut logic: L,
    ) -> VecCollection<G, D2, <R2 as Multiply<R>>::Output>
    where
        D2: differential_dataflow::Data,
        R2: Semigroup + Multiply<R>,
        <R2 as Multiply<R>>::Output: Data + Semigroup,
        L: FnMut(D1) -> (D2, R2) + 'static,
        G::Timestamp: Lattice,
    {
        self.inner
            .unary::<ConsolidatingContainerBuilder<_>, _, _, _>(
                Pipeline,
                "ExplodeOne",
                move |_, _| {
                    move |input, output| {
                        input.for_each(|time, data| {
                            output
                                .session_with_builder(&time)
                                .give_iterator(data.drain(..).map(|(x, t, d)| {
                                    let (x, d2) = logic(x);
                                    (x, t, d2.multiply(&d))
                                }));
                        });
                    }
                },
            )
            .as_collection()
    }

    fn ensure_monotonic<E, IE>(
        &self,
        into_err: IE,
    ) -> (VecCollection<G, D1, R>, VecCollection<G, E, R>)
    where
        E: Data,
        IE: Fn(D1, R) -> (E, R) + 'static,
        R: num_traits::sign::Signed,
    {
        let (oks, errs) = self
            .inner
            .unary_fallible(Pipeline, "EnsureMonotonic", move |_, _| {
                Box::new(move |input, ok_output, err_output| {
                    input.for_each(|time, data| {
                        let mut ok_session = ok_output.session(&time);
                        let mut err_session = err_output.session(&time);
                        for (x, t, d) in data.drain(..) {
                            if d.is_positive() {
                                ok_session.give((x, t, d))
                            } else {
                                let (e, d2) = into_err(x, d);
                                err_session.give((e, t, d2))
                            }
                        }
                    })
                })
            });
        (oks.as_collection(), errs.as_collection())
    }

    fn consolidate_named_if<Ba>(self, must_consolidate: bool, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Ord + Columnation,
        Ba: Batcher<
            Input = Vec<((D1, ()), G::Timestamp, R)>,
            Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
            Time = G::Timestamp,
        > + 'static,
    {
        if must_consolidate {
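            // Seeds are fixed constants (hex digits of π) so that every worker and
            // process computes the same hash, which the `Exchange` contract needs
            // to route equal records to the same worker.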
            let random_state = ahash::RandomState::with_seeds(
                0x243f_6a88_85a3_08d3,
                0x1319_8a2e_0370_7344,
                0xa409_3822_299f_31d0,
                0x082e_fa98_ec4e_6c89,
            );
            let exchange = Exchange::new(move |update: &((D1, _), G::Timestamp, R)| {
                let data = &(update.0).0;
                let mut h = random_state.build_hasher();
                data.hash(&mut h);
                h.finish()
            });
            consolidate_pact::<Ba, _, _>(&self.map(|k| (k, ())).inner, exchange, name)
                .unary(Pipeline, &format!("Unpack {name}"), |_, _| {
                    |input, output| {
                        input.for_each(|time, data| {
                            let mut session = output.session(&time);
                            for ((k, ()), t, d) in
                                data.iter().flatten().flat_map(|chunk| chunk.iter())
                            {
                                session.give((k.clone(), t.clone(), d.clone()))
                            }
                        })
                    }
                })
                .as_collection()
        } else {
            self
        }
    }

    fn consolidate_named<Ba>(self, name: &str) -> Self
    where
        D1: differential_dataflow::ExchangeData + Hash + Columnation,
        R: Semigroup + differential_dataflow::ExchangeData + Columnation,
        G::Timestamp: Lattice + Ord + Columnation,
        Ba: Batcher<
            Input = Vec<((D1, ()), G::Timestamp, R)>,
            Output = TimelyStack<((D1, ()), G::Timestamp, R)>,
            Time = G::Timestamp,
        > + 'static,
    {
        let exchange =
            Exchange::new(move |update: &((D1, ()), G::Timestamp, R)| (update.0).0.hashed());

        consolidate_pact::<Ba, _, _>(&self.map(|k| (k, ())).inner, exchange, name)
            .unary(Pipeline, &format!("Unpack {name}"), |_, _| {
                |input, output| {
                    input.for_each(|time, data| {
                        let mut session = output.session(&time);
                        for ((k, ()), t, d) in data.iter().flatten().flat_map(|chunk| chunk.iter())
                        {
                            session.give((k.clone(), t.clone(), d.clone()))
                        }
                    })
                }
            })
            .as_collection()
    }
}

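/// Consolidates all updates routed to the same worker by `pact`, feeding them to a
/// `Ba` batcher that holds them back until their times are complete.
///
/// Whenever the input frontier advances, every update not beyond the new frontier
/// is sealed into a chain of `Ba::Output` containers and emitted at the capability
/// that covers it.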
pub fn consolidate_pact<Ba, P, G>(
    stream: &StreamCore<G, Ba::Input>,
    pact: P,
    name: &str,
) -> Stream<G, Vec<Ba::Output>>
where
    G: Scope,
    Ba: Batcher<Time = G::Timestamp> + 'static,
    Ba::Input: Container + Clone + 'static,
    Ba::Output: Clone,
    P: ParallelizationContract<G::Timestamp, Ba::Input>,
{
    stream.unary_frontier(pact, name, |_cap, info| {
        let logger = stream
            .scope()
            .logger_for("differential/arrange")
            .map(Into::into);

        let mut batcher = Ba::new(logger, info.global_id);
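        // Capabilities for the times of updates the batcher still holds; they are
        // downgraded as the input frontier advances.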
        let mut capabilities = Antichain::<Capability<G::Timestamp>>::new();
        let mut prev_frontier = Antichain::from_elem(G::Timestamp::minimum());

        move |(input, frontier), output| {
            input.for_each(|cap, data| {
                capabilities.insert(cap.retain());
                batcher.push_container(data);
            });

            if prev_frontier.borrow() != frontier.frontier() {
                if capabilities
                    .elements()
                    .iter()
                    .any(|c| !frontier.less_equal(c.time()))
                {
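                    // One or more capabilities are complete. For each such
                    // capability, seal the batcher up to an upper bound formed from
                    // the new frontier and the remaining capabilities, and emit the
                    // resulting chain at that capability.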
                    let mut upper = Antichain::new();
                    for (index, capability) in capabilities.elements().iter().enumerate() {
                        if !frontier.less_equal(capability.time()) {
                            upper.clear();
                            for time in frontier.frontier().iter() {
                                upper.insert(time.clone());
                            }
                            for other_capability in &capabilities.elements()[(index + 1)..] {
                                upper.insert(other_capability.time().clone());
                            }

                            let mut session = output.session(&capabilities.elements()[index]);
                            let batch =
                                batcher.seal::<ConsolidateBuilder<_, Ba::Output>>(upper.clone());
                            session.give(batch);
                        }
                    }

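                    // Downgrade the retained capabilities to the times for which
                    // the batcher still holds updates.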
                    let mut new_capabilities = Antichain::new();
                    for time in batcher.frontier().iter() {
                        if let Some(capability) = capabilities
                            .elements()
                            .iter()
                            .find(|c| c.time().less_equal(time))
                        {
                            new_capabilities.insert(capability.delayed(time));
                        } else {
                            panic!("failed to find capability");
                        }
                    }

                    capabilities = new_capabilities;
                }

                prev_frontier.clear();
                prev_frontier.extend(frontier.frontier().iter().cloned());
            }
        }
    })
}

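/// A batch [`Builder`] whose `seal` hands back the batcher's chain as-is instead of
/// constructing a trace batch.
///
/// The incremental `push`/`done` path is left unimplemented because
/// [`consolidate_pact`] only ever drives this builder through `seal`.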
struct ConsolidateBuilder<T, I> {
    _marker: PhantomData<(T, I)>,
}

impl<T, I> Builder for ConsolidateBuilder<T, I>
where
    T: Timestamp,
    I: Clone,
{
    type Input = I;
    type Time = T;
    type Output = Vec<I>;

    fn new() -> Self {
        Self {
            _marker: PhantomData,
        }
    }

    fn with_capacity(_keys: usize, _vals: usize, _upds: usize) -> Self {
        Self::new()
    }

    fn push(&mut self, _chunk: &mut Self::Input) {
        unimplemented!("ConsolidateBuilder::push")
    }

    fn done(self, _: Description<Self::Time>) -> Self::Output {
        unimplemented!("ConsolidateBuilder::done")
    }

    fn seal(chain: &mut Vec<Self::Input>, _description: Description<Self::Time>) -> Self::Output {
        std::mem::take(chain)
    }
}

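/// Extension trait for concatenating multiple streams while converting their items
/// into a new container type through a [`ContainerBuilder`].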
pub trait ConcatenateFlatten<G: Scope, C: Container + DrainContainer> {
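    /// Merges the given streams (and `self`, when invoked on a stream) into a single
    /// stream, draining each input container and rebuilding the items into
    /// `CB::Container`.
    ///
    /// A usage sketch (not compiled as a doctest); `CapacityContainerBuilder` stands
    /// in for any builder satisfying the bounds:
    ///
    /// ```ignore
    /// use timely::container::CapacityContainerBuilder as CB;
    ///
    /// let merged = first.concatenate_flatten::<_, CB<Vec<u64>>>(vec![second, third]);
    /// ```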
    fn concatenate_flatten<I, CB>(&self, sources: I) -> StreamCore<G, CB::Container>
    where
        I: IntoIterator<Item = StreamCore<G, C>>,
        CB: ContainerBuilder + for<'a> PushInto<C::Item<'a>>;
}

impl<G, C> ConcatenateFlatten<G, C> for StreamCore<G, C>
where
    G: Scope,
    C: Container + DrainContainer,
{
    fn concatenate_flatten<I, CB>(&self, sources: I) -> StreamCore<G, CB::Container>
    where
        I: IntoIterator<Item = StreamCore<G, C>>,
        CB: ContainerBuilder + for<'a> PushInto<C::Item<'a>>,
    {
        let clone = self.clone();
        self.scope()
            .concatenate_flatten::<_, CB>(Some(clone).into_iter().chain(sources))
    }
}

impl<G, C> ConcatenateFlatten<G, C> for G
where
    G: Scope,
    C: Container + DrainContainer,
{
    fn concatenate_flatten<I, CB>(&self, sources: I) -> StreamCore<G, CB::Container>
    where
        I: IntoIterator<Item = StreamCore<G, C>>,
        CB: ContainerBuilder + for<'a> PushInto<C::Item<'a>>,
    {
        let mut builder = OperatorBuilder::new("ConcatenateFlatten".to_string(), self.clone());
        builder.set_notify(false);

        let mut handles = sources
            .into_iter()
            .map(|s| builder.new_input(&s, Pipeline))
            .collect::<Vec<_>>();

        let (output, result) = builder.new_output::<CB::Container>();
        let mut output = OutputBuilder::<_, CB>::from(output);

        builder.build(move |_capability| {
            move |_frontier| {
                let mut output = output.activate();
                for handle in handles.iter_mut() {
                    handle.for_each_time(|time, data| {
                        output
                            .session_with_builder(&time)
                            .give_iterator(data.flat_map(DrainContainer::drain));
                    })
                }
            }
        });

        result
    }
}