timely/worker.rs

//! The root of each single-threaded worker.

use std::rc::Rc;
use std::cell::{RefCell, RefMut};
use std::any::Any;
use std::str::FromStr;
use std::time::{Instant, Duration};
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;

use crate::communication::{Allocate, Exchangeable, Push, Pull};
use crate::communication::allocator::thread::{ThreadPusher, ThreadPuller};
use crate::scheduling::{Schedule, Scheduler, Activations};
use crate::progress::timestamp::{Refines};
use crate::progress::SubgraphBuilder;
use crate::progress::operate::Operate;
use crate::dataflow::scopes::Child;
use crate::logging::TimelyLogger;

/// Different ways in which timely's progress tracking can work.
///
/// These options drive some buffering and accumulation that timely
/// can do to try and trade volume of progress traffic against latency.
/// By accumulating updates longer, a smaller total volume of messages
/// is sent.
///
/// The `ProgressMode::Demand` variant is the most robust, and least
/// likely to lead to catastrophic performance. The `Eager` variant
/// is useful for getting the smallest latencies on systems with few
/// workers, but does risk saturating the system with progress messages
/// and should be used with care, or not at all.
///
/// If you are not certain which option to use, prefer `Demand`, and
/// perhaps monitor the progress messages through timely's logging
/// infrastructure to see if their volume is surprisingly high.
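///
/// # Examples
///
/// A minimal sketch of selecting a mode explicitly (assuming the public
/// module path `timely::worker`, as used by other examples in this file):
/// ```rust
/// use timely::worker::{Config, ProgressMode};
///
/// // Opt in to demand-driven progress updates (also the default).
/// let config = Config::default().progress_mode(ProgressMode::Demand);
/// ```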
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
pub enum ProgressMode {
    /// Eagerly transmit all progress updates produced by a worker.
    ///
    /// Progress messages are transmitted without consideration for the
    /// possibility that they may unblock other workers. This can result
    /// in a substantial volume of messages that do not result in a
    /// change to the lower bound of outstanding work.
    Eager,
    /// Delay transmission of progress updates until any could advance
    /// the global frontier of timestamps.
    ///
    /// As timely executes, the progress messages inform each worker of
    /// the outstanding work remaining in the system. As workers work,
    /// they produce changes to this outstanding work. This option
    /// delays the communication of those changes until they might
    /// possibly cause a change in the lower bound of all outstanding
    /// work.
    ///
    /// The most common case this remedies is when one worker transmits
    /// messages to other workers while it still holds a capability for the
    /// operator and timestamp. With this option, the other workers will not
    /// immediately acknowledge receiving the messages, because the held
    /// capability is strictly prior to what the messages can affect. Once
    /// the capability is released, the progress messages are unblocked and
    /// transmitted, in accumulated form.
    #[default]
    Demand,
}

impl FromStr for ProgressMode {
    type Err = String;

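    /// Parses a progress mode from its lower-case name.
    ///
    /// # Examples
    ///
    /// A small sketch, using the standard `str::parse` entry point:
    /// ```rust
    /// use timely::worker::ProgressMode;
    ///
    /// assert_eq!("eager".parse(), Ok(ProgressMode::Eager));
    /// assert_eq!("demand".parse(), Ok(ProgressMode::Demand));
    /// assert!("other".parse::<ProgressMode>().is_err());
    /// ```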
    fn from_str(s: &str) -> Result<ProgressMode, String> {
        match s {
            "eager" => Ok(ProgressMode::Eager),
            "demand" => Ok(ProgressMode::Demand),
            _ => Err(format!("unknown progress mode: {}", s)),
        }
    }
}

/// Worker configuration.
#[derive(Debug, Default, Clone)]
pub struct Config {
    /// The progress mode to use.
    pub(crate) progress_mode: ProgressMode,
    /// A map from parameter name to typed parameter values.
    registry: HashMap<String, Arc<dyn Any + Send + Sync>>,
}

impl Config {
    /// Installs options into a [getopts::Options] struct that correspond
    /// to the parameters in the configuration.
    ///
    /// It is the caller's responsibility to ensure that the installed options
    /// do not conflict with any other options that may exist in `opts`, or
    /// that may be installed into `opts` in the future.
    ///
    /// This method is only available if the `getopts` feature is enabled, which
    /// it is by default.
    #[cfg(feature = "getopts")]
    pub fn install_options(opts: &mut getopts::Options) {
        opts.optopt("", "progress-mode", "progress tracking mode (eager or demand)", "MODE");
    }

    /// Instantiates a configuration based upon the parsed options in `matches`.
    ///
    /// The `matches` object must have been constructed from a
    /// [getopts::Options] which contained at least the options installed by
    /// [Self::install_options].
    ///
    /// This method is only available if the `getopts` feature is enabled, which
    /// it is by default.
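    ///
    /// # Examples
    ///
    /// A sketch of pairing this with [Self::install_options]; the flag values
    /// shown are illustrative:
    /// ```rust
    /// let mut opts = getopts::Options::new();
    /// timely::worker::Config::install_options(&mut opts);
    ///
    /// let matches = opts.parse(["--progress-mode", "demand"]).unwrap();
    /// let config = timely::worker::Config::from_matches(&matches).unwrap();
    /// ```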
    #[cfg(feature = "getopts")]
    pub fn from_matches(matches: &getopts::Matches) -> Result<Config, String> {
        let progress_mode = matches
            .opt_get_default("progress-mode", ProgressMode::Eager)?;
        Ok(Config::default().progress_mode(progress_mode))
    }

    /// Sets the progress mode to `progress_mode`.
    pub fn progress_mode(mut self, progress_mode: ProgressMode) -> Self {
        self.progress_mode = progress_mode;
        self
    }

    /// Sets a typed configuration parameter for the given `key`.
    ///
    /// It is recommended to install a single configuration struct using a key
    /// that uniquely identifies your project, to avoid clashes. For example,
    /// differential dataflow registers a configuration struct under the key
    /// "differential".
    ///
    /// # Examples
    /// ```rust
    /// let mut config = timely::Config::process(3);
    /// config.worker.set("example".to_string(), 7u64);
    /// timely::execute(config, |worker| {
    ///    use crate::timely::worker::AsWorker;
    ///    assert_eq!(worker.config().get::<u64>("example"), Some(&7));
    /// }).unwrap();
    /// ```
    pub fn set<T>(&mut self, key: String, val: T) -> &mut Self
    where
        T: Send + Sync + 'static,
    {
        self.registry.insert(key, Arc::new(val));
        self
    }

    /// Gets the value for configured parameter `key`.
    ///
    /// Returns `None` if `key` has not previously been set with
    /// [Config::set], or if the specified `T` does not match the `T`
    /// from the call to `set`.
    ///
    /// # Examples
    /// ```rust
    /// let mut config = timely::Config::process(3);
    /// config.worker.set("example".to_string(), 7u64);
    /// timely::execute(config, |worker| {
    ///    use crate::timely::worker::AsWorker;
    ///    assert_eq!(worker.config().get::<u64>("example"), Some(&7));
    /// }).unwrap();
    /// ```
    pub fn get<T: 'static>(&self, key: &str) -> Option<&T> {
        self.registry.get(key).and_then(|val| val.downcast_ref())
    }
}

/// Methods provided by the root Worker.
///
/// These methods are often proxied by child scopes, and this trait provides access.
pub trait AsWorker : Scheduler {
    /// Returns the worker configuration parameters.
    fn config(&self) -> &Config;
    /// Index of the worker among its peers.
    fn index(&self) -> usize;
    /// Number of peer workers.
    fn peers(&self) -> usize;
    /// Allocates a new channel from a supplied identifier and address.
    ///
    /// The identifier is used to identify the underlying channel and route
    /// its data. It should be distinct from other identifiers used for
    /// allocation, but can otherwise be arbitrary.
    ///
    /// The address should specify a path to an operator that should be
    /// scheduled in response to the receipt of records on the channel.
    /// Most commonly, this would be the address of the *target* of the
    /// channel.
    fn allocate<T: Exchangeable>(&mut self, identifier: usize, address: Rc<[usize]>) -> (Vec<Box<dyn Push<T>>>, Box<dyn Pull<T>>);
    /// Constructs a pipeline channel from the worker to itself.
    ///
    /// By default this method uses the native channel allocation mechanism, but the expectation is
    /// that this behavior will be overridden to be more efficient.
    fn pipeline<T: 'static>(&mut self, identifier: usize, address: Rc<[usize]>) -> (ThreadPusher<T>, ThreadPuller<T>);

    /// Allocates a broadcast channel, where each pushed message is received by all.
    fn broadcast<T: Exchangeable + Clone>(&mut self, identifier: usize, address: Rc<[usize]>) -> (Box<dyn Push<T>>, Box<dyn Pull<T>>);

    /// Allocates a new worker-unique identifier.
    fn new_identifier(&mut self) -> usize;
    /// The next worker-unique identifier to be allocated.
    fn peek_identifier(&self) -> usize;
    /// Provides access to named logging streams.
    fn log_register(&self) -> ::std::cell::RefMut<crate::logging_core::Registry>;
    /// Provides access to the timely logging stream.
    fn logging(&self) -> Option<crate::logging::TimelyLogger> { self.log_register().get("timely").map(Into::into) }
}

/// A `Worker` is the entry point to a timely dataflow computation. It wraps an `Allocate`,
/// and has a list of dataflows that it manages.
pub struct Worker<A: Allocate> {
    config: Config,
    timer: Instant,
    paths: Rc<RefCell<HashMap<usize, Rc<[usize]>>>>,
    allocator: Rc<RefCell<A>>,
    identifiers: Rc<RefCell<usize>>,
    // dataflows: Rc<RefCell<Vec<Wrapper>>>,
    dataflows: Rc<RefCell<HashMap<usize, Wrapper>>>,
    dataflow_counter: Rc<RefCell<usize>>,
    logging: Rc<RefCell<crate::logging_core::Registry>>,

    activations: Rc<RefCell<Activations>>,
    active_dataflows: Vec<usize>,

    // Temporary storage for channel identifiers during dataflow construction.
    // These are then associated with a dataflow once constructed.
    temp_channel_ids: Rc<RefCell<Vec<usize>>>,
}

impl<A: Allocate> AsWorker for Worker<A> {
    fn config(&self) -> &Config { &self.config }
    fn index(&self) -> usize { self.allocator.borrow().index() }
    fn peers(&self) -> usize { self.allocator.borrow().peers() }
    fn allocate<D: Exchangeable>(&mut self, identifier: usize, address: Rc<[usize]>) -> (Vec<Box<dyn Push<D>>>, Box<dyn Pull<D>>) {
        if address.is_empty() { panic!("Unacceptable address: Length zero"); }
        let mut paths = self.paths.borrow_mut();
        paths.insert(identifier, address);
        self.temp_channel_ids.borrow_mut().push(identifier);
        self.allocator.borrow_mut().allocate(identifier)
    }
    fn pipeline<T: 'static>(&mut self, identifier: usize, address: Rc<[usize]>) -> (ThreadPusher<T>, ThreadPuller<T>) {
        if address.is_empty() { panic!("Unacceptable address: Length zero"); }
        let mut paths = self.paths.borrow_mut();
        paths.insert(identifier, address);
        self.temp_channel_ids.borrow_mut().push(identifier);
        self.allocator.borrow_mut().pipeline(identifier)
    }
    fn broadcast<T: Exchangeable + Clone>(&mut self, identifier: usize, address: Rc<[usize]>) -> (Box<dyn Push<T>>, Box<dyn Pull<T>>) {
        if address.is_empty() { panic!("Unacceptable address: Length zero"); }
        let mut paths = self.paths.borrow_mut();
        paths.insert(identifier, address);
        self.temp_channel_ids.borrow_mut().push(identifier);
        self.allocator.borrow_mut().broadcast(identifier)
    }

    fn new_identifier(&mut self) -> usize { self.new_identifier() }
    fn peek_identifier(&self) -> usize { self.peek_identifier() }
    fn log_register(&self) -> RefMut<crate::logging_core::Registry> {
        self.log_register()
    }
}

impl<A: Allocate> Scheduler for Worker<A> {
    fn activations(&self) -> Rc<RefCell<Activations>> {
        Rc::clone(&self.activations)
    }
}

impl<A: Allocate> Worker<A> {
    /// Allocates a new `Worker` bound to a channel allocator.
    pub fn new(config: Config, c: A) -> Worker<A> {
        let now = Instant::now();
        Worker {
            config,
            timer: now,
            paths: Default::default(),
            allocator: Rc::new(RefCell::new(c)),
            identifiers: Default::default(),
            dataflows: Default::default(),
            dataflow_counter: Default::default(),
            logging: Rc::new(RefCell::new(crate::logging_core::Registry::new(now))),
            activations: Rc::new(RefCell::new(Activations::new(now))),
            active_dataflows: Default::default(),
            temp_channel_ids: Default::default(),
        }
    }

    /// Performs one step of the computation.
    ///
    /// A step gives each dataflow operator a chance to run, and is the
    /// main way to ensure that a computation proceeds.
    ///
    /// # Examples
    ///
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     use timely::dataflow::operators::{ToStream, Inspect};
    ///
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///         (0 .. 10)
    ///             .to_stream(scope)
    ///             .inspect(|x| println!("{:?}", x));
    ///     });
    ///
    ///     worker.step();
    /// });
    /// ```
    pub fn step(&mut self) -> bool {
        self.step_or_park(Some(Duration::from_secs(0)))
    }

    /// Performs one step of the computation.
    ///
    /// A step gives each dataflow operator a chance to run, and is the
    /// main way to ensure that a computation proceeds.
    ///
    /// This method takes an optional timeout and may park the thread until
    /// there is work to perform or until this timeout expires. A value of
    /// `None` allows the worker to park indefinitely, whereas a value of
    /// `Some(Duration::new(0, 0))` will return without parking the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     use std::time::Duration;
    ///     use timely::dataflow::operators::{ToStream, Inspect};
    ///
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///         (0 .. 10)
    ///             .to_stream(scope)
    ///             .inspect(|x| println!("{:?}", x));
    ///     });
    ///
    ///     worker.step_or_park(Some(Duration::from_secs(1)));
    /// });
    /// ```
    pub fn step_or_park(&mut self, duration: Option<Duration>) -> bool {

        {   // Process channel events. Activate responders.
            let mut allocator = self.allocator.borrow_mut();
            allocator.receive();
            let events = allocator.events();
            let mut borrow = events.borrow_mut();
            let paths = self.paths.borrow();
            borrow.sort_unstable();
            borrow.dedup();
            for channel in borrow.drain(..) {
                // Consider tracking whether a channel
                // is non-empty, and only activating
                // on the basis of non-empty channels.
                // TODO: This is a sloppy way to deal
                // with channels that may not be alloc'd.
                if let Some(path) = paths.get(&channel) {
                    self.activations
                        .borrow_mut()
                        .activate(&path[..]);
                }
            }
        }

        // Organize activations.
        self.activations
            .borrow_mut()
            .advance();

        // Consider parking only if we have no pending events, some dataflows, and a non-zero duration.
        let empty_for = self.activations.borrow().empty_for();
        // Determine the minimum park duration, where `None` indicates the absence of a constraint.
        let delay = match (duration, empty_for) {
            (Some(x), Some(y)) => Some(std::cmp::min(x,y)),
            (x, y) => x.or(y),
        };

        if delay != Some(Duration::new(0,0)) {

            // Log parking and flush log.
            if let Some(l) = self.logging().as_mut() {
                l.log(crate::logging::ParkEvent::park(delay));
                l.flush();
            }

            self.allocator
                .borrow()
                .await_events(delay);

            // Log return from unpark.
            self.logging().as_mut().map(|l| l.log(crate::logging::ParkEvent::unpark()));
        }
        else {   // Schedule active dataflows.

            let active_dataflows = &mut self.active_dataflows;
            self.activations
                .borrow_mut()
                .for_extensions(&[], |index| active_dataflows.push(index));

            let mut dataflows = self.dataflows.borrow_mut();
            for index in active_dataflows.drain(..) {
                // Step the dataflow if it exists; remove it if it has completed.
                if let Entry::Occupied(mut entry) = dataflows.entry(index) {
                    // TODO: This is a moment at which a scheduling decision is being made.
                    let incomplete = entry.get_mut().step();
                    if !incomplete {
                        let mut paths = self.paths.borrow_mut();
                        for channel in entry.get_mut().channel_ids.drain(..) {
                            paths.remove(&channel);
                        }
                        entry.remove_entry();
                    }
                }
            }
        }

        // Clean up, indicate if dataflows remain.
        self.logging.borrow_mut().flush();
        self.allocator.borrow_mut().release();
        !self.dataflows.borrow().is_empty()
    }

    /// Calls `self.step()` as long as `func` evaluates to `true`.
    ///
    /// This method will continually execute even if there is no work
    /// for the worker to perform. Consider using the similar method
    /// `Self::step_or_park_while(duration)` to allow the worker to yield
    /// control if that is appropriate.
    ///
    /// # Examples
    ///
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     use timely::dataflow::operators::{ToStream, Inspect, Probe};
    ///
    ///     let probe =
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///         (0 .. 10)
    ///             .to_stream(scope)
    ///             .inspect(|x| println!("{:?}", x))
    ///             .probe()
    ///     });
    ///
    ///     worker.step_while(|| probe.less_than(&0));
    /// });
    /// ```
    pub fn step_while<F: FnMut()->bool>(&mut self, func: F) {
        self.step_or_park_while(Some(Duration::from_secs(0)), func)
    }

    /// Calls `self.step_or_park(duration)` as long as `func` evaluates to `true`.
    ///
    /// This method may yield whenever there is no work to perform, as performed
    /// by `Self::step_or_park()`. Please consult the documentation for further
    /// information about that method and its behavior. In particular, the method
    /// can park the worker indefinitely, if no new work re-awakens the worker.
    ///
    /// # Examples
    ///
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     use timely::dataflow::operators::{ToStream, Inspect, Probe};
    ///
    ///     let probe =
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///         (0 .. 10)
    ///             .to_stream(scope)
    ///             .inspect(|x| println!("{:?}", x))
    ///             .probe()
    ///     });
    ///
    ///     worker.step_or_park_while(None, || probe.less_than(&0));
    /// });
    /// ```
    pub fn step_or_park_while<F: FnMut()->bool>(&mut self, duration: Option<Duration>, mut func: F) {
        while func() { self.step_or_park(duration); }
    }

    /// The index of the worker out of its peers.
    ///
    /// # Examples
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     let index = worker.index();
    ///     let peers = worker.peers();
    ///     let timer = worker.timer();
    ///
    ///     println!("{:?}\tWorker {} of {}", timer.elapsed(), index, peers);
    ///
    /// });
    /// ```
    pub fn index(&self) -> usize { self.allocator.borrow().index() }
    /// The total number of peer workers.
    ///
    /// # Examples
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     let index = worker.index();
    ///     let peers = worker.peers();
    ///     let timer = worker.timer();
    ///
    ///     println!("{:?}\tWorker {} of {}", timer.elapsed(), index, peers);
    ///
    /// });
    /// ```
    pub fn peers(&self) -> usize { self.allocator.borrow().peers() }

    /// A timer started at the initiation of the timely computation.
    ///
    /// # Examples
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     let index = worker.index();
    ///     let peers = worker.peers();
    ///     let timer = worker.timer();
    ///
    ///     println!("{:?}\tWorker {} of {}", timer.elapsed(), index, peers);
    ///
    /// });
    /// ```
    pub fn timer(&self) -> Instant { self.timer }

    /// Allocate a new worker-unique identifier.
    ///
    /// This method is public, though it is not expected to be widely used outside
    /// of the timely dataflow system.
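    ///
    /// # Examples
    ///
    /// A small sketch of how this relates to [Self::peek_identifier]:
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     let next = worker.peek_identifier();
    ///     let identifier = worker.new_identifier();
    ///
    ///     assert_eq!(identifier, next);
    ///     assert_eq!(worker.peek_identifier(), next + 1);
    /// });
    /// ```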
    pub fn new_identifier(&mut self) -> usize {
        *self.identifiers.borrow_mut() += 1;
        *self.identifiers.borrow() - 1
    }

    /// The next worker-unique identifier to be allocated.
    pub fn peek_identifier(&self) -> usize {
        *self.identifiers.borrow()
    }

    /// Access to named loggers.
    ///
    /// # Examples
    ///
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     worker.log_register()
    ///           .insert::<timely::logging::TimelyEventBuilder,_>("timely", |time, data|
    ///               println!("{:?}\t{:?}", time, data)
    ///           );
    /// });
    /// ```
    pub fn log_register(&self) -> ::std::cell::RefMut<crate::logging_core::Registry> {
        self.logging.borrow_mut()
    }

    /// Construct a new dataflow.
    ///
    /// # Examples
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     // We must supply the timestamp type here, although
    ///     // it would generally be determined by type inference.
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///
    ///         // uses of `scope` to build dataflow
    ///
    ///     });
    /// });
    /// ```
    pub fn dataflow<T, R, F>(&mut self, func: F) -> R
    where
        T: Refines<()>,
        F: FnOnce(&mut Child<Self, T>)->R,
    {
        let logging = self.logging.borrow_mut().get("timely").map(Into::into);
        self.dataflow_core("Dataflow", logging, Box::new(()), |_, child| func(child))
    }

    /// Construct a new dataflow with a (purely cosmetic) name.
    ///
    /// # Examples
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     // We must supply the timestamp type here, although
    ///     // it would generally be determined by type inference.
    ///     worker.dataflow_named::<usize,_,_>("Some Dataflow", |scope| {
    ///
    ///         // uses of `scope` to build dataflow
    ///
    ///     });
    /// });
    /// ```
    pub fn dataflow_named<T, R, F>(&mut self, name: &str, func: F) -> R
    where
        T: Refines<()>,
        F: FnOnce(&mut Child<Self, T>)->R,
    {
        let logging = self.logging.borrow_mut().get("timely").map(Into::into);
        self.dataflow_core(name, logging, Box::new(()), |_, child| func(child))
    }

    /// Construct a new dataflow with specific configurations.
    ///
    /// This method constructs a new dataflow, using a name, logger, and additional
    /// resources specified as argument. The name is cosmetic, the logger is used to
    /// handle events generated by the dataflow, and the additional resources are kept
    /// alive for as long as the dataflow is alive (use case: shared library bindings).
    ///
    /// # Examples
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     // We must supply the timestamp type here, although
    ///     // it would generally be determined by type inference.
    ///     worker.dataflow_core::<usize,_,_,_>(
    ///         "dataflow X",           // Dataflow name
    ///         None,                   // Optional logger
    ///         37,                     // Any resources
    ///         |resources, scope| {    // Closure
    ///
    ///             // uses of `resources` and `scope` to build dataflow
    ///
    ///         }
    ///     );
    /// });
    /// ```
    pub fn dataflow_core<T, R, F, V>(&mut self, name: &str, mut logging: Option<TimelyLogger>, mut resources: V, func: F) -> R
    where
        T: Refines<()>,
        F: FnOnce(&mut V, &mut Child<Self, T>)->R,
        V: Any+'static,
    {
        let dataflow_index = self.allocate_dataflow_index();
        let addr = vec![dataflow_index].into();
        let identifier = self.new_identifier();

        let type_name = std::any::type_name::<T>();
        let progress_logging = self.logging.borrow_mut().get(&format!("timely/progress/{type_name}"));
        let summary_logging = self.logging.borrow_mut().get(&format!("timely/summary/{type_name}"));
        let subscope = SubgraphBuilder::new_from(addr, identifier, logging.clone(), summary_logging, name);
        let subscope = RefCell::new(subscope);

        let result = {
            let mut builder = Child {
                subgraph: &subscope,
                parent: self.clone(),
                logging: logging.clone(),
                progress_logging,
            };
            func(&mut resources, &mut builder)
        };

        let mut operator = subscope.into_inner().build(self);

        if let Some(l) = logging.as_mut() {
            l.log(crate::logging::OperatesEvent {
                id: identifier,
                addr: operator.path().to_vec(),
                name: operator.name().to_string(),
            });
            l.flush();
        }

        operator.get_internal_summary();
        operator.set_external_summary();

        let mut temp_channel_ids = self.temp_channel_ids.borrow_mut();
        let channel_ids = temp_channel_ids.drain(..).collect::<Vec<_>>();

        let wrapper = Wrapper {
            logging,
            identifier,
            operate: Some(Box::new(operator)),
            resources: Some(Box::new(resources)),
            channel_ids,
        };
        self.dataflows.borrow_mut().insert(dataflow_index, wrapper);

        result

    }

    /// Drops an identified dataflow.
    ///
    /// This method removes the identified dataflow, which will no longer be scheduled.
    /// Various other resources will be cleaned up, though the method is still in
    /// public evaluation and may not yet reclaim everything. Please report all
    /// crashes and unmet expectations!
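    ///
    /// # Examples
    ///
    /// A sketch of dropping a dataflow by the index observed before its construction:
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     use timely::dataflow::operators::{ToStream, Inspect};
    ///
    ///     // The next constructed dataflow will receive this index.
    ///     let index = worker.next_dataflow_index();
    ///
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///         (0 .. 10)
    ///             .to_stream(scope)
    ///             .inspect(|x| println!("{:?}", x));
    ///     });
    ///
    ///     assert!(worker.has_dataflows());
    ///     worker.drop_dataflow(index);
    ///     assert!(!worker.has_dataflows());
    /// });
    /// ```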
    pub fn drop_dataflow(&mut self, dataflow_identifier: usize) {
        if let Some(mut entry) = self.dataflows.borrow_mut().remove(&dataflow_identifier) {
            // Garbage collect channel_id to path information.
            let mut paths = self.paths.borrow_mut();
            for channel in entry.channel_ids.drain(..) {
                paths.remove(&channel);
            }
        }
    }

    /// Returns the next index to be used for dataflow construction.
    ///
    /// This identifier will appear in the address of contained operators, and can
    /// be used to drop the dataflow using `self.drop_dataflow()`.
    pub fn next_dataflow_index(&self) -> usize {
        *self.dataflow_counter.borrow()
    }

    /// List the current dataflow indices.
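    ///
    /// # Examples
    ///
    /// A sketch, assuming no dataflows were installed earlier in the closure:
    /// ```
    /// timely::execute_from_args(::std::env::args(), |worker| {
    ///
    ///     use timely::dataflow::operators::ToStream;
    ///
    ///     let index = worker.next_dataflow_index();
    ///     worker.dataflow::<usize,_,_>(|scope| {
    ///         (0 .. 10).to_stream(scope);
    ///     });
    ///
    ///     assert_eq!(worker.installed_dataflows(), vec![index]);
    /// });
    /// ```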
    pub fn installed_dataflows(&self) -> Vec<usize> {
        self.dataflows.borrow().keys().cloned().collect()
    }

    /// Returns `true` if there is at least one dataflow under management.
    pub fn has_dataflows(&self) -> bool {
        !self.dataflows.borrow().is_empty()
    }

    // Acquire a new distinct dataflow identifier.
    fn allocate_dataflow_index(&self) -> usize {
        *self.dataflow_counter.borrow_mut() += 1;
        *self.dataflow_counter.borrow() - 1
    }
}

impl<A: Allocate> Clone for Worker<A> {
    fn clone(&self) -> Self {
        Worker {
            config: self.config.clone(),
            timer: self.timer,
            paths: Rc::clone(&self.paths),
            allocator: Rc::clone(&self.allocator),
            identifiers: Rc::clone(&self.identifiers),
            dataflows: Rc::clone(&self.dataflows),
            dataflow_counter: Rc::clone(&self.dataflow_counter),
            logging: Rc::clone(&self.logging),
            activations: Rc::clone(&self.activations),
            active_dataflows: Vec::new(),
            temp_channel_ids: Rc::clone(&self.temp_channel_ids),
        }
    }
}

struct Wrapper {
    logging: Option<TimelyLogger>,
    identifier: usize,
    operate: Option<Box<dyn Schedule>>,
    resources: Option<Box<dyn Any>>,
    channel_ids: Vec<usize>,
}

impl Wrapper {
    /// Steps the dataflow, and indicates whether it remains incomplete.
    ///
    /// If the dataflow has completed, this call will drop it and its resources,
    /// dropping the dataflow first and then the resources (so that, e.g., shared
    /// library bindings will outlive the dataflow).
    fn step(&mut self) -> bool {

        // Perhaps log information about the start of the schedule call.
        if let Some(l) = self.logging.as_mut() {
            l.log(crate::logging::ScheduleEvent::start(self.identifier));
        }

        let incomplete = self.operate.as_mut().map(|op| op.schedule()).unwrap_or(false);
        if !incomplete {
            self.operate = None;
            self.resources = None;
        }

        // Perhaps log information about the stop of the schedule call.
        if let Some(l) = self.logging.as_mut() {
            l.log(crate::logging::ScheduleEvent::stop(self.identifier));
        }

        incomplete
    }
}

impl Drop for Wrapper {
    fn drop(&mut self) {
        if let Some(l) = self.logging.as_mut() {
            l.log(crate::logging::ShutdownEvent { id: self.identifier });
        }
        // ensure drop order
        self.operate = None;
        self.resources = None;
    }
791}