pub struct Controller<T: Timestamp + Lattice + Codec64 + From<EpochMillis> + TimestampManipulation> {
pub(crate) build_info: &'static BuildInfo,
pub(crate) now: NowFn,
pub(crate) envd_epoch: NonZeroI64,
pub(crate) read_only: bool,
pub(crate) collections: BTreeMap<GlobalId, CollectionState<T>>,
pub(crate) dropped_objects: BTreeMap<GlobalId, BTreeSet<ReplicaId>>,
pub(crate) persist_table_worker: PersistTableWriteWorker<T>,
pub(crate) txns_read: TxnsRead<T>,
pub(crate) txns_metrics: Arc<Metrics>,
pub(crate) stashed_response: Option<(Option<ReplicaId>, StorageResponse<T>)>,
pub(crate) pending_table_handle_drops_tx: UnboundedSender<GlobalId>,
pub(crate) pending_table_handle_drops_rx: UnboundedReceiver<GlobalId>,
pub(crate) pending_oneshot_ingestions: BTreeMap<Uuid, PendingOneshotIngestion>,
pub(crate) collection_manager: CollectionManager<T>,
pub(crate) introspection_ids: BTreeMap<IntrospectionType, GlobalId>,
pub(crate) introspection_tokens: Arc<Mutex<BTreeMap<GlobalId, Box<dyn Any + Send + Sync>>>>,
pub(crate) source_statistics: Arc<Mutex<SourceStatistics>>,
pub(crate) sink_statistics: Arc<Mutex<BTreeMap<GlobalId, StatsState<SinkStatisticsUpdate>>>>,
pub(crate) statistics_interval_sender: Sender<Duration>,
pub(crate) instances: BTreeMap<StorageInstanceId, Instance<T>>,
pub(crate) initialized: bool,
pub(crate) config: StorageConfiguration,
pub(crate) persist_location: PersistLocation,
pub(crate) persist: Arc<PersistClientCache>,
pub(crate) metrics: StorageControllerMetrics,
pub(crate) recorded_frontiers: BTreeMap<GlobalId, (Antichain<T>, Antichain<T>)>,
pub(crate) recorded_replica_frontiers: BTreeMap<(GlobalId, ReplicaId), Antichain<T>>,
pub(crate) wallclock_lag: WallclockLagFn<T>,
pub(crate) wallclock_lag_history_last_refresh: Instant,
pub(crate) wallclock_lag_histogram_last_refresh: Instant,
pub(crate) storage_collections: Arc<dyn StorageCollections<Timestamp = T> + Send + Sync>,
pub(crate) migrated_storage_collections: BTreeSet<GlobalId>,
pub(crate) maintenance_ticker: Interval,
pub(crate) maintenance_scheduled: bool,
pub(crate) instance_response_tx: UnboundedSender<(Option<ReplicaId>, StorageResponse<T>)>,
pub(crate) instance_response_rx: UnboundedReceiver<(Option<ReplicaId>, StorageResponse<T>)>,
pub(crate) persist_warm_task: Option<AbortOnDropHandle<Box<dyn Debug + Send>>>,
}
A storage controller for a storage instance.
Fields
build_info: &'static BuildInfo
The build information for this process.
now: NowFn
A function that returns the current time.
envd_epoch: NonZeroI64
The fencing token for this instance of the controller.
read_only: bool
Whether or not this controller is in read-only mode.
When in read-only mode, neither this controller nor the instances controlled by it are allowed to affect changes to external systems (largely persist).
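As an illustration only, the kind of gating this flag implies can be sketched as follows; the function and error type here are hypothetical stand-ins, not the controller's actual API:

// Hypothetical sketch: side-effecting operations consult the flag first.
fn write_to_persist(read_only: bool) -> Result<(), String> {
    if read_only {
        // In read-only mode, refuse to affect external systems such as persist.
        return Err("storage controller is in read-only mode".into());
    }
    // ... perform the write against persist ...
    Ok(())
}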
collections: BTreeMap<GlobalId, CollectionState<T>>
Collections maintained by the storage controller.
This collection only grows, although individual collections may be rendered unusable. This is to prevent the re-binding of identifiers to other descriptions.
dropped_objects: BTreeMap<GlobalId, BTreeSet<ReplicaId>>
Map from IDs of objects that have been dropped to replicas we are still expecting DroppedId messages from. This is cleared out once all replicas have responded.
We use this only to catch problems in the protocol between controller and replicas, for example we can differentiate between late messages for objects that have already been dropped and unexpected (read erroneous) messages from the replica.
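To make the bookkeeping concrete, here is a minimal sketch with stand-in id types and a hypothetical handle_dropped_id method; it illustrates the map's use, not the controller's real protocol handler:

use std::collections::{BTreeMap, BTreeSet};

type GlobalId = u64;  // stand-in for the real GlobalId
type ReplicaId = u64; // stand-in for the real ReplicaId

struct DropTracker {
    dropped_objects: BTreeMap<GlobalId, BTreeSet<ReplicaId>>,
}

impl DropTracker {
    // Called when a replica reports that an object has been dropped.
    fn handle_dropped_id(&mut self, id: GlobalId, replica: ReplicaId) {
        match self.dropped_objects.get_mut(&id) {
            Some(pending) => {
                // A late but expected message: stop waiting on this replica.
                pending.remove(&replica);
                if pending.is_empty() {
                    // All replicas have responded; clear the entry.
                    self.dropped_objects.remove(&id);
                }
            }
            // No entry at all: an unexpected (erroneous) message from the replica.
            None => panic!("unexpected DroppedId for {id} from replica {replica}"),
        }
    }
}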
persist_table_worker: PersistTableWriteWorker<T>
Write handle for table shards.
txns_read: TxnsRead<T>
A shared TxnsCache running in a task and communicated with over a channel.
txns_metrics: Arc<Metrics>
stashed_response: Option<(Option<ReplicaId>, StorageResponse<T>)>
pending_table_handle_drops_tx: UnboundedSender<GlobalId>
Channel for sending table handle drops.
pending_table_handle_drops_rx: UnboundedReceiver<GlobalId>
Channel for receiving table handle drops.
pending_oneshot_ingestions: BTreeMap<Uuid, PendingOneshotIngestion>
Closures that can be used to send responses from oneshot ingestions.
collection_manager: CollectionManager<T>
Interface for managed collections
introspection_ids: BTreeMap<IntrospectionType, GlobalId>
Tracks which collection is responsible for which IntrospectionType.
introspection_tokens: Arc<Mutex<BTreeMap<GlobalId, Box<dyn Any + Send + Sync>>>>
Tokens for tasks that drive updating introspection collections. Dropping this will make sure that any tasks (or other resources) will stop when needed.
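The "dropping the token stops the task" pattern can be sketched with a plain tokio drop guard; this is an illustrative type, not the controller's actual token:

use tokio::task::JoinHandle;

// A token that aborts its background task when dropped.
struct TaskToken<T>(JoinHandle<T>);

impl<T> Drop for TaskToken<T> {
    fn drop(&mut self) {
        // Aborting the handle stops the task that drives the introspection collection.
        self.0.abort();
    }
}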
source_statistics: Arc<Mutex<SourceStatistics>>
Consolidated metrics updates to periodically write. We do not eagerly initialize this, and its contents are entirely driven by StorageResponse::StatisticsUpdates, as well as webhook statistics.
sink_statistics: Arc<Mutex<BTreeMap<GlobalId, StatsState<SinkStatisticsUpdate>>>>
Consolidated metrics updates to periodically write. We do not eagerly initialize this, and its contents are entirely driven by StorageResponse::StatisticsUpdates.
statistics_interval_sender: Sender<Duration>
A way to update the statistics interval in the statistics tasks.
instances: BTreeMap<StorageInstanceId, Instance<T>>
Clients for all known storage instances.
initialized: bool
Set to true once initialization_complete has been called.
config: StorageConfiguration
Storage configuration to apply to newly provisioned instances, and use during purification.
persist_location: PersistLocation
The persist location where all storage collections are being written to
persist: Arc<PersistClientCache>
A persist client used to write to storage collections
metrics: StorageControllerMetrics
Metrics of the Storage controller
recorded_frontiers: BTreeMap<GlobalId, (Antichain<T>, Antichain<T>)>
(read, write) frontiers that have been recorded in the Frontiers collection, kept to be able to retract old rows.
recorded_replica_frontiers: BTreeMap<(GlobalId, ReplicaId), Antichain<T>>
Write frontiers that have been recorded in the ReplicaFrontiers collection, kept to be able to retract old rows.
wallclock_lag: WallclockLagFn<T>
A function that computes the lag between the given time and wallclock time.
wallclock_lag_history_last_refresh: Instant
The last time WallclockLagHistory introspection was refreshed.
wallclock_lag_histogram_last_refresh: Instant
The last time WallclockLagHistogram introspection was refreshed.
storage_collections: Arc<dyn StorageCollections<Timestamp = T> + Send + Sync>
Handle to a StorageCollections.
migrated_storage_collections: BTreeSet<GlobalId>
Migrated storage collections that can be written even in read only mode.
maintenance_ticker: Interval
Ticker for scheduling periodic maintenance work.
maintenance_scheduled: bool
Whether maintenance work was scheduled.
instance_response_tx: UnboundedSender<(Option<ReplicaId>, StorageResponse<T>)>
Shared transmit channel for replicas to send responses.
instance_response_rx: UnboundedReceiver<(Option<ReplicaId>, StorageResponse<T>)>
Receive end for replica responses.
persist_warm_task: Option<AbortOnDropHandle<Box<dyn Debug + Send>>>
Background task run at startup to warm persist state.
Implementations
impl<T> Controller<T>
where
    T: Timestamp + Lattice + TotalOrder + Codec64 + From<EpochMillis> + TimestampManipulation + Into<Datum<'static>>,
    StorageCommand<T>: RustType<ProtoStorageCommand>,
    StorageResponse<T>: RustType<ProtoStorageResponse>,
    Self: StorageController<Timestamp = T>,
pub async fn new(
    build_info: &'static BuildInfo,
    persist_location: PersistLocation,
    persist_clients: Arc<PersistClientCache>,
    now: NowFn,
    wallclock_lag: WallclockLagFn<T>,
    txns_metrics: Arc<TxnMetrics>,
    envd_epoch: NonZeroI64,
    read_only: bool,
    metrics_registry: &MetricsRegistry,
    controller_metrics: ControllerMetrics,
    connection_context: ConnectionContext,
    txn: &dyn StorageTxn<T>,
    storage_collections: Arc<dyn StorageCollections<Timestamp = T> + Send + Sync>,
) -> Self
Create a new storage controller from a client it should wrap.
Note that when creating a new storage controller, you must also reconcile it with the previous state.
Panics
If this function is called before prepare_initialization.
pub(crate) fn set_hold_policies( &mut self, policies: Vec<(GlobalId, ReadPolicy<T>)>, )
pub(crate) fn update_write_frontiers( &mut self, updates: &[(GlobalId, Antichain<T>)], )
pub(crate) fn update_hold_capabilities( &mut self, updates: &mut BTreeMap<GlobalId, ChangeBatch<T>>, )
pub(crate) fn validate_collection_ids(
    &self,
    ids: impl Iterator<Item = GlobalId>,
) -> Result<(), StorageError<T>>
Validate that a collection exists for all identifiers, and error if any do not.
pub(crate) fn validate_export_ids(
    &self,
    ids: impl Iterator<Item = GlobalId>,
) -> Result<(), StorageError<T>>
Validate that an export exists for all identifiers, and error if any do not.
pub(crate) async fn open_data_handles(
    &self,
    id: &GlobalId,
    shard: ShardId,
    relation_desc: RelationDesc,
    persist_client: &PersistClient,
) -> WriteHandle<SourceData, (), T, StorageDiff>
Opens write and critical-since handles for the given shard.
since is an optional since that the read handle will be forwarded to if it is less than its current since.
This will halt! the process if we cannot successfully acquire a critical handle with our current epoch.
pub(crate) fn register_introspection_collection(
    &mut self,
    id: GlobalId,
    introspection_type: IntrospectionType,
    write_handle: WriteHandle<SourceData, (), T, StorageDiff>,
    persist_client: PersistClient,
) -> Result<(), StorageError<T>>
Registers the given introspection collection and does any preparatory work that we have to do before we start writing to it. This preparatory work will include partial truncation or other cleanup schemes, depending on introspection type.
pub(crate) fn reconcile_dangling_statistics(&self)
Remove statistics for sources/sinks that were dropped but still have statistics rows hanging around.
pub(crate) fn append_shard_mappings<I>(&self, global_ids: I, diff: Diff)
Appends a new (global ID, shard ID) pair to the appropriate collection. Use a diff of 1 to append a new entry and -1 to retract an existing entry (see the sketch after the panics list below).
Panics
- If self.collections does not have an entry for global_id.
- If IntrospectionType::ShardMapping’s GlobalId is not registered as a managed collection.
- If diff is any value other than 1 or -1.
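A hedged usage sketch of the append/retract pattern; controller, new_collection_id, and a Diff built from an integer literal are assumed to be in scope (adjust to the actual Diff constructors):

// Register the shard mapping for a newly created collection.
controller.append_shard_mappings(std::iter::once(new_collection_id), Diff::from(1));
// Later, retract the mapping when the collection is dropped.
controller.append_shard_mappings(std::iter::once(new_collection_id), Diff::from(-1));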
pub(crate) fn determine_collection_dependencies(
    &self,
    self_id: GlobalId,
    data_source: &DataSource<T>,
) -> Result<Vec<GlobalId>, StorageError<T>>
Determines and returns this collection’s dependencies, if any.
pub(crate) async fn read_handle_for_snapshot( &self, id: GlobalId, ) -> Result<ReadHandle<SourceData, (), T, StorageDiff>, StorageError<T>>
pub(crate) fn record_status_updates(&mut self, updates: Vec<StatusUpdate>)
Handles writing of status updates for sources/sinks to the appropriate status relation.
pub(crate) fn collection( &self, id: GlobalId, ) -> Result<&CollectionState<T>, StorageError<T>>
pub(crate) fn run_ingestion(
    &mut self,
    id: GlobalId,
) -> Result<(), StorageError<T>>
Runs the identified ingestion using its current in-memory definition.
pub(crate) fn run_export(&mut self, id: GlobalId) -> Result<(), StorageError<T>>
Runs the identified export using the current definition of the export that we have in memory.
pub(crate) fn update_frontier_introspection(&mut self)
Update introspection with the current frontiers of storage objects.
This method is invoked by Controller::maintain, which we expect to be called once per second during normal operation.
pub(crate) fn refresh_wallclock_lag(&mut self)
Refresh the wallclock lag introspection and metrics with the current lag values.
We measure the lag of write frontiers behind the wallclock time every second and track the maximum over 60 measurements (i.e., one minute). Every minute, we emit a new lag event to the WallclockLagHistory introspection with the current maximum.
This method is invoked by Controller::maintain, which we expect to be called once per second during normal operation.
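The windowing scheme described above can be illustrated with a small standalone sketch (one sample per second, emit the maximum after 60 samples); this is illustrative only, not the controller's implementation:

use std::time::Duration;

// Collects per-second lag samples and reports the maximum once per minute.
struct WallclockLagWindow {
    samples: Vec<Duration>,
}

impl WallclockLagWindow {
    fn new() -> Self {
        Self { samples: Vec::with_capacity(60) }
    }

    // Called once per second (e.g. from the maintenance tick) with the current lag.
    // Returns Some(max) once 60 samples (one minute) have accumulated.
    fn record(&mut self, lag: Duration) -> Option<Duration> {
        self.samples.push(lag);
        if self.samples.len() < 60 {
            return None;
        }
        let max = self.samples.iter().copied().max().unwrap_or_default();
        self.samples.clear();
        // The caller would emit `max` to the WallclockLagHistory introspection.
        Some(max)
    }
}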
Trait Implementations
impl<T> Debug for Controller<T>
impl<T> StorageController for Controller<T>
where
    T: Timestamp + Lattice + TotalOrder + Codec64 + From<EpochMillis> + TimestampManipulation + Into<Datum<'static>> + Display,
    StorageCommand<T>: RustType<ProtoStorageCommand>,
    StorageResponse<T>: RustType<ProtoStorageResponse>,
fn config(&self) -> &StorageConfiguration
Get the current configuration.
fn create_collections_for_bootstrap<'life0, 'life1, 'life2, 'async_trait>(
    &'life0 mut self,
    storage_metadata: &'life1 StorageMetadata,
    register_ts: Option<Self::Timestamp>,
    collections: Vec<(GlobalId, CollectionDescription<Self::Timestamp>)>,
    migrated_storage_collections: &'life2 BTreeSet<GlobalId>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
    'life1: 'async_trait,
    'life2: 'async_trait,
Create and “execute” the described collection.
“Execute” is in scare quotes because what executing a collection means varies widely based on the type of collection you’re creating.
The general process that creating a collection undergoes is (sketched after this list):
- Enrich the description we get from the user with metadata that only the storage controller knows. This is mostly a matter of separating concerns.
- Generate write and read persist handles for the collection.
- Store the collection’s metadata in the appropriate field.
- “Execute” the collection. What that means is contingent on the type of collection, so consult the code for more details.
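The sketch below mirrors those four steps with stand-in types and hypothetical helper names; it is an outline of the flow, not the controller's real code (the real code works with CollectionDescription, CollectionMetadata, and persist handles):

// Stand-in types for the sketch only.
struct Description;
struct Metadata;
struct Handles;

fn create_collection(description: Description) {
    // 1. Enrich the user-provided description with controller-only metadata.
    let metadata = enrich(description);
    // 2. Open persist write and read handles for the collection's shard.
    let handles = open_persist_handles(&metadata);
    // 3. Store the metadata in the controller's collection state.
    store(metadata, handles);
    // 4. "Execute" the collection; what this means depends on the collection type
    //    (start an ingestion, install a table write handle, and so on).
}

fn enrich(_d: Description) -> Metadata { Metadata }
fn open_persist_handles(_m: &Metadata) -> Handles { Handles }
fn store(_m: Metadata, _h: Handles) {}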
fn create_oneshot_ingestion<'life0, 'async_trait>(
    &'life0 mut self,
    ingestion_id: Uuid,
    collection_id: GlobalId,
    instance_id: StorageInstanceId,
    request: OneshotIngestionRequest,
    result_tx: OneshotResultCallback<ProtoBatch>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Create a oneshot ingestion.
fn alter_export_connections<'life0, 'async_trait>(
    &'life0 mut self,
    exports: BTreeMap<GlobalId, StorageSinkConnection>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Create the sinks described by the ExportDescription.
fn drop_sinks(
    &mut self,
    storage_metadata: &StorageMetadata,
    identifiers: Vec<GlobalId>,
) -> Result<(), StorageError<Self::Timestamp>>
Drops the read capability for the sinks and allows their resources to be reclaimed.
type Timestamp = T
fn initialization_complete(&mut self)
fn update_parameters(&mut self, config_params: StorageParameters)
fn collection_metadata(
    &self,
    id: GlobalId,
) -> Result<CollectionMetadata, StorageError<Self::Timestamp>>
fn collection_hydrated(
    &self,
    collection_id: GlobalId,
) -> Result<bool, StorageError<Self::Timestamp>>
Returns true iff the given collection/ingestion has been hydrated.
fn collections_hydrated_on_replicas(
    &self,
    target_replica_ids: Option<Vec<ReplicaId>>,
    exclude_collections: &BTreeSet<GlobalId>,
) -> Result<bool, StorageError<Self::Timestamp>>
Returns true if each non-transient, non-excluded collection is hydrated on at least one of the provided replicas.
fn collection_frontiers(
    &self,
    id: GlobalId,
) -> Result<(Antichain<Self::Timestamp>, Antichain<Self::Timestamp>), StorageError<Self::Timestamp>>
fn collections_frontiers(
    &self,
    ids: Vec<GlobalId>,
) -> Result<Vec<(GlobalId, Antichain<T>, Antichain<T>)>, StorageError<Self::Timestamp>>
fn active_collection_metadatas(&self) -> Vec<(GlobalId, CollectionMetadata)>
fn active_ingestions(
    &self,
    instance_id: StorageInstanceId,
) -> Box<dyn Iterator<Item = &GlobalId> + '_>
fn check_exists(&self, id: GlobalId) -> Result<(), StorageError<Self::Timestamp>>
Checks whether a collection exists under the given GlobalId. Returns an error if the collection does not exist.
fn create_instance(&mut self, id: StorageInstanceId)
fn drop_instance(&mut self, id: StorageInstanceId)
fn update_instance_workload_class(
    &mut self,
    id: StorageInstanceId,
    workload_class: Option<String>,
)
fn connect_replica(
    &mut self,
    instance_id: StorageInstanceId,
    replica_id: ReplicaId,
    location: ClusterReplicaLocation,
)
fn drop_replica(
    &mut self,
    instance_id: StorageInstanceId,
    replica_id: ReplicaId,
)
fn evolve_nullability_for_bootstrap<'life0, 'life1, 'async_trait>(
    &'life0 mut self,
    storage_metadata: &'life1 StorageMetadata,
    collections: Vec<(GlobalId, RelationDesc)>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
    'life1: 'async_trait,
fn check_alter_ingestion_source_desc(
    &mut self,
    ingestion_id: GlobalId,
    source_desc: &SourceDesc,
) -> Result<(), StorageError<Self::Timestamp>>
fn alter_ingestion_source_desc<'life0, 'async_trait>(
    &'life0 mut self,
    ingestion_id: GlobalId,
    source_desc: SourceDesc,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Alters the identified ingestion to use the provided SourceDesc.
fn alter_ingestion_connections<'life0, 'async_trait>(
    &'life0 mut self,
    source_connections: BTreeMap<GlobalId, GenericSourceConnection<InlinedConnection>>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Alters each identified ingestion to use the provided GenericSourceConnection.
fn alter_ingestion_export_data_configs<'life0, 'async_trait>(
    &'life0 mut self,
    source_exports: BTreeMap<GlobalId, SourceExportDataConfig>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
fn alter_table_desc<'life0, 'async_trait>(
&'life0 mut self,
existing_collection: GlobalId,
new_collection: GlobalId,
new_desc: RelationDesc,
expected_version: RelationVersion,
register_ts: Self::Timestamp,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
fn export(
    &self,
    id: GlobalId,
) -> Result<&ExportState<Self::Timestamp>, StorageError<Self::Timestamp>>
fn export_mut(
    &mut self,
    id: GlobalId,
) -> Result<&mut ExportState<Self::Timestamp>, StorageError<Self::Timestamp>>
fn cancel_oneshot_ingestion(
    &mut self,
    ingestion_id: Uuid,
) -> Result<(), StorageError<Self::Timestamp>>
fn alter_export<'life0, 'async_trait>(
    &'life0 mut self,
    id: GlobalId,
    new_description: ExportDescription<Self::Timestamp>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
Alters the identified export to match the provided ExportDescription.
fn drop_tables(
    &mut self,
    storage_metadata: &StorageMetadata,
    identifiers: Vec<GlobalId>,
    ts: Self::Timestamp,
) -> Result<(), StorageError<Self::Timestamp>>
fn drop_sources(
    &mut self,
    storage_metadata: &StorageMetadata,
    identifiers: Vec<GlobalId>,
) -> Result<(), StorageError<Self::Timestamp>>
fn drop_sources_unvalidated(
    &mut self,
    storage_metadata: &StorageMetadata,
    ids: Vec<GlobalId>,
) -> Result<(), StorageError<Self::Timestamp>>
fn drop_sinks_unvalidated(
    &mut self,
    storage_metadata: &StorageMetadata,
    sinks_to_drop: Vec<GlobalId>,
)
fn append_table(
    &mut self,
    write_ts: Self::Timestamp,
    advance_to: Self::Timestamp,
    commands: Vec<(GlobalId, Vec<TableData>)>,
) -> Result<Receiver<Result<(), StorageError<Self::Timestamp>>>, StorageError<Self::Timestamp>>
fn monotonic_appender(
    &self,
    id: GlobalId,
) -> Result<MonotonicAppender<Self::Timestamp>, StorageError<Self::Timestamp>>
Returns a MonotonicAppender, which is a channel that can be used to monotonically append to the specified GlobalId.
fn webhook_statistics(
    &self,
    id: GlobalId,
) -> Result<Arc<WebhookStatistics>, StorageError<Self::Timestamp>>
Returns a WebhookStatistics which can be used to report user-facing statistics for this given webhook, specified by the GlobalId.
fn ready<'life0, 'async_trait>(
    &'life0 mut self,
) -> Pin<Box<dyn Future<Output = ()> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
fn process(
    &mut self,
    storage_metadata: &StorageMetadata,
) -> Result<Option<Response<T>>, Error>
Processes the work queued by StorageController::ready.
fn inspect_persist_state<'life0, 'async_trait>(
    &'life0 self,
    id: GlobalId,
) -> Pin<Box<dyn Future<Output = Result<Value, Error>> + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
fn append_introspection_updates(
    &mut self,
    type_: IntrospectionType,
    updates: Vec<(Row, Diff)>,
)
fn append_status_introspection_updates(
    &mut self,
    type_: IntrospectionType,
    updates: Vec<StatusUpdate>,
)
fn update_introspection_collection(
    &mut self,
    type_: IntrospectionType,
    op: StorageWriteOp,
)
fn real_time_recent_timestamp<'life0, 'async_trait>(
&'life0 self,
timestamp_objects: BTreeSet<GlobalId>,
timeout: Duration,
) -> Pin<Box<dyn Future<Output = Result<BoxFuture<Result<Self::Timestamp, StorageError<Self::Timestamp>>>, StorageError<Self::Timestamp>>> + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
fn create_collections<'life0, 'life1, 'async_trait>(
    &'life0 mut self,
    storage_metadata: &'life1 StorageMetadata,
    register_ts: Option<Self::Timestamp>,
    collections: Vec<(GlobalId, CollectionDescription<Self::Timestamp>)>,
) -> Pin<Box<dyn Future<Output = Result<(), StorageError<Self::Timestamp>>> + 'async_trait>>
where
    'life0: 'async_trait,
    'life1: 'async_trait,
    Self: 'async_trait,
Auto Trait Implementations
impl<T> !Freeze for Controller<T>
impl<T> !RefUnwindSafe for Controller<T>
impl<T> Send for Controller<T>
impl<T> !Sync for Controller<T>
impl<T> Unpin for Controller<T> where T: Unpin
impl<T> !UnwindSafe for Controller<T>
Blanket Implementations
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> FmtForward for T
fn fmt_binary(self) -> FmtBinary<Self> where Self: Binary
Causes self to use its Binary implementation when Debug-formatted.
fn fmt_display(self) -> FmtDisplay<Self> where Self: Display
Causes self to use its Display implementation when Debug-formatted.
fn fmt_lower_exp(self) -> FmtLowerExp<Self> where Self: LowerExp
Causes self to use its LowerExp implementation when Debug-formatted.
fn fmt_lower_hex(self) -> FmtLowerHex<Self> where Self: LowerHex
Causes self to use its LowerHex implementation when Debug-formatted.
fn fmt_octal(self) -> FmtOctal<Self> where Self: Octal
Causes self to use its Octal implementation when Debug-formatted.
fn fmt_pointer(self) -> FmtPointer<Self> where Self: Pointer
Causes self to use its Pointer implementation when Debug-formatted.
fn fmt_upper_exp(self) -> FmtUpperExp<Self> where Self: UpperExp
Causes self to use its UpperExp implementation when Debug-formatted.
fn fmt_upper_hex(self) -> FmtUpperHex<Self> where Self: UpperHex
Causes self to use its UpperHex implementation when Debug-formatted.
impl<T> FutureExt for T
fn with_context(self, otel_cx: Context) -> WithContext<Self>
fn with_current_context(self) -> WithContext<Self>
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
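For reference, the either crate's IntoEither methods behave as follows; this is a standalone example, unrelated to Controller itself:

use either::{Either, IntoEither};

fn main() {
    let x = 5i32;
    // Wrap in the Left variant when the flag is true.
    let left: Either<i32, i32> = x.into_either(true);
    assert!(left.is_left());
    // Decide Left/Right based on a predicate over the value itself.
    let right: Either<i32, i32> = x.into_either_with(|v| *v > 10);
    assert!(right.is_right());
}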
impl<T> IntoRequest<T> for T
fn into_request(self) -> Request<T>
Wraps the input message T in a tonic::Request.
impl<T, U> OverrideFrom<Option<&T>> for U where U: OverrideFrom<T>
impl<T> Pipe for T where T: ?Sized
fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> R where Self: Sized
fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> R where R: 'a
Borrows self and passes that borrow into the pipe function.
fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> R where R: 'a
Mutably borrows self and passes that borrow into the pipe function.
fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
fn pipe_borrow_mut<'a, B, R>(&'a mut self, func: impl FnOnce(&'a mut B) -> R) -> R
fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
Borrows self, then passes self.as_ref() into the pipe function.
fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
Mutably borrows self, then passes self.as_mut() into the pipe function.
fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
Borrows self, then passes self.deref() into the pipe function.
impl<T> Pointable for T
impl<P, R> ProtoType<R> for P where R: RustType<P>
fn into_rust(self) -> Result<R, TryFromProtoError>
See RustType::from_proto.
fn from_rust(rust: &R) -> P
See RustType::into_proto.
impl<'a, S, T> Semigroup<&'a S> for T where T: Semigroup<S>
fn plus_equals(&mut self, rhs: &&'a S)
The method of std::ops::AddAssign, for types that do not implement AddAssign.
impl<T> Tap for T
fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
Immutable access to the Borrow<B> of a value.
fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
Mutable access to the BorrowMut<B> of a value.
fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
Immutable access to the AsRef<R> view of a value.
fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
Mutable access to the AsMut<R> view of a value.
fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
Immutable access to the Deref::Target of a value.
fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
Mutable access to the Deref::Target of a value.
fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
Calls .tap() only in debug builds, and is erased in release builds.
fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
Calls .tap_mut() only in debug builds, and is erased in release builds.
fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
Calls .tap_borrow() only in debug builds, and is erased in release builds.
fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
Calls .tap_borrow_mut() only in debug builds, and is erased in release builds.
fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
Calls .tap_ref() only in debug builds, and is erased in release builds.
fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
Calls .tap_ref_mut() only in debug builds, and is erased in release builds.
fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
Calls .tap_deref() only in debug builds, and is erased in release builds.
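As a closing illustration of the tap and pipe combinators listed above; a standalone example using the tap crate, unrelated to Controller itself:

use tap::{Pipe, Tap};

fn main() {
    let len = vec![1, 2, 3]
        // Inspect the value without consuming it.
        .tap(|v| println!("working with {v:?}"))
        // Pipe the value into a closure, producing a new value.
        .pipe(|v| v.len());
    assert_eq!(len, 3);
}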