opentelemetry_proto/proto/tonic/opentelemetry.proto.metrics.v1.rs
// This file is @generated by prost-build.
/// MetricsData represents the metrics data that can be stored in a persistent
/// storage, OR can be embedded by other protocols that transfer OTLP metrics
/// data but do not implement the OTLP protocol.
///
/// The main difference between this message and the collector protocol is that
/// in this message there will not be any "control" or "metadata" specific to
/// the OTLP protocol.
///
/// When new fields are added into this message, the OTLP request MUST be updated
/// as well.
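///
/// A minimal construction sketch of the nesting described above (illustrative
/// only; the crate path `opentelemetry_proto::tonic::metrics::v1` is an
/// assumption about how this generated module is re-exported):
///
/// ```ignore
/// use opentelemetry_proto::tonic::metrics::v1::{MetricsData, ResourceMetrics, ScopeMetrics};
///
/// // One resource with one (still empty) scope; resource and schema URL left unset.
/// let data = MetricsData {
///     resource_metrics: vec![ResourceMetrics {
///         resource: None,
///         scope_metrics: vec![ScopeMetrics::default()],
///         schema_url: String::new(),
///     }],
/// };
/// ```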
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MetricsData {
    /// An array of ResourceMetrics.
    /// For data coming from a single resource this array will typically contain
    /// one element. Intermediary nodes that receive data from multiple origins
    /// typically batch the data before forwarding further and in that case this
    /// array will contain multiple elements.
    #[prost(message, repeated, tag = "1")]
    pub resource_metrics: ::prost::alloc::vec::Vec<ResourceMetrics>,
}
/// A collection of ScopeMetrics from a Resource.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceMetrics {
    /// The resource for the metrics in this message.
    /// If this field is not set then no resource info is known.
    #[prost(message, optional, tag = "1")]
    pub resource: ::core::option::Option<super::super::resource::v1::Resource>,
    /// A list of metrics that originate from a resource.
    #[prost(message, repeated, tag = "2")]
    pub scope_metrics: ::prost::alloc::vec::Vec<ScopeMetrics>,
    /// The Schema URL, if known. This is the identifier of the Schema that the resource data
    /// is recorded in. To learn more about Schema URL see
    /// <https://opentelemetry.io/docs/specs/otel/schemas/#schema-url>
    /// This schema_url applies to the data in the "resource" field. It does not apply
    /// to the data in the "scope_metrics" field, whose entries carry their own schema_url field.
    #[prost(string, tag = "3")]
    pub schema_url: ::prost::alloc::string::String,
}
/// A collection of Metrics produced by a Scope.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ScopeMetrics {
    /// The instrumentation scope information for the metrics in this message.
    /// Semantically, when InstrumentationScope isn't set, it is equivalent to
    /// an empty instrumentation scope name (unknown).
    #[prost(message, optional, tag = "1")]
    pub scope: ::core::option::Option<super::super::common::v1::InstrumentationScope>,
    /// A list of metrics that originate from an instrumentation library.
    #[prost(message, repeated, tag = "2")]
    pub metrics: ::prost::alloc::vec::Vec<Metric>,
    /// The Schema URL, if known. This is the identifier of the Schema that the metric data
    /// is recorded in. To learn more about Schema URL see
    /// <https://opentelemetry.io/docs/specs/otel/schemas/#schema-url>
    /// This schema_url applies to all metrics in the "metrics" field.
    #[prost(string, tag = "3")]
    pub schema_url: ::prost::alloc::string::String,
}
/// Defines a Metric which has one or more timeseries. The following is a
/// brief summary of the Metric data model. For more details, see:
///
/// <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md>
///
/// The data model and relation between entities is shown in the
/// diagram below. Here, "DataPoint" is the term used to refer to any
/// one of the specific data point value types, and "points" is the term used
/// to refer to any one of the lists of points contained in the Metric.
///
/// - Metric is composed of metadata and data.
/// - Metadata part contains a name, description, unit.
/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
/// - DataPoint contains timestamps, attributes, and one of the possible value type
///   fields.
///
///     Metric
///  +------------+
///  |name        |
///  |description |
///  |unit        |     +------------------------------------+
///  |data        |---> |Gauge, Sum, Histogram, Summary, ... |
///  +------------+     +------------------------------------+
///
///    Data \[One of Gauge, Sum, Histogram, Summary, ...\]
///  +-----------+
///  |...        |  // Metadata about the Data.
///  |points     |--+
///  +-----------+  |
///                 |      +---------------------------+
///                 |      |DataPoint 1                |
///                 v      |+------+------+   +------+ |
///              +-----+   ||label |label |...|label | |
///              |  1  |-->||value1|value2|...|valueN| |
///              +-----+   |+------+------+   +------+ |
///              |  .  |   |+-----+                    |
///              |  .  |   ||value|                    |
///              |  .  |   |+-----+                    |
///              |  .  |   +---------------------------+
///              |  .  |               .
///              |  .  |               .
///              |  .  |               .
///              |  .  |   +---------------------------+
///              |  .  |   |DataPoint M                |
///              +-----+   |+------+------+   +------+ |
///              |  M  |-->||label |label |...|label | |
///              +-----+   ||value1|value2|...|valueN| |
///                        |+------+------+   +------+ |
///                        |+-----+                    |
///                        ||value|                    |
///                        |+-----+                    |
///                        +---------------------------+
///
/// Each distinct type of DataPoint represents the output of a specific
/// aggregation function, the result of applying the DataPoint's
/// associated function to one or more measurements.
///
/// All DataPoint types have three common fields:
/// - Attributes includes key-value pairs associated with the data point
/// - TimeUnixNano is required, set to the end time of the aggregation
/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
///   having an AggregationTemporality field, as discussed below.
///
/// Both TimeUnixNano and StartTimeUnixNano values are expressed as
/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
///
/// # TimeUnixNano
///
/// This field is required, having consistent interpretation across
/// DataPoint types. TimeUnixNano is the moment corresponding to when
/// the data point's aggregate value was captured.
///
/// Data points with the 0 value for TimeUnixNano SHOULD be rejected
/// by consumers.
///
/// # StartTimeUnixNano
///
/// StartTimeUnixNano in general allows detecting when a sequence of
/// observations is unbroken. This field indicates to consumers the
/// start time for points with cumulative and delta
/// AggregationTemporality, and it should be included whenever possible
/// to support correct rate calculation. Although it may be omitted
/// when the start time is truly unknown, setting StartTimeUnixNano is
/// strongly encouraged.
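///
/// As a rough sketch of how the pieces compose (illustrative only; the crate
/// path and every literal value below are assumptions, not part of the data
/// model):
///
/// ```ignore
/// use opentelemetry_proto::tonic::metrics::v1::{
///     metric, number_data_point, Gauge, Metric, NumberDataPoint,
/// };
///
/// let point = NumberDataPoint {
///     // TimeUnixNano is required and must be non-zero.
///     time_unix_nano: 1_700_000_000_000_000_000,
///     value: Some(number_data_point::Value::AsDouble(42.0)),
///     ..Default::default()
/// };
/// let metric = Metric {
///     name: "queue.length".into(),
///     description: "Current number of queued items".into(),
///     unit: "{items}".into(),
///     data: Some(metric::Data::Gauge(Gauge { data_points: vec![point] })),
///     ..Default::default()
/// };
/// ```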
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Metric {
    /// name of the metric.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// description of the metric, which can be used in documentation.
    #[prost(string, tag = "2")]
    pub description: ::prost::alloc::string::String,
    /// unit in which the metric value is reported. Follows the format
    /// described by <http://unitsofmeasure.org/ucum.html>.
    #[prost(string, tag = "3")]
    pub unit: ::prost::alloc::string::String,
    /// Additional metadata attributes that describe the metric. \[Optional\].
    /// Attributes are non-identifying.
    /// Consumers SHOULD NOT need to be aware of these attributes.
    /// These attributes MAY be used to encode information allowing
    /// for lossless roundtrip translation to / from another data model.
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "12")]
    pub metadata: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// Data determines the aggregation type (if any) of the metric, the
    /// reported value type for the data points, and the relationship to
    /// the time interval over which they are reported.
    #[prost(oneof = "metric::Data", tags = "5, 7, 9, 10, 11")]
    pub data: ::core::option::Option<metric::Data>,
}
/// Nested message and enum types in `Metric`.
pub mod metric {
    /// Data determines the aggregation type (if any) of the metric, the
    /// reported value type for the data points, and the relationship to
    /// the time interval over which they are reported.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Data {
        #[prost(message, tag = "5")]
        Gauge(super::Gauge),
        #[prost(message, tag = "7")]
        Sum(super::Sum),
        #[prost(message, tag = "9")]
        Histogram(super::Histogram),
        #[prost(message, tag = "10")]
        ExponentialHistogram(super::ExponentialHistogram),
        #[prost(message, tag = "11")]
        Summary(super::Summary),
    }
}
/// Gauge represents the type of a scalar metric that always exports the
/// "current value" for every data point. It should be used for an "unknown"
/// aggregation.
///
/// A Gauge does not support different aggregation temporalities. Given the
/// aggregation is unknown, points cannot be combined using the same
/// aggregation, regardless of aggregation temporalities. Therefore,
/// AggregationTemporality is not included. Consequently, this also means
/// "StartTimeUnixNano" is ignored for all data points.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Gauge {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<NumberDataPoint>,
}
/// Sum represents the type of a scalar metric that is calculated as a sum of all
/// reported measurements over a time interval.
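///
/// A small construction sketch (illustrative only; the crate path is an
/// assumption). The aggregation temporality is stored as an `i32`, so the
/// enum value is cast when it is set:
///
/// ```ignore
/// use opentelemetry_proto::tonic::metrics::v1::{AggregationTemporality, Sum};
///
/// let sum = Sum {
///     data_points: vec![],
///     aggregation_temporality: AggregationTemporality::Cumulative as i32,
///     is_monotonic: true,
/// };
/// ```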
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Sum {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<NumberDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
    /// If "true", the sum is monotonic.
    #[prost(bool, tag = "3")]
    pub is_monotonic: bool,
}
/// Histogram represents the type of a metric that is calculated by aggregating
/// as a Histogram of all reported measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Histogram {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<HistogramDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
}
/// ExponentialHistogram represents the type of a metric that is calculated by aggregating
/// as an ExponentialHistogram of all reported double measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExponentialHistogram {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<ExponentialHistogramDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
}
/// Summary metric data are used to convey quantile summaries,
/// a Prometheus (see: <https://prometheus.io/docs/concepts/metric_types/#summary>)
/// and OpenMetrics (see: <https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45>)
/// data type. These data points cannot always be merged in a meaningful way.
/// While they can be useful in some applications, histogram data points are
/// recommended for new applications.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Summary {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<SummaryDataPoint>,
}
/// NumberDataPoint is a single data point in a timeseries that describes the
/// time-varying scalar value of a metric.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NumberDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries to
    /// which this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "7")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point.
    #[prost(message, repeated, tag = "5")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "8")]
    pub flags: u32,
    /// The value itself. A point is considered invalid when one of the recognized
    /// value fields is not present inside this oneof.
    #[prost(oneof = "number_data_point::Value", tags = "4, 6")]
    pub value: ::core::option::Option<number_data_point::Value>,
}
/// Nested message and enum types in `NumberDataPoint`.
pub mod number_data_point {
    /// The value itself. A point is considered invalid when one of the recognized
    /// value fields is not present inside this oneof.
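    ///
    /// A small sketch of reading either variant back as `f64` (illustrative
    /// only; the crate path is an assumption):
    ///
    /// ```ignore
    /// use opentelemetry_proto::tonic::metrics::v1::number_data_point::Value;
    ///
    /// let v = Value::AsInt(7);
    /// let as_f64 = match v {
    ///     Value::AsDouble(d) => d,
    ///     Value::AsInt(i) => i as f64,
    /// };
    /// assert_eq!(as_f64, 7.0);
    /// ```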
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Value {
        #[prost(double, tag = "4")]
        AsDouble(f64),
        #[prost(sfixed64, tag = "6")]
        AsInt(i64),
    }
}
/// HistogramDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a Histogram. A Histogram contains summary statistics
/// for a population of values; it may optionally contain the distribution of
/// those values across a set of buckets.
///
/// If the histogram contains the distribution of values, then both
/// "explicit_bounds" and "bucket_counts" fields must be defined.
/// If the histogram does not contain the distribution of values, then both
/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
/// "sum" are known.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HistogramDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries to
    /// which this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "9")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be non-negative. This
    /// value must be equal to the sum of the "count" fields in buckets if a
    /// histogram is provided.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram>
    #[prost(double, optional, tag = "5")]
    pub sum: ::core::option::Option<f64>,
    /// bucket_counts is an optional field that contains the count values of the
    /// histogram for each bucket.
    ///
    /// The sum of the bucket_counts must equal the value in the count field.
    ///
    /// The number of elements in the bucket_counts array must be one greater than
    /// the number of elements in the explicit_bounds array.
    #[prost(fixed64, repeated, tag = "6")]
    pub bucket_counts: ::prost::alloc::vec::Vec<u64>,
    /// explicit_bounds specifies buckets with explicitly defined bounds for values.
    ///
    /// The boundaries for bucket at index i are:
    ///
    /// (-infinity, explicit_bounds\[i\]\] for i == 0
    /// (explicit_bounds\[i-1\], explicit_bounds\[i\]\] for 0 < i < size(explicit_bounds)
    /// (explicit_bounds\[i-1\], +infinity) for i == size(explicit_bounds)
    ///
    /// The values in the explicit_bounds array must be strictly increasing.
    ///
    /// Histogram buckets are inclusive of their upper boundary, except the last
    /// bucket where the boundary is at infinity. This format is intentionally
    /// compatible with the OpenMetrics histogram definition.
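    ///
    /// As a worked illustration of the invariants (the numbers are made up):
    ///
    /// ```ignore
    /// // explicit_bounds = [0.0, 5.0, 10.0] defines four buckets:
    /// //   (-inf, 0.0], (0.0, 5.0], (5.0, 10.0], (10.0, +inf)
    /// let explicit_bounds = vec![0.0, 5.0, 10.0];
    /// let bucket_counts = vec![1u64, 5, 3, 0];
    /// assert_eq!(bucket_counts.len(), explicit_bounds.len() + 1);
    /// // The sum of bucket_counts must equal the `count` field (9 here).
    /// assert_eq!(bucket_counts.iter().sum::<u64>(), 9);
    /// ```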
    #[prost(double, repeated, tag = "7")]
    pub explicit_bounds: ::prost::alloc::vec::Vec<f64>,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point.
    #[prost(message, repeated, tag = "8")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "10")]
    pub flags: u32,
    /// min is the minimum value over (start_time, end_time].
    #[prost(double, optional, tag = "11")]
    pub min: ::core::option::Option<f64>,
    /// max is the maximum value over (start_time, end_time].
    #[prost(double, optional, tag = "12")]
    pub max: ::core::option::Option<f64>,
}
/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
/// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
/// summary statistics for a population of values; it may optionally contain the
/// distribution of those values across a set of buckets.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExponentialHistogramDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries to
    /// which this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "1")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be
    /// non-negative. This value must be equal to the sum of the "bucket_counts"
    /// values in the positive and negative Buckets plus the "zero_count" field.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram>
    #[prost(double, optional, tag = "5")]
    pub sum: ::core::option::Option<f64>,
    /// scale describes the resolution of the histogram. Boundaries are
    /// located at powers of the base, where:
    ///
    /// base = (2^(2^-scale))
    ///
    /// The histogram bucket identified by `index`, a signed integer,
    /// contains values that are greater than (base^index) and
    /// less than or equal to (base^(index+1)).
    ///
    /// The positive and negative ranges of the histogram are expressed
    /// separately. Negative values are mapped by their absolute value
    /// into the negative range using the same scale as the positive range.
    ///
    /// scale is not restricted by the protocol, as the permissible
    /// values depend on the range of the data.
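    ///
    /// A sketch of the mapping from a value to its bucket index, assuming
    /// exact arithmetic (a real implementation must be careful with values
    /// that land exactly on a bucket boundary):
    ///
    /// ```ignore
    /// // scale = 3 gives base = 2^(2^-3) ~= 1.0905. The bucket index for a
    /// // positive value v is the integer i with base^i < v <= base^(i+1),
    /// // i.e. ceil(log(v) / log(base)) - 1.
    /// let scale: i32 = 3;
    /// let base = 2f64.powf(2f64.powi(-scale));
    /// let v = 10.0_f64;
    /// let index = (v.ln() / base.ln()).ceil() as i64 - 1;
    /// assert!(base.powi(index as i32) < v && v <= base.powi(index as i32 + 1));
    /// ```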
    #[prost(sint32, tag = "6")]
    pub scale: i32,
    /// zero_count is the count of values that are either exactly zero or
    /// within the region considered zero by the instrumentation at the
    /// tolerated degree of precision. This bucket stores values that
    /// cannot be expressed using the standard exponential formula as
    /// well as values that have been rounded to zero.
    ///
    /// Implementations MAY consider the zero bucket to have probability
    /// mass equal to (zero_count / count).
    #[prost(fixed64, tag = "7")]
    pub zero_count: u64,
    /// positive carries the positive range of exponential bucket counts.
    #[prost(message, optional, tag = "8")]
    pub positive: ::core::option::Option<exponential_histogram_data_point::Buckets>,
    /// negative carries the negative range of exponential bucket counts.
    #[prost(message, optional, tag = "9")]
    pub negative: ::core::option::Option<exponential_histogram_data_point::Buckets>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "10")]
    pub flags: u32,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point.
    #[prost(message, repeated, tag = "11")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// min is the minimum value over (start_time, end_time].
    #[prost(double, optional, tag = "12")]
    pub min: ::core::option::Option<f64>,
    /// max is the maximum value over (start_time, end_time].
    #[prost(double, optional, tag = "13")]
    pub max: ::core::option::Option<f64>,
    /// ZeroThreshold may be optionally set to convey the width of the zero
    /// region, where the zero region is defined as the closed interval
    /// \[-ZeroThreshold, ZeroThreshold\].
    /// When ZeroThreshold is 0, the zero_count bucket stores values that cannot be
    /// expressed using the standard exponential formula as well as values that
    /// have been rounded to zero.
    #[prost(double, tag = "14")]
    pub zero_threshold: f64,
}
/// Nested message and enum types in `ExponentialHistogramDataPoint`.
pub mod exponential_histogram_data_point {
    /// Buckets are a set of bucket counts, encoded in a contiguous array
    /// of counts.
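    ///
    /// A small sketch of how `offset` shifts the indices (values are made up):
    ///
    /// ```ignore
    /// // With offset = -2 and bucket_counts = [3, 0, 5], the populated bucket
    /// // indices are -2, -1 and 0; bucket_counts[i] belongs to index (offset + i).
    /// let offset: i32 = -2;
    /// let bucket_counts = vec![3u64, 0, 5];
    /// for (i, count) in bucket_counts.iter().enumerate() {
    ///     let index = offset + i as i32;
    ///     println!("bucket index {index}: count {count}");
    /// }
    /// ```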
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Buckets {
        /// Offset is the bucket index of the first entry in the bucket_counts array.
        ///
        /// Note: This uses a varint encoding as a simple form of compression.
        #[prost(sint32, tag = "1")]
        pub offset: i32,
        /// bucket_counts is an array of count values, where bucket_counts\[i\] carries
        /// the count of the bucket at index (offset+i). bucket_counts\[i\] is the count
        /// of values greater than base^(offset+i) and less than or equal to
        /// base^(offset+i+1).
        ///
        /// Note: By contrast, the explicit HistogramDataPoint uses
        /// fixed64. This field is expected to have many buckets,
        /// especially zeros, so uint64 has been selected to ensure
        /// varint encoding.
        #[prost(uint64, repeated, tag = "2")]
        pub bucket_counts: ::prost::alloc::vec::Vec<u64>,
    }
}
/// SummaryDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a Summary metric.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SummaryDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries to
    /// which this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "7")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be non-negative.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary>
    #[prost(double, tag = "5")]
    pub sum: f64,
    /// (Optional) list of values at different quantiles of the distribution calculated
    /// from the current snapshot. The quantiles must be strictly increasing.
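    ///
    /// A sketch of a typical p50/p95/max latency summary (illustrative only;
    /// the crate path and the numbers are assumptions):
    ///
    /// ```ignore
    /// use opentelemetry_proto::tonic::metrics::v1::summary_data_point::ValueAtQuantile;
    ///
    /// let quantile_values = vec![
    ///     ValueAtQuantile { quantile: 0.5, value: 120.0 },
    ///     ValueAtQuantile { quantile: 0.95, value: 340.0 },
    ///     // By convention the 1.0 quantile carries the observed maximum.
    ///     ValueAtQuantile { quantile: 1.0, value: 870.0 },
    /// ];
    /// ```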
    #[prost(message, repeated, tag = "6")]
    pub quantile_values: ::prost::alloc::vec::Vec<summary_data_point::ValueAtQuantile>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "8")]
    pub flags: u32,
}
/// Nested message and enum types in `SummaryDataPoint`.
pub mod summary_data_point {
    /// Represents the value at a given quantile of a distribution.
    ///
    /// To record Min and Max values, the following conventions are used:
    /// - The 1.0 quantile is equivalent to the maximum value observed.
    /// - The 0.0 quantile is equivalent to the minimum value observed.
    ///
    /// See the following issue for more context:
    /// <https://github.com/open-telemetry/opentelemetry-proto/issues/125>
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, Copy, PartialEq, ::prost::Message)]
    pub struct ValueAtQuantile {
        /// The quantile of a distribution. Must be in the interval
        /// \[0.0, 1.0\].
        #[prost(double, tag = "1")]
        pub quantile: f64,
        /// The value at the given quantile of a distribution.
        ///
        /// Quantile values must NOT be negative.
        #[prost(double, tag = "2")]
        pub value: f64,
    }
}
/// A representation of an exemplar, which is a sample input measurement.
/// Exemplars also hold information about the environment when the measurement
/// was recorded, for example the span and trace ID of the active span when the
/// exemplar was recorded.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Exemplar {
    /// The set of key/value pairs that were filtered out by the aggregator, but
    /// recorded alongside the original measurement. Only key/value pairs that were
    /// filtered out by the aggregator should be included.
    #[prost(message, repeated, tag = "7")]
    pub filtered_attributes: ::prost::alloc::vec::Vec<
        super::super::common::v1::KeyValue,
    >,
    /// time_unix_nano is the exact time when this exemplar was recorded.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub time_unix_nano: u64,
    /// (Optional) Span ID of the exemplar trace.
    /// span_id may be missing if the measurement is not recorded inside a trace
    /// or if the trace is not sampled.
    #[prost(bytes = "vec", tag = "4")]
    pub span_id: ::prost::alloc::vec::Vec<u8>,
    /// (Optional) Trace ID of the exemplar trace.
    /// trace_id may be missing if the measurement is not recorded inside a trace
    /// or if the trace is not sampled.
    #[prost(bytes = "vec", tag = "5")]
    pub trace_id: ::prost::alloc::vec::Vec<u8>,
    /// The value of the measurement that was recorded. An exemplar is
    /// considered invalid when one of the recognized value fields is not present
    /// inside this oneof.
    #[prost(oneof = "exemplar::Value", tags = "3, 6")]
    pub value: ::core::option::Option<exemplar::Value>,
}
/// Nested message and enum types in `Exemplar`.
pub mod exemplar {
    /// The value of the measurement that was recorded. An exemplar is
    /// considered invalid when one of the recognized value fields is not present
    /// inside this oneof.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Value {
        #[prost(double, tag = "3")]
        AsDouble(f64),
        #[prost(sfixed64, tag = "6")]
        AsInt(i64),
    }
}
/// AggregationTemporality defines how a metric aggregator reports aggregated
/// values. It describes how those values relate to the time interval over
/// which they are aggregated.
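///
/// A small decoding sketch (illustrative only; it assumes a prost version
/// that generates `TryFrom<i32>` for enumerations and that the crate path is
/// `opentelemetry_proto::tonic::metrics::v1`):
///
/// ```ignore
/// use opentelemetry_proto::tonic::metrics::v1::AggregationTemporality;
///
/// // `Sum::aggregation_temporality` and friends carry the raw i32 value.
/// let raw: i32 = AggregationTemporality::Delta as i32;
/// let temporality = AggregationTemporality::try_from(raw)
///     .unwrap_or(AggregationTemporality::Unspecified);
/// assert_eq!(temporality, AggregationTemporality::Delta);
/// ```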
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum AggregationTemporality {
    /// UNSPECIFIED is the default AggregationTemporality, it MUST NOT be used.
    Unspecified = 0,
    /// DELTA is an AggregationTemporality for a metric aggregator which reports
    /// changes since last report time. Successive metrics contain aggregation of
    /// values from continuous and non-overlapping intervals.
    ///
    /// The values for a DELTA metric are based only on the time interval
    /// associated with one measurement cycle. There is no dependency on
    /// previous measurements, as is the case for CUMULATIVE metrics.
    ///
    /// For example, consider a system that measures the number of requests
    /// it receives and reports the sum of these requests every second as a
    /// DELTA metric:
    ///
    /// 1. The system starts receiving at time=t_0.
    /// 2. A request is received, the system measures 1 request.
    /// 3. A request is received, the system measures 1 request.
    /// 4. A request is received, the system measures 1 request.
    /// 5. The 1 second collection cycle ends. A metric is exported for the
    ///    number of requests received over the interval of time t_0 to
    ///    t_0+1 with a value of 3.
    /// 6. A request is received, the system measures 1 request.
    /// 7. A request is received, the system measures 1 request.
    /// 8. The 1 second collection cycle ends. A metric is exported for the
    ///    number of requests received over the interval of time t_0+1 to
    ///    t_0+2 with a value of 2.
    Delta = 1,
    /// CUMULATIVE is an AggregationTemporality for a metric aggregator which
    /// reports changes since a fixed start time. This means that current values
    /// of a CUMULATIVE metric depend on all previous measurements since the
    /// start time. Because of this, the sender is required to retain this state
    /// in some form. If this state is lost or invalidated, the CUMULATIVE metric
    /// values MUST be reset and a new fixed start time following the last
    /// reported measurement time sent MUST be used.
    ///
    /// For example, consider a system that measures the number of requests
    /// it receives and reports the sum of these requests every second as a
    /// CUMULATIVE metric:
    ///
    /// 1. The system starts receiving at time=t_0.
    /// 2. A request is received, the system measures 1 request.
    /// 3. A request is received, the system measures 1 request.
    /// 4. A request is received, the system measures 1 request.
    /// 5. The 1 second collection cycle ends. A metric is exported for the
    ///    number of requests received over the interval of time t_0 to
    ///    t_0+1 with a value of 3.
    /// 6. A request is received, the system measures 1 request.
    /// 7. A request is received, the system measures 1 request.
    /// 8. The 1 second collection cycle ends. A metric is exported for the
    ///    number of requests received over the interval of time t_0 to
    ///    t_0+2 with a value of 5.
    /// 9. The system experiences a fault and loses state.
    /// 10. The system recovers and resumes receiving at time=t_1.
    /// 11. A request is received, the system measures 1 request.
    /// 12. The 1 second collection cycle ends. A metric is exported for the
    ///     number of requests received over the interval of time t_1 to
    ///     t_1+1 with a value of 1.
    ///
    /// Note: Even though it is valid to use CUMULATIVE when reporting changes
    /// since the last report time, it is not recommended. This may cause problems
    /// for systems that do not use start_time to determine when the aggregation
    /// value was reset (e.g. Prometheus).
    Cumulative = 2,
}
impl AggregationTemporality {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            AggregationTemporality::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED",
            AggregationTemporality::Delta => "AGGREGATION_TEMPORALITY_DELTA",
            AggregationTemporality::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "AGGREGATION_TEMPORALITY_UNSPECIFIED" => Some(Self::Unspecified),
            "AGGREGATION_TEMPORALITY_DELTA" => Some(Self::Delta),
            "AGGREGATION_TEMPORALITY_CUMULATIVE" => Some(Self::Cumulative),
            _ => None,
        }
    }
}
/// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
/// bit-field representing 32 distinct boolean flags. Each flag defined in this
/// enum is a bit-mask. To test the presence of a single flag in the flags of
/// a data point, for example, use an expression like:
///
/// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
///
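/// In Rust terms, a sketch of the same check (illustrative only; it assumes
/// the crate path `opentelemetry_proto::tonic::metrics::v1`):
///
/// ```ignore
/// use opentelemetry_proto::tonic::metrics::v1::DataPointFlags;
///
/// // `flags` is carried as a plain u32 on each data point message.
/// let flags: u32 = DataPointFlags::NoRecordedValueMask as u32;
/// let has_no_recorded_value =
///     (flags & DataPointFlags::NoRecordedValueMask as u32) != 0;
/// assert!(has_no_recorded_value);
/// ```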
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DataPointFlags {
    /// The zero value for the enum. Should not be used for comparisons.
    /// Instead use bitwise "and" with the appropriate mask as shown above.
    DoNotUse = 0,
    /// This DataPoint is valid but has no recorded value. This value
    /// SHOULD be used to reflect explicitly missing data in a series, as
    /// an equivalent to the Prometheus "staleness marker".
    NoRecordedValueMask = 1,
}
impl DataPointFlags {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            DataPointFlags::DoNotUse => "DATA_POINT_FLAGS_DO_NOT_USE",
            DataPointFlags::NoRecordedValueMask => {
                "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK"
            }
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DATA_POINT_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse),
            "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK" => Some(Self::NoRecordedValueMask),
            _ => None,
        }
    }
}