From d571b24ad1f3a63db6d9e5cae5ce2c2b57223edd Mon Sep 17 00:00:00 2001 From: Zhongyang Wu Date: Fri, 30 Sep 2022 12:07:51 -0700 Subject: [PATCH] feat(proto): remove build script and generate files using unit tests. (#881) --- .github/workflows/ci.yml | 2 + .github/workflows/integration_tests.yml | 3 +- opentelemetry-otlp/Cargo.toml | 6 +- opentelemetry-proto/Cargo.toml | 17 +- opentelemetry-proto/build.rs | 30 - opentelemetry-proto/src/lib.rs | 2 - opentelemetry-proto/src/proto.rs | 56 +- .../src/proto/grpcio/common.rs | 4 +- .../src/proto/grpcio/metrics.rs | 4 +- .../src/proto/grpcio/metrics_service.rs | 4 +- .../src/proto/grpcio/resource.rs | 4 +- opentelemetry-proto/src/proto/grpcio/trace.rs | 4 +- .../src/proto/grpcio/trace_config.rs | 4 +- .../src/proto/grpcio/trace_service.rs | 4 +- .../src/proto/grpcio/tracez.rs | 4 +- .../opentelemetry.proto.collector.logs.v1.rs | 261 +++++++ ...pentelemetry.proto.collector.metrics.v1.rs | 264 +++++++ .../opentelemetry.proto.collector.trace.v1.rs | 261 +++++++ .../tonic/opentelemetry.proto.common.v1.rs | 73 ++ .../tonic/opentelemetry.proto.logs.v1.rs | 210 ++++++ .../tonic/opentelemetry.proto.metrics.v1.rs | 693 ++++++++++++++++++ .../tonic/opentelemetry.proto.resource.v1.rs | 13 + .../tonic/opentelemetry.proto.trace.v1.rs | 380 ++++++++++ opentelemetry-proto/tests/grpc_build.rs | 120 ++- scripts/lint.sh | 4 +- 25 files changed, 2303 insertions(+), 124 deletions(-) delete mode 100644 opentelemetry-proto/build.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs create mode 100644 
opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs create mode 100644 opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6af0d5f247..9175ad550f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,6 @@ name: CI +env: + CI: true on: pull_request: push: diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 47101e3607..064734d66e 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -1,5 +1,6 @@ name: integration tests - +env: + CI: true on: pull_request: types: [ labeled, synchronize, opened, reopened ] diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index be2d028f47..cf1f67a090 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -35,7 +35,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std"] } opentelemetry-proto = { version = "0.1", path = "../opentelemetry-proto", default-features = false } -grpcio = { version = "0.9", optional = true } +grpcio = { version = "0.11", optional = true } opentelemetry = { version = "0.18", default-features = false, features = ["trace"], path = "../opentelemetry" } opentelemetry-http = { version = "0.7", path = "../opentelemetry-http", optional = true } protobuf = { version = "2.18", optional = true } @@ -68,7 +68,7 @@ serialize = ["serde"] default = ["grpc-tonic", "trace"] # grpc using tonic -grpc-tonic = ["tonic", "prost", "http", "tokio", "opentelemetry-proto/gen-tonic", "opentelemetry-proto/build-client"] +grpc-tonic = ["tonic", "prost", "http", "tokio", "opentelemetry-proto/gen-tonic"] tls = ["tonic/tls"] tls-roots = ["tls", "tonic/tls-roots"] @@ -85,4 +85,4 @@ reqwest-rustls = ["reqwest", "reqwest/rustls-tls-native-roots"] 
surf-client = ["surf", "opentelemetry-http/surf"] # test -integration-testing = ["tonic", "prost", "tokio/full", "trace", "opentelemetry-proto/build-server"] +integration-testing = ["tonic", "prost", "tokio/full", "trace"] diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index e98fe2e7b8..fa767c04d9 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -14,7 +14,6 @@ categories = [ keywords = ["opentelemetry", "otlp", "logging", "tracing", "metrics"] license = "Apache-2.0" edition = "2021" -build = "build.rs" rust-version = "1.56" autotests = false @@ -24,15 +23,14 @@ doctest = false [[test]] name = "grpc_build" path = "tests/grpc_build.rs" -required-features = ["with-serde", "gen-protoc"] [features] default = [] -full = ["gen-tonic", "gen-protoc", "traces", "logs", "metrics", "zpages", "build-server", "build-client", "with-serde"] +full = ["gen-tonic", "gen-protoc", "traces", "logs", "metrics", "zpages", "with-serde"] # crates used to generate rs files -gen-tonic = ["tonic", "tonic-build", "prost"] +gen-tonic = ["tonic", "prost"] gen-protoc = ["grpcio", "protobuf"] # telemetry pillars and functions @@ -43,11 +41,9 @@ zpages = ["traces"] # add ons with-serde = ["protobuf/with-serde", "serde", "serde_json"] -build-server = [] -build-client = [] [dependencies] -grpcio = { version = "0.9", optional = true } +grpcio = { version = "0.11", optional = true } tonic = { version = "0.8.0", optional = true } prost = { version = "0.11.0", optional = true } protobuf = { version = "2.18", optional = true } # todo: update to 3.0 so we have docs for generated types. 
@@ -60,7 +56,6 @@ serde_json = { version = "1.0", optional = true } [dev-dependencies] protobuf-codegen = { version = "2.16" } protoc-grpcio = { version = "3.0" } - -[build-dependencies] -tonic-build = { version = "0.8.0", optional = true } -prost-build = { version = "0.11.1", optional = true } +tonic-build = { version = "0.8.0" } +prost-build = { version = "0.11.1" } +tempfile = "3.3.0" diff --git a/opentelemetry-proto/build.rs b/opentelemetry-proto/build.rs deleted file mode 100644 index ed795a21b4..0000000000 --- a/opentelemetry-proto/build.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::io::Error; - -// Grpc related files used by tonic are generated here. Those files re-generate for each build -// so it's up to date. -// -// Grpc related files used by grpcio are maintained at src/proto/grpcio. tests/grpc_build.rs makes -// sure they are up to date. - -fn main() -> Result<(), Error> { - #[cfg(feature = "gen-tonic")] - tonic_build::configure() - .build_server(cfg!(feature = "build-server")) - .build_client(cfg!(feature = "build-client")) - .compile( - &[ - "src/proto/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace_config.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/logs/v1/logs.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/collector/logs/v1/logs_service.proto", - ], - &["src/proto/opentelemetry-proto"], - )?; - - Ok(()) -} diff --git a/opentelemetry-proto/src/lib.rs b/opentelemetry-proto/src/lib.rs index 8820f550b0..9e4aa53955 
100644 --- a/opentelemetry-proto/src/lib.rs +++ b/opentelemetry-proto/src/lib.rs @@ -21,8 +21,6 @@ //! - `gen-protoc`: generate rs files using [grpcio](https://github.com/tikv/grpc-rs). //! //! ## Additional configurations -//! - `build-server`: build grpc service servers if enabled. Only applicable to `gen-tonic`. -//! - `build-client`: build grpc service clients if enabled. Only applicable to `gen-tonic`. //! - `with-serde`: add serde annotations to generated types. Only applicable to `gen-protoc`. //! //! ## Misc diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs index 9ac6c5c510..b8fa223de5 100644 --- a/opentelemetry-proto/src/proto.rs +++ b/opentelemetry-proto/src/proto.rs @@ -1,66 +1,68 @@ #[cfg(feature = "gen-tonic")] +#[path = "proto/tonic"] /// Generated files using [`tonic`](https://docs.rs/crate/grpcio) and [`prost`](https://docs.rs/crate/protobuf/latest) pub mod tonic { /// Service stub and clients + #[path = ""] pub mod collector { #[cfg(feature = "logs")] + #[path = ""] pub mod logs { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.collector.logs.v1"); - } + #[path = "opentelemetry.proto.collector.logs.v1.rs"] + pub mod v1; } #[cfg(feature = "metrics")] + #[path = ""] pub mod metrics { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.collector.metrics.v1"); - } + #[path = "opentelemetry.proto.collector.metrics.v1.rs"] + pub mod v1; } #[cfg(feature = "traces")] + #[path = ""] pub mod trace { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.collector.trace.v1"); - } + #[path = "opentelemetry.proto.collector.trace.v1.rs"] + pub mod v1; } } /// Common types used across all signals + #[path = ""] pub mod common { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.common.v1"); - } + #[path = "opentelemetry.proto.common.v1.rs"] + pub mod v1; } - #[cfg(feature = "logs")] /// Generated types used in logging. 
+ #[cfg(feature = "logs")] + #[path = ""] pub mod logs { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.logs.v1"); - } + #[path = "opentelemetry.proto.logs.v1.rs"] + pub mod v1; } - #[cfg(feature = "metrics")] /// Generated types used in metrics. + #[cfg(feature = "metrics")] + #[path = ""] pub mod metrics { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.metrics.v1"); - } + #[path = "opentelemetry.proto.metrics.v1.rs"] + pub mod v1; } /// Generated types used in resources. + #[path = ""] pub mod resource { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.resource.v1"); - } + #[path = "opentelemetry.proto.resource.v1.rs"] + pub mod v1; } - #[cfg(feature = "traces")] /// Generated types used in traces. + #[cfg(feature = "traces")] + #[path = ""] pub mod trace { - pub mod v1 { - tonic::include_proto!("opentelemetry.proto.trace.v1"); - } + #[path = "opentelemetry.proto.trace.v1.rs"] + pub mod v1; } pub use crate::transform::common::tonic::Attributes; diff --git a/opentelemetry-proto/src/proto/grpcio/common.rs b/opentelemetry-proto/src/proto/grpcio/common.rs index 1be4f34c8d..8e6bf6e26f 100644 --- a/opentelemetry-proto/src/proto/grpcio/common.rs +++ b/opentelemetry-proto/src/proto/grpcio/common.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/metrics.rs b/opentelemetry-proto/src/proto/grpcio/metrics.rs index e94e44e3ae..7edf804260 100644 --- a/opentelemetry-proto/src/proto/grpcio/metrics.rs +++ b/opentelemetry-proto/src/proto/grpcio/metrics.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/metrics_service.rs b/opentelemetry-proto/src/proto/grpcio/metrics_service.rs index 0ab93bd148..7a0a2b2456 100644 --- a/opentelemetry-proto/src/proto/grpcio/metrics_service.rs +++ b/opentelemetry-proto/src/proto/grpcio/metrics_service.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/resource.rs b/opentelemetry-proto/src/proto/grpcio/resource.rs index dc99a4c628..39e98de9ce 100644 --- a/opentelemetry-proto/src/proto/grpcio/resource.rs +++ b/opentelemetry-proto/src/proto/grpcio/resource.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/trace.rs b/opentelemetry-proto/src/proto/grpcio/trace.rs index 68324dec78..bd61735c0c 100644 --- a/opentelemetry-proto/src/proto/grpcio/trace.rs +++ b/opentelemetry-proto/src/proto/grpcio/trace.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/trace_config.rs b/opentelemetry-proto/src/proto/grpcio/trace_config.rs index 9ab5e35426..1eaa7384c2 100644 --- a/opentelemetry-proto/src/proto/grpcio/trace_config.rs +++ b/opentelemetry-proto/src/proto/grpcio/trace_config.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/trace_service.rs b/opentelemetry-proto/src/proto/grpcio/trace_service.rs index 76a943f3ba..3949a9351b 100644 --- a/opentelemetry-proto/src/proto/grpcio/trace_service.rs +++ b/opentelemetry-proto/src/proto/grpcio/trace_service.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. 
-// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/grpcio/tracez.rs b/opentelemetry-proto/src/proto/grpcio/tracez.rs index bfc1baaa36..8713408b76 100644 --- a/opentelemetry-proto/src/proto/grpcio/tracez.rs +++ b/opentelemetry-proto/src/proto/grpcio/tracez.rs @@ -1,4 +1,4 @@ -// This file is generated by rust-protobuf 2.27.1. Do not edit +// This file is generated by rust-protobuf 2.28.0. Do not edit // @generated // https://github.com/rust-lang/rust-clippy/issues/702 @@ -21,7 +21,7 @@ /// Generated files are compatible only with the same version /// of protobuf runtime. -// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1; +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; #[derive(PartialEq,Clone,Default)] #[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs new file mode 100644 index 0000000000..eae8e0424c --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs @@ -0,0 +1,261 @@ +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportLogsServiceRequest { + /// An array of ResourceLogs. + /// For data coming from a single resource this array will typically contain one + /// element. Intermediary nodes (such as OpenTelemetry Collector) that receive + /// data from multiple origins typically batch the data before forwarding further and + /// in that case this array will contain multiple elements. 
+ #[prost(message, repeated, tag="1")] + pub resource_logs: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportLogsServiceResponse { +} +/// Generated client implementations. +pub mod logs_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service that can be used to push logs between one Application instrumented with + /// OpenTelemetry and an collector, or between an collector and a central collector (in this + /// case logs are sent/received to/from multiple Applications). + #[derive(Debug, Clone)] + pub struct LogsServiceClient { + inner: tonic::client::Grpc, + } + impl LogsServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl LogsServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> LogsServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + LogsServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. + pub async fn export( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod logs_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + ///Generated trait containing gRPC methods that should be implemented for use with LogsServiceServer. + #[async_trait] + pub trait LogsService: Send + Sync + 'static { + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. + async fn export( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Service that can be used to push logs between one Application instrumented with + /// OpenTelemetry and an collector, or between an collector and a central collector (in this + /// case logs are sent/received to/from multiple Applications). 
+ #[derive(Debug)] + pub struct LogsServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl LogsServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for LogsServiceServer + where + T: LogsService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/opentelemetry.proto.collector.logs.v1.LogsService/Export" => { + #[allow(non_camel_case_types)] + struct ExportSvc(pub Arc); + impl< + T: LogsService, + > tonic::server::UnaryService + for ExportSvc { + type Response = super::ExportLogsServiceResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> 
Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).export(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExportSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for LogsServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for LogsServiceServer { + const NAME: &'static str = "opentelemetry.proto.collector.logs.v1.LogsService"; + } +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs new file mode 100644 index 0000000000..8dc738c819 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs @@ -0,0 +1,264 @@ +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportMetricsServiceRequest { + /// An array of ResourceMetrics. 
+ /// For data coming from a single resource this array will typically contain one + /// element. Intermediary nodes (such as OpenTelemetry Collector) that receive + /// data from multiple origins typically batch the data before forwarding further and + /// in that case this array will contain multiple elements. + #[prost(message, repeated, tag="1")] + pub resource_metrics: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportMetricsServiceResponse { +} +/// Generated client implementations. +pub mod metrics_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service that can be used to push metrics between one Application + /// instrumented with OpenTelemetry and a collector, or between a collector and a + /// central collector. + #[derive(Debug, Clone)] + pub struct MetricsServiceClient { + inner: tonic::client::Grpc, + } + impl MetricsServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MetricsServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MetricsServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + MetricsServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. 
+ pub async fn export( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod metrics_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + ///Generated trait containing gRPC methods that should be implemented for use with MetricsServiceServer. + #[async_trait] + pub trait MetricsService: Send + Sync + 'static { + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. + async fn export( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Service that can be used to push metrics between one Application + /// instrumented with OpenTelemetry and a collector, or between a collector and a + /// central collector. 
+ #[derive(Debug)] + pub struct MetricsServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl MetricsServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for MetricsServiceServer + where + T: MetricsService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export" => { + #[allow(non_camel_case_types)] + struct ExportSvc(pub Arc); + impl< + T: MetricsService, + > tonic::server::UnaryService + for ExportSvc { + type Response = super::ExportMetricsServiceResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + 
request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).export(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExportSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for MetricsServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for MetricsServiceServer { + const NAME: &'static str = "opentelemetry.proto.collector.metrics.v1.MetricsService"; + } +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs new file mode 100644 index 0000000000..69e9cf8799 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs @@ -0,0 +1,261 @@ +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportTraceServiceRequest { + /// An array of 
ResourceSpans. + /// For data coming from a single resource this array will typically contain one + /// element. Intermediary nodes (such as OpenTelemetry Collector) that receive + /// data from multiple origins typically batch the data before forwarding further and + /// in that case this array will contain multiple elements. + #[prost(message, repeated, tag="1")] + pub resource_spans: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportTraceServiceResponse { +} +/// Generated client implementations. +pub mod trace_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service that can be used to push spans between one Application instrumented with + /// OpenTelemetry and a collector, or between a collector and a central collector (in this + /// case spans are sent/received to/from multiple Applications). + #[derive(Debug, Clone)] + pub struct TraceServiceClient { + inner: tonic::client::Grpc, + } + impl TraceServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl TraceServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> TraceServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + TraceServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. 
+ pub async fn export( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod trace_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + ///Generated trait containing gRPC methods that should be implemented for use with TraceServiceServer. + #[async_trait] + pub trait TraceService: Send + Sync + 'static { + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. + async fn export( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Service that can be used to push spans between one Application instrumented with + /// OpenTelemetry and a collector, or between a collector and a central collector (in this + /// case spans are sent/received to/from multiple Applications). 
+ #[derive(Debug)] + pub struct TraceServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl TraceServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for TraceServiceServer + where + T: TraceService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/opentelemetry.proto.collector.trace.v1.TraceService/Export" => { + #[allow(non_camel_case_types)] + struct ExportSvc(pub Arc); + impl< + T: TraceService, + > tonic::server::UnaryService + for ExportSvc { + type Response = super::ExportTraceServiceResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: 
tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).export(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExportSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for TraceServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for TraceServiceServer { + const NAME: &'static str = "opentelemetry.proto.collector.trace.v1.TraceService"; + } +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs new file mode 100644 index 0000000000..35b8cc3885 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs @@ -0,0 +1,73 @@ +/// AnyValue is used to represent any type of attribute value. 
AnyValue may contain a +/// primitive value such as a string or integer or it may contain an arbitrary nested +/// object containing arrays, key-value lists and primitives. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnyValue { + /// The value is one of the listed fields. It is valid for all values to be unspecified + /// in which case this AnyValue is considered to be "empty". + #[prost(oneof="any_value::Value", tags="1, 2, 3, 4, 5, 6, 7")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `AnyValue`. +pub mod any_value { + /// The value is one of the listed fields. It is valid for all values to be unspecified + /// in which case this AnyValue is considered to be "empty". + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(string, tag="1")] + StringValue(::prost::alloc::string::String), + #[prost(bool, tag="2")] + BoolValue(bool), + #[prost(int64, tag="3")] + IntValue(i64), + #[prost(double, tag="4")] + DoubleValue(f64), + #[prost(message, tag="5")] + ArrayValue(super::ArrayValue), + #[prost(message, tag="6")] + KvlistValue(super::KeyValueList), + #[prost(bytes, tag="7")] + BytesValue(::prost::alloc::vec::Vec), + } +} +/// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +/// since oneof in AnyValue does not allow repeated fields. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArrayValue { + /// Array of values. The array may be empty (contain 0 elements). + #[prost(message, repeated, tag="1")] + pub values: ::prost::alloc::vec::Vec, +} +/// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +/// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +/// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +/// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +/// are semantically equivalent. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValueList { + /// A collection of key/value pairs of key-value pairs. The list may be empty (may + /// contain 0 elements). + /// The keys MUST be unique (it is not allowed to have more than one + /// value with the same key). + #[prost(message, repeated, tag="1")] + pub values: ::prost::alloc::vec::Vec, +} +/// KeyValue is a key-value pair that is used to store Span attributes, Link +/// attributes, etc. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValue { + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub value: ::core::option::Option, +} +/// InstrumentationLibrary is a message representing the instrumentation library information +/// such as the fully qualified name and version. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstrumentationLibrary { + /// An empty instrumentation library name means the name is unknown. + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub version: ::prost::alloc::string::String, +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs new file mode 100644 index 0000000000..998a690f85 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs @@ -0,0 +1,210 @@ +/// LogsData represents the logs data that can be stored in a persistent storage, +/// OR can be embedded by other protocols that transfer OTLP logs data but do not +/// implement the OTLP protocol. +/// +/// The main difference between this message and collector protocol is that +/// in this message there will not be any "control" or "metadata" specific to +/// OTLP protocol. +/// +/// When new fields are added into this message, the OTLP request MUST be updated +/// as well. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LogsData { + /// An array of ResourceLogs. + /// For data coming from a single resource this array will typically contain + /// one element. Intermediary nodes that receive data from multiple origins + /// typically batch the data before forwarding further and in that case this + /// array will contain multiple elements. + #[prost(message, repeated, tag="1")] + pub resource_logs: ::prost::alloc::vec::Vec, +} +/// A collection of InstrumentationLibraryLogs from a Resource. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceLogs { + /// The resource for the logs in this message. + /// If this field is not set then resource info is unknown. + #[prost(message, optional, tag="1")] + pub resource: ::core::option::Option, + /// A list of InstrumentationLibraryLogs that originate from a resource. + #[prost(message, repeated, tag="2")] + pub instrumentation_library_logs: ::prost::alloc::vec::Vec, + /// This schema_url applies to the data in the "resource" field. It does not apply + /// to the data in the "instrumentation_library_logs" field which have their own + /// schema_url field. + #[prost(string, tag="3")] + pub schema_url: ::prost::alloc::string::String, +} +/// A collection of Logs produced by an InstrumentationLibrary. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstrumentationLibraryLogs { + /// The instrumentation library information for the logs in this message. + /// Semantically when InstrumentationLibrary isn't set, it is equivalent with + /// an empty instrumentation library name (unknown). + #[prost(message, optional, tag="1")] + pub instrumentation_library: ::core::option::Option, + /// A list of log records. + #[prost(message, repeated, tag="2")] + pub log_records: ::prost::alloc::vec::Vec, + /// This schema_url applies to all logs in the "logs" field. 
+ #[prost(string, tag="3")] + pub schema_url: ::prost::alloc::string::String, +} +/// A log record according to OpenTelemetry Log Data Model: +/// +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LogRecord { + /// time_unix_nano is the time when the event occurred. + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + /// Value of 0 indicates unknown or missing timestamp. + #[prost(fixed64, tag="1")] + pub time_unix_nano: u64, + /// Time when the event was observed by the collection system. + /// For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) + /// this timestamp is typically set at the generation time and is equal to Timestamp. + /// For events originating externally and collected by OpenTelemetry (e.g. using + /// Collector) this is the time when OpenTelemetry's code observed the event measured + /// by the clock of the OpenTelemetry code. This field MUST be set once the event is + /// observed by OpenTelemetry. + /// + /// For converting OpenTelemetry log data to formats that support only one timestamp or + /// when receiving OpenTelemetry log data by recipients that support only one timestamp + /// internally the following logic is recommended: + /// - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + /// Value of 0 indicates unknown or missing timestamp. + #[prost(fixed64, tag="11")] + pub observed_time_unix_nano: u64, + /// Numerical value of the severity, normalized to values described in Log Data Model. + /// \[Optional\]. + #[prost(enumeration="SeverityNumber", tag="2")] + pub severity_number: i32, + /// The severity text (also known as log level). The original string representation as + /// it is known at the source. \[Optional\]. 
+ #[prost(string, tag="3")] + pub severity_text: ::prost::alloc::string::String, + /// Short event identifier that does not contain varying parts. Name describes + /// what happened (e.g. "ProcessStarted"). Recommended to be no longer than 50 + /// characters. Not guaranteed to be unique in any way. \[Optional\]. + /// This deprecated field is planned to be removed March 15, 2022. Receivers can + /// ignore this field. + #[deprecated] + #[prost(string, tag="4")] + pub name: ::prost::alloc::string::String, + /// A value containing the body of the log record. Can be for example a human-readable + /// string message (including multi-line) describing the event in a free form or it can + /// be a structured data composed of arrays and maps of other values. \[Optional\]. + #[prost(message, optional, tag="5")] + pub body: ::core::option::Option, + /// Additional attributes that describe the specific event occurrence. \[Optional\]. + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="6")] + pub attributes: ::prost::alloc::vec::Vec, + #[prost(uint32, tag="7")] + pub dropped_attributes_count: u32, + /// Flags, a bit field. 8 least significant bits are the trace flags as + /// defined in W3C Trace Context specification. 24 most significant bits are reserved + /// and must be set to 0. Readers must not assume that 24 most significant bits + /// will be zero and must correctly mask the bits when reading 8-bit trace flag (use + /// flags & TRACE_FLAGS_MASK). \[Optional\]. + #[prost(fixed32, tag="8")] + pub flags: u32, + /// A unique identifier for a trace. All logs from the same trace share + /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + /// is considered invalid. Can be set for logs that are part of request processing + /// and have an assigned trace id. \[Optional\]. 
+ #[prost(bytes="vec", tag="9")] + pub trace_id: ::prost::alloc::vec::Vec, + /// A unique identifier for a span within a trace, assigned when the span + /// is created. The ID is an 8-byte array. An ID with all zeroes is considered + /// invalid. Can be set for logs that are part of a particular processing span. + /// If span_id is present trace_id SHOULD be also present. \[Optional\]. + #[prost(bytes="vec", tag="10")] + pub span_id: ::prost::alloc::vec::Vec, +} +/// Possible values for LogRecord.SeverityNumber. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SeverityNumber { + /// UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. + Unspecified = 0, + Trace = 1, + Trace2 = 2, + Trace3 = 3, + Trace4 = 4, + Debug = 5, + Debug2 = 6, + Debug3 = 7, + Debug4 = 8, + Info = 9, + Info2 = 10, + Info3 = 11, + Info4 = 12, + Warn = 13, + Warn2 = 14, + Warn3 = 15, + Warn4 = 16, + Error = 17, + Error2 = 18, + Error3 = 19, + Error4 = 20, + Fatal = 21, + Fatal2 = 22, + Fatal3 = 23, + Fatal4 = 24, +} +impl SeverityNumber { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + SeverityNumber::Unspecified => "SEVERITY_NUMBER_UNSPECIFIED", + SeverityNumber::Trace => "SEVERITY_NUMBER_TRACE", + SeverityNumber::Trace2 => "SEVERITY_NUMBER_TRACE2", + SeverityNumber::Trace3 => "SEVERITY_NUMBER_TRACE3", + SeverityNumber::Trace4 => "SEVERITY_NUMBER_TRACE4", + SeverityNumber::Debug => "SEVERITY_NUMBER_DEBUG", + SeverityNumber::Debug2 => "SEVERITY_NUMBER_DEBUG2", + SeverityNumber::Debug3 => "SEVERITY_NUMBER_DEBUG3", + SeverityNumber::Debug4 => "SEVERITY_NUMBER_DEBUG4", + SeverityNumber::Info => "SEVERITY_NUMBER_INFO", + SeverityNumber::Info2 => "SEVERITY_NUMBER_INFO2", + SeverityNumber::Info3 => "SEVERITY_NUMBER_INFO3", + SeverityNumber::Info4 => "SEVERITY_NUMBER_INFO4", + SeverityNumber::Warn => "SEVERITY_NUMBER_WARN", + SeverityNumber::Warn2 => "SEVERITY_NUMBER_WARN2", + SeverityNumber::Warn3 => "SEVERITY_NUMBER_WARN3", + SeverityNumber::Warn4 => "SEVERITY_NUMBER_WARN4", + SeverityNumber::Error => "SEVERITY_NUMBER_ERROR", + SeverityNumber::Error2 => "SEVERITY_NUMBER_ERROR2", + SeverityNumber::Error3 => "SEVERITY_NUMBER_ERROR3", + SeverityNumber::Error4 => "SEVERITY_NUMBER_ERROR4", + SeverityNumber::Fatal => "SEVERITY_NUMBER_FATAL", + SeverityNumber::Fatal2 => "SEVERITY_NUMBER_FATAL2", + SeverityNumber::Fatal3 => "SEVERITY_NUMBER_FATAL3", + SeverityNumber::Fatal4 => "SEVERITY_NUMBER_FATAL4", + } + } +} +/// Masks for LogRecord.flags field. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LogRecordFlags { + LogRecordFlagUnspecified = 0, + LogRecordFlagTraceFlagsMask = 255, +} +impl LogRecordFlags { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + LogRecordFlags::LogRecordFlagUnspecified => "LOG_RECORD_FLAG_UNSPECIFIED", + LogRecordFlags::LogRecordFlagTraceFlagsMask => "LOG_RECORD_FLAG_TRACE_FLAGS_MASK", + } + } +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs new file mode 100644 index 0000000000..b1cb0b39d5 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs @@ -0,0 +1,693 @@ +/// MetricsData represents the metrics data that can be stored in a persistent +/// storage, OR can be embedded by other protocols that transfer OTLP metrics +/// data but do not implement the OTLP protocol. +/// +/// The main difference between this message and collector protocol is that +/// in this message there will not be any "control" or "metadata" specific to +/// OTLP protocol. +/// +/// When new fields are added into this message, the OTLP request MUST be updated +/// as well. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MetricsData { + /// An array of ResourceMetrics. + /// For data coming from a single resource this array will typically contain + /// one element. Intermediary nodes that receive data from multiple origins + /// typically batch the data before forwarding further and in that case this + /// array will contain multiple elements. + #[prost(message, repeated, tag="1")] + pub resource_metrics: ::prost::alloc::vec::Vec, +} +/// A collection of InstrumentationLibraryMetrics from a Resource. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceMetrics { + /// The resource for the metrics in this message. + /// If this field is not set then no resource info is known. + #[prost(message, optional, tag="1")] + pub resource: ::core::option::Option, + /// A list of metrics that originate from a resource. 
+ #[prost(message, repeated, tag="2")] + pub instrumentation_library_metrics: ::prost::alloc::vec::Vec, + /// This schema_url applies to the data in the "resource" field. It does not apply + /// to the data in the "instrumentation_library_metrics" field which have their own + /// schema_url field. + #[prost(string, tag="3")] + pub schema_url: ::prost::alloc::string::String, +} +/// A collection of Metrics produced by an InstrumentationLibrary. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstrumentationLibraryMetrics { + /// The instrumentation library information for the metrics in this message. + /// Semantically when InstrumentationLibrary isn't set, it is equivalent with + /// an empty instrumentation library name (unknown). + #[prost(message, optional, tag="1")] + pub instrumentation_library: ::core::option::Option, + /// A list of metrics that originate from an instrumentation library. + #[prost(message, repeated, tag="2")] + pub metrics: ::prost::alloc::vec::Vec, + /// This schema_url applies to all metrics in the "metrics" field. + #[prost(string, tag="3")] + pub schema_url: ::prost::alloc::string::String, +} +/// Defines a Metric which has one or more timeseries. The following is a +/// brief summary of the Metric data model. For more details, see: +/// +/// +/// +/// +/// The data model and relation between entities is shown in the +/// diagram below. Here, "DataPoint" is the term used to refer to any +/// one of the specific data point value types, and "points" is the term used +/// to refer to any one of the lists of points contained in the Metric. +/// +/// - Metric is composed of a metadata and data. +/// - Metadata part contains a name, description, unit. +/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +/// - DataPoint contains timestamps, attributes, and one of the possible value type +/// fields. 
+/// +/// Metric +/// +------------+ +/// |name | +/// |description | +/// |unit | +------------------------------------+ +/// |data |---> |Gauge, Sum, Histogram, Summary, ... | +/// +------------+ +------------------------------------+ +/// +/// Data [One of Gauge, Sum, Histogram, Summary, ...] +/// +-----------+ +/// |... | // Metadata about the Data. +/// |points |--+ +/// +-----------+ | +/// | +---------------------------+ +/// | |DataPoint 1 | +/// v |+------+------+ +------+ | +/// +-----+ ||label |label |...|label | | +/// | 1 |-->||value1|value2|...|valueN| | +/// +-----+ |+------+------+ +------+ | +/// | . | |+-----+ | +/// | . | ||value| | +/// | . | |+-----+ | +/// | . | +---------------------------+ +/// | . | . +/// | . | . +/// | . | . +/// | . | +---------------------------+ +/// | . | |DataPoint M | +/// +-----+ |+------+------+ +------+ | +/// | M |-->||label |label |...|label | | +/// +-----+ ||value1|value2|...|valueN| | +/// |+------+------+ +------+ | +/// |+-----+ | +/// ||value| | +/// |+-----+ | +/// +---------------------------+ +/// +/// Each distinct type of DataPoint represents the output of a specific +/// aggregation function, the result of applying the DataPoint's +/// associated function of to one or more measurements. +/// +/// All DataPoint types have three common fields: +/// - Attributes includes key-value pairs associated with the data point +/// - TimeUnixNano is required, set to the end time of the aggregation +/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +/// having an AggregationTemporality field, as discussed below. +/// +/// Both TimeUnixNano and StartTimeUnixNano values are expressed as +/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +/// +/// # TimeUnixNano +/// +/// This field is required, having consistent interpretation across +/// DataPoint types. TimeUnixNano is the moment corresponding to when +/// the data point's aggregate value was captured. 
+/// +/// Data points with the 0 value for TimeUnixNano SHOULD be rejected +/// by consumers. +/// +/// # StartTimeUnixNano +/// +/// StartTimeUnixNano in general allows detecting when a sequence of +/// observations is unbroken. This field indicates to consumers the +/// start time for points with cumulative and delta +/// AggregationTemporality, and it should be included whenever possible +/// to support correct rate calculation. Although it may be omitted +/// when the start time is truly unknown, setting StartTimeUnixNano is +/// strongly encouraged. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metric { + /// name of the metric, including its DNS name prefix. It must be unique. + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + /// description of the metric, which can be used in documentation. + #[prost(string, tag="2")] + pub description: ::prost::alloc::string::String, + /// unit in which the metric value is reported. Follows the format + /// described by + #[prost(string, tag="3")] + pub unit: ::prost::alloc::string::String, + /// Data determines the aggregation type (if any) of the metric, what is the + /// reported value type for the data points, as well as the relatationship to + /// the time interval over which they are reported. + #[prost(oneof="metric::Data", tags="5, 7, 9, 10, 11")] + pub data: ::core::option::Option, +} +/// Nested message and enum types in `Metric`. +pub mod metric { + /// Data determines the aggregation type (if any) of the metric, what is the + /// reported value type for the data points, as well as the relatationship to + /// the time interval over which they are reported. 
+ #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Data { + #[prost(message, tag="5")] + Gauge(super::Gauge), + #[prost(message, tag="7")] + Sum(super::Sum), + #[prost(message, tag="9")] + Histogram(super::Histogram), + #[prost(message, tag="10")] + ExponentialHistogram(super::ExponentialHistogram), + #[prost(message, tag="11")] + Summary(super::Summary), + } +} +/// Gauge represents the type of a scalar metric that always exports the +/// "current value" for every data point. It should be used for an "unknown" +/// aggregation. +/// +/// A Gauge does not support different aggregation temporalities. Given the +/// aggregation is unknown, points cannot be combined using the same +/// aggregation, regardless of aggregation temporalities. Therefore, +/// AggregationTemporality is not included. Consequently, this also means +/// "StartTimeUnixNano" is ignored for all data points. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Gauge { + #[prost(message, repeated, tag="1")] + pub data_points: ::prost::alloc::vec::Vec, +} +/// Sum represents the type of a scalar metric that is calculated as a sum of all +/// reported measurements over a time interval. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Sum { + #[prost(message, repeated, tag="1")] + pub data_points: ::prost::alloc::vec::Vec, + /// aggregation_temporality describes if the aggregator reports delta changes + /// since last report time, or cumulative changes since a fixed start time. + #[prost(enumeration="AggregationTemporality", tag="2")] + pub aggregation_temporality: i32, + /// If "true" means that the sum is monotonic. + #[prost(bool, tag="3")] + pub is_monotonic: bool, +} +/// Histogram represents the type of a metric that is calculated by aggregating +/// as a Histogram of all reported measurements over a time interval. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Histogram { + #[prost(message, repeated, tag="1")] + pub data_points: ::prost::alloc::vec::Vec, + /// aggregation_temporality describes if the aggregator reports delta changes + /// since last report time, or cumulative changes since a fixed start time. + #[prost(enumeration="AggregationTemporality", tag="2")] + pub aggregation_temporality: i32, +} +/// ExponentialHistogram represents the type of a metric that is calculated by aggregating +/// as a ExponentialHistogram of all reported double measurements over a time interval. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExponentialHistogram { + #[prost(message, repeated, tag="1")] + pub data_points: ::prost::alloc::vec::Vec, + /// aggregation_temporality describes if the aggregator reports delta changes + /// since last report time, or cumulative changes since a fixed start time. + #[prost(enumeration="AggregationTemporality", tag="2")] + pub aggregation_temporality: i32, +} +/// Summary metric data are used to convey quantile summaries, +/// a Prometheus (see: ) +/// and OpenMetrics (see: ) +/// data type. These data points cannot always be merged in a meaningful way. +/// While they can be useful in some applications, histogram data points are +/// recommended for new applications. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Summary { + #[prost(message, repeated, tag="1")] + pub data_points: ::prost::alloc::vec::Vec, +} +/// NumberDataPoint is a single data point in a timeseries that describes the +/// time-varying scalar value of a metric. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NumberDataPoint { + /// The set of key/value pairs that uniquely identify the timeseries from + /// where this point belongs. The list may be empty (may contain 0 elements). + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). 
+ #[prost(message, repeated, tag="7")] + pub attributes: ::prost::alloc::vec::Vec, + /// StartTimeUnixNano is optional but strongly encouraged, see the + /// the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="2")] + pub start_time_unix_nano: u64, + /// TimeUnixNano is required, see the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="3")] + pub time_unix_nano: u64, + /// (Optional) List of exemplars collected from + /// measurements that were used to form the data point + #[prost(message, repeated, tag="5")] + pub exemplars: ::prost::alloc::vec::Vec, + /// Flags that apply to this specific data point. See DataPointFlags + /// for the available flags and their meaning. + #[prost(uint32, tag="8")] + pub flags: u32, + /// The value itself. A point is considered invalid when one of the recognized + /// value fields is not present inside this oneof. + #[prost(oneof="number_data_point::Value", tags="4, 6")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `NumberDataPoint`. +pub mod number_data_point { + /// The value itself. A point is considered invalid when one of the recognized + /// value fields is not present inside this oneof. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(double, tag="4")] + AsDouble(f64), + #[prost(sfixed64, tag="6")] + AsInt(i64), + } +} +/// HistogramDataPoint is a single data point in a timeseries that describes the +/// time-varying values of a Histogram. A Histogram contains summary statistics +/// for a population of values, it may optionally contain the distribution of +/// those values across a set of buckets. +/// +/// If the histogram contains the distribution of values, then both +/// "explicit_bounds" and "bucket counts" fields must be defined. 
+/// If the histogram does not contain the distribution of values, then both +/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +/// "sum" are known. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HistogramDataPoint { + /// The set of key/value pairs that uniquely identify the timeseries from + /// where this point belongs. The list may be empty (may contain 0 elements). + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="9")] + pub attributes: ::prost::alloc::vec::Vec, + /// StartTimeUnixNano is optional but strongly encouraged, see the + /// the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="2")] + pub start_time_unix_nano: u64, + /// TimeUnixNano is required, see the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="3")] + pub time_unix_nano: u64, + /// count is the number of values in the population. Must be non-negative. This + /// value must be equal to the sum of the "count" fields in buckets if a + /// histogram is provided. + #[prost(fixed64, tag="4")] + pub count: u64, + /// sum of the values in the population. If count is zero then this field + /// must be zero. + /// + /// Note: Sum should only be filled out when measuring non-negative discrete + /// events, and is assumed to be monotonic over the values of these events. + /// Negative events *can* be recorded, but sum should not be filled out when + /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, + /// see: + #[prost(double, tag="5")] + pub sum: f64, + /// bucket_counts is an optional field contains the count values of histogram + /// for each bucket. + /// + /// The sum of the bucket_counts must equal the value in the count field. 
+ /// + /// The number of elements in bucket_counts array must be by one greater than + /// the number of elements in explicit_bounds array. + #[prost(fixed64, repeated, tag="6")] + pub bucket_counts: ::prost::alloc::vec::Vec, + /// explicit_bounds specifies buckets with explicitly defined bounds for values. + /// + /// The boundaries for bucket at index i are: + /// + /// (-infinity, explicit_bounds\[i]\] for i == 0 + /// (explicit_bounds\[i-1\], explicit_bounds\[i]\] for 0 < i < size(explicit_bounds) + /// (explicit_bounds\[i-1\], +infinity) for i == size(explicit_bounds) + /// + /// The values in the explicit_bounds array must be strictly increasing. + /// + /// Histogram buckets are inclusive of their upper boundary, except the last + /// bucket where the boundary is at infinity. This format is intentionally + /// compatible with the OpenMetrics histogram definition. + #[prost(double, repeated, tag="7")] + pub explicit_bounds: ::prost::alloc::vec::Vec, + /// (Optional) List of exemplars collected from + /// measurements that were used to form the data point + #[prost(message, repeated, tag="8")] + pub exemplars: ::prost::alloc::vec::Vec, + /// Flags that apply to this specific data point. See DataPointFlags + /// for the available flags and their meaning. + #[prost(uint32, tag="10")] + pub flags: u32, +} +/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +/// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +/// summary statistics for a population of values, it may optionally contain the +/// distribution of those values across a set of buckets. +/// +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExponentialHistogramDataPoint { + /// The set of key/value pairs that uniquely identify the timeseries from + /// where this point belongs. The list may be empty (may contain 0 elements). 
+ /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="1")] + pub attributes: ::prost::alloc::vec::Vec, + /// StartTimeUnixNano is optional but strongly encouraged, see the + /// the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="2")] + pub start_time_unix_nano: u64, + /// TimeUnixNano is required, see the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="3")] + pub time_unix_nano: u64, + /// count is the number of values in the population. Must be + /// non-negative. This value must be equal to the sum of the "bucket_counts" + /// values in the positive and negative Buckets plus the "zero_count" field. + #[prost(fixed64, tag="4")] + pub count: u64, + /// sum of the values in the population. If count is zero then this field + /// must be zero. + /// + /// Note: Sum should only be filled out when measuring non-negative discrete + /// events, and is assumed to be monotonic over the values of these events. + /// Negative events *can* be recorded, but sum should not be filled out when + /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, + /// see: + #[prost(double, tag="5")] + pub sum: f64, + /// scale describes the resolution of the histogram. Boundaries are + /// located at powers of the base, where: + /// + /// base = (2^(2^-scale)) + /// + /// The histogram bucket identified by `index`, a signed integer, + /// contains values that are greater than or equal to (base^index) and + /// less than (base^(index+1)). + /// + /// The positive and negative ranges of the histogram are expressed + /// separately. Negative values are mapped by their absolute value + /// into the negative range using the same scale as the positive range. 
+ /// + /// scale is not restricted by the protocol, as the permissible + /// values depend on the range of the data. + #[prost(sint32, tag="6")] + pub scale: i32, + /// zero_count is the count of values that are either exactly zero or + /// within the region considered zero by the instrumentation at the + /// tolerated degree of precision. This bucket stores values that + /// cannot be expressed using the standard exponential formula as + /// well as values that have been rounded to zero. + /// + /// Implementations MAY consider the zero bucket to have probability + /// mass equal to (zero_count / count). + #[prost(fixed64, tag="7")] + pub zero_count: u64, + /// positive carries the positive range of exponential bucket counts. + #[prost(message, optional, tag="8")] + pub positive: ::core::option::Option, + /// negative carries the negative range of exponential bucket counts. + #[prost(message, optional, tag="9")] + pub negative: ::core::option::Option, + /// Flags that apply to this specific data point. See DataPointFlags + /// for the available flags and their meaning. + #[prost(uint32, tag="10")] + pub flags: u32, + /// (Optional) List of exemplars collected from + /// measurements that were used to form the data point + #[prost(message, repeated, tag="11")] + pub exemplars: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `ExponentialHistogramDataPoint`. +pub mod exponential_histogram_data_point { + /// Buckets are a set of bucket counts, encoded in a contiguous array + /// of counts. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Buckets { + /// Offset is the bucket index of the first entry in the bucket_counts array. + /// + /// Note: This uses a varint encoding as a simple form of compression. + #[prost(sint32, tag="1")] + pub offset: i32, + /// Count is an array of counts, where count\[i\] carries the count + /// of the bucket at index (offset+i). 
count\[i\] is the count of + /// values greater than or equal to base^(offset+i) and less than + /// base^(offset+i+1). + /// + /// Note: By contrast, the explicit HistogramDataPoint uses + /// fixed64. This field is expected to have many buckets, + /// especially zeros, so uint64 has been selected to ensure + /// varint encoding. + #[prost(uint64, repeated, tag="2")] + pub bucket_counts: ::prost::alloc::vec::Vec, + } +} +/// SummaryDataPoint is a single data point in a timeseries that describes the +/// time-varying values of a Summary metric. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SummaryDataPoint { + /// The set of key/value pairs that uniquely identify the timeseries from + /// where this point belongs. The list may be empty (may contain 0 elements). + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="7")] + pub attributes: ::prost::alloc::vec::Vec, + /// StartTimeUnixNano is optional but strongly encouraged, see the + /// the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="2")] + pub start_time_unix_nano: u64, + /// TimeUnixNano is required, see the detailed comments above Metric. + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="3")] + pub time_unix_nano: u64, + /// count is the number of values in the population. Must be non-negative. + #[prost(fixed64, tag="4")] + pub count: u64, + /// sum of the values in the population. If count is zero then this field + /// must be zero. + /// + /// Note: Sum should only be filled out when measuring non-negative discrete + /// events, and is assumed to be monotonic over the values of these events. + /// Negative events *can* be recorded, but sum should not be filled out when + /// doing so. 
This is specifically to enforce compatibility w/ OpenMetrics, + /// see: + #[prost(double, tag="5")] + pub sum: f64, + /// (Optional) list of values at different quantiles of the distribution calculated + /// from the current snapshot. The quantiles must be strictly increasing. + #[prost(message, repeated, tag="6")] + pub quantile_values: ::prost::alloc::vec::Vec, + /// Flags that apply to this specific data point. See DataPointFlags + /// for the available flags and their meaning. + #[prost(uint32, tag="8")] + pub flags: u32, +} +/// Nested message and enum types in `SummaryDataPoint`. +pub mod summary_data_point { + /// Represents the value at a given quantile of a distribution. + /// + /// To record Min and Max values following conventions are used: + /// - The 1.0 quantile is equivalent to the maximum value observed. + /// - The 0.0 quantile is equivalent to the minimum value observed. + /// + /// See the following issue for more context: + /// + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ValueAtQuantile { + /// The quantile of a distribution. Must be in the interval + /// [0.0, 1.0]. + #[prost(double, tag="1")] + pub quantile: f64, + /// The value at the given quantile of a distribution. + /// + /// Quantile values must NOT be negative. + #[prost(double, tag="2")] + pub value: f64, + } +} +/// A representation of an exemplar, which is a sample input measurement. +/// Exemplars also hold information about the environment when the measurement +/// was recorded, for example the span and trace ID of the active span when the +/// exemplar was recorded. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Exemplar { + /// The set of key/value pairs that were filtered out by the aggregator, but + /// recorded alongside the original measurement. 
Only key/value pairs that were + /// filtered out by the aggregator should be included + #[prost(message, repeated, tag="7")] + pub filtered_attributes: ::prost::alloc::vec::Vec, + /// time_unix_nano is the exact time when this exemplar was recorded + /// + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + /// 1970. + #[prost(fixed64, tag="2")] + pub time_unix_nano: u64, + /// (Optional) Span ID of the exemplar trace. + /// span_id may be missing if the measurement is not recorded inside a trace + /// or if the trace is not sampled. + #[prost(bytes="vec", tag="4")] + pub span_id: ::prost::alloc::vec::Vec, + /// (Optional) Trace ID of the exemplar trace. + /// trace_id may be missing if the measurement is not recorded inside a trace + /// or if the trace is not sampled. + #[prost(bytes="vec", tag="5")] + pub trace_id: ::prost::alloc::vec::Vec, + /// The value of the measurement that was recorded. An exemplar is + /// considered invalid when one of the recognized value fields is not present + /// inside this oneof. + #[prost(oneof="exemplar::Value", tags="3, 6")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `Exemplar`. +pub mod exemplar { + /// The value of the measurement that was recorded. An exemplar is + /// considered invalid when one of the recognized value fields is not present + /// inside this oneof. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(double, tag="3")] + AsDouble(f64), + #[prost(sfixed64, tag="6")] + AsInt(i64), + } +} +/// AggregationTemporality defines how a metric aggregator reports aggregated +/// values. It describes how those values relate to the time interval over +/// which they are aggregated. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AggregationTemporality { + /// UNSPECIFIED is the default AggregationTemporality, it MUST not be used. 
+ Unspecified = 0, + /// DELTA is an AggregationTemporality for a metric aggregator which reports + /// changes since last report time. Successive metrics contain aggregation of + /// values from continuous and non-overlapping intervals. + /// + /// The values for a DELTA metric are based only on the time interval + /// associated with one measurement cycle. There is no dependency on + /// previous measurements like is the case for CUMULATIVE metrics. + /// + /// For example, consider a system measuring the number of requests that + /// it receives and reports the sum of these requests every second as a + /// DELTA metric: + /// + /// 1. The system starts receiving at time=t_0. + /// 2. A request is received, the system measures 1 request. + /// 3. A request is received, the system measures 1 request. + /// 4. A request is received, the system measures 1 request. + /// 5. The 1 second collection cycle ends. A metric is exported for the + /// number of requests received over the interval of time t_0 to + /// t_0+1 with a value of 3. + /// 6. A request is received, the system measures 1 request. + /// 7. A request is received, the system measures 1 request. + /// 8. The 1 second collection cycle ends. A metric is exported for the + /// number of requests received over the interval of time t_0+1 to + /// t_0+2 with a value of 2. + Delta = 1, + /// CUMULATIVE is an AggregationTemporality for a metric aggregator which + /// reports changes since a fixed start time. This means that current values + /// of a CUMULATIVE metric depend on all previous measurements since the + /// start time. Because of this, the sender is required to retain this state + /// in some form. If this state is lost or invalidated, the CUMULATIVE metric + /// values MUST be reset and a new fixed start time following the last + /// reported measurement time sent MUST be used. 
+ /// + /// For example, consider a system measuring the number of requests that + /// it receives and reports the sum of these requests every second as a + /// CUMULATIVE metric: + /// + /// 1. The system starts receiving at time=t_0. + /// 2. A request is received, the system measures 1 request. + /// 3. A request is received, the system measures 1 request. + /// 4. A request is received, the system measures 1 request. + /// 5. The 1 second collection cycle ends. A metric is exported for the + /// number of requests received over the interval of time t_0 to + /// t_0+1 with a value of 3. + /// 6. A request is received, the system measures 1 request. + /// 7. A request is received, the system measures 1 request. + /// 8. The 1 second collection cycle ends. A metric is exported for the + /// number of requests received over the interval of time t_0 to + /// t_0+2 with a value of 5. + /// 9. The system experiences a fault and loses state. + /// 10. The system recovers and resumes receiving at time=t_1. + /// 11. A request is received, the system measures 1 request. + /// 12. The 1 second collection cycle ends. A metric is exported for the + /// number of requests received over the interval of time t_1 to + /// t_0+1 with a value of 1. + /// + /// Note: Even though, when reporting changes since last report time, using + /// CUMULATIVE is valid, it is not recommended. This may cause problems for + /// systems that do not use start_time to determine when the aggregation + /// value was reset (e.g. Prometheus). + Cumulative = 2, +} +impl AggregationTemporality { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + AggregationTemporality::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED", + AggregationTemporality::Delta => "AGGREGATION_TEMPORALITY_DELTA", + AggregationTemporality::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE", + } + } +} +/// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +/// bit-field representing 32 distinct boolean flags. Each flag defined in this +/// enum is a bit-mask. To test the presence of a single flag in the flags of +/// a data point, for example, use an expression like: +/// +/// (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE +/// +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum DataPointFlags { + FlagNone = 0, + /// This DataPoint is valid but has no recorded value. This value + /// SHOULD be used to reflect explicitly missing data in a series, as + /// for an equivalent to the Prometheus "staleness marker". + FlagNoRecordedValue = 1, +} +impl DataPointFlags { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + DataPointFlags::FlagNone => "FLAG_NONE", + DataPointFlags::FlagNoRecordedValue => "FLAG_NO_RECORDED_VALUE", + } + } +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs new file mode 100644 index 0000000000..02f81cccb4 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs @@ -0,0 +1,13 @@ +/// Resource information. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Resource { + /// Set of attributes that describe the resource. 
+ /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="1")] + pub attributes: ::prost::alloc::vec::Vec, + /// dropped_attributes_count is the number of dropped attributes. If the value is 0, then + /// no attributes were dropped. + #[prost(uint32, tag="2")] + pub dropped_attributes_count: u32, +} diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs new file mode 100644 index 0000000000..a8766fcd26 --- /dev/null +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs @@ -0,0 +1,380 @@ +/// TracesData represents the traces data that can be stored in a persistent storage, +/// OR can be embedded by other protocols that transfer OTLP traces data but do +/// not implement the OTLP protocol. +/// +/// The main difference between this message and collector protocol is that +/// in this message there will not be any "control" or "metadata" specific to +/// OTLP protocol. +/// +/// When new fields are added into this message, the OTLP request MUST be updated +/// as well. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TracesData { + /// An array of ResourceSpans. + /// For data coming from a single resource this array will typically contain + /// one element. Intermediary nodes that receive data from multiple origins + /// typically batch the data before forwarding further and in that case this + /// array will contain multiple elements. + #[prost(message, repeated, tag="1")] + pub resource_spans: ::prost::alloc::vec::Vec, +} +/// A collection of InstrumentationLibrarySpans from a Resource. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceSpans { + /// The resource for the spans in this message. + /// If this field is not set then no resource info is known. 
+ #[prost(message, optional, tag="1")] + pub resource: ::core::option::Option, + /// A list of InstrumentationLibrarySpans that originate from a resource. + #[prost(message, repeated, tag="2")] + pub instrumentation_library_spans: ::prost::alloc::vec::Vec, + /// This schema_url applies to the data in the "resource" field. It does not apply + /// to the data in the "instrumentation_library_spans" field which have their own + /// schema_url field. + #[prost(string, tag="3")] + pub schema_url: ::prost::alloc::string::String, +} +/// A collection of Spans produced by an InstrumentationLibrary. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstrumentationLibrarySpans { + /// The instrumentation library information for the spans in this message. + /// Semantically when InstrumentationLibrary isn't set, it is equivalent with + /// an empty instrumentation library name (unknown). + #[prost(message, optional, tag="1")] + pub instrumentation_library: ::core::option::Option, + /// A list of Spans that originate from an instrumentation library. + #[prost(message, repeated, tag="2")] + pub spans: ::prost::alloc::vec::Vec, + /// This schema_url applies to all spans and span events in the "spans" field. + #[prost(string, tag="3")] + pub schema_url: ::prost::alloc::string::String, +} +/// Span represents a single operation within a trace. Spans can be +/// nested to form a trace tree. Spans may also be linked to other spans +/// from the same or different trace and form graphs. Often, a trace +/// contains a root span that describes the end-to-end latency, and one +/// or more subspans for its sub-operations. A trace can also contain +/// multiple root spans, or none at all. Spans do not need to be +/// contiguous - there may be gaps or overlaps between spans in a trace. +/// +/// The next available field id is 17. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Span { + /// A unique identifier for a trace. 
All spans from the same trace share + /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + /// is considered invalid. + /// + /// This field is semantically required. Receiver should generate new + /// random trace_id if empty or invalid trace_id was received. + /// + /// This field is required. + #[prost(bytes="vec", tag="1")] + pub trace_id: ::prost::alloc::vec::Vec, + /// A unique identifier for a span within a trace, assigned when the span + /// is created. The ID is an 8-byte array. An ID with all zeroes is considered + /// invalid. + /// + /// This field is semantically required. Receiver should generate new + /// random span_id if empty or invalid span_id was received. + /// + /// This field is required. + #[prost(bytes="vec", tag="2")] + pub span_id: ::prost::alloc::vec::Vec, + /// trace_state conveys information about request position in multiple distributed tracing graphs. + /// It is a trace_state in w3c-trace-context format: + /// See also for more details about this field. + #[prost(string, tag="3")] + pub trace_state: ::prost::alloc::string::String, + /// The `span_id` of this span's parent span. If this is a root span, then this + /// field must be empty. The ID is an 8-byte array. + #[prost(bytes="vec", tag="4")] + pub parent_span_id: ::prost::alloc::vec::Vec, + /// A description of the span's operation. + /// + /// For example, the name can be a qualified method name or a file name + /// and a line number where the operation is called. A best practice is to use + /// the same display name at the same call point in an application. + /// This makes it easier to correlate spans in different traces. + /// + /// This field is semantically required to be set to non-empty string. + /// Empty value is equivalent to an unknown span name. + /// + /// This field is required. + #[prost(string, tag="5")] + pub name: ::prost::alloc::string::String, + /// Distinguishes between spans generated in a particular context. 
For example, + /// two spans with the same name may be distinguished using `CLIENT` (caller) + /// and `SERVER` (callee) to identify queueing latency associated with the span. + #[prost(enumeration="span::SpanKind", tag="6")] + pub kind: i32, + /// start_time_unix_nano is the start time of the span. On the client side, this is the time + /// kept by the local machine where the span execution starts. On the server side, this + /// is the time when the server's application handler starts running. + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + /// + /// This field is semantically required and it is expected that end_time >= start_time. + #[prost(fixed64, tag="7")] + pub start_time_unix_nano: u64, + /// end_time_unix_nano is the end time of the span. On the client side, this is the time + /// kept by the local machine where the span execution ends. On the server side, this + /// is the time when the server application handler stops running. + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + /// + /// This field is semantically required and it is expected that end_time >= start_time. + #[prost(fixed64, tag="8")] + pub end_time_unix_nano: u64, + /// attributes is a collection of key/value pairs. Note, global attributes + /// like server name can be set using the resource API. Examples of attributes: + /// + /// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + /// "/http/server_latency": 300 + /// "abc.com/myattribute": true + /// "abc.com/score": 10.239 + /// + /// The OpenTelemetry API specification further restricts the allowed value types: + /// + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). 
+ #[prost(message, repeated, tag="9")] + pub attributes: ::prost::alloc::vec::Vec, + /// dropped_attributes_count is the number of attributes that were discarded. Attributes + /// can be discarded because their keys are too long or because there are too many + /// attributes. If this value is 0, then no attributes were dropped. + #[prost(uint32, tag="10")] + pub dropped_attributes_count: u32, + /// events is a collection of Event items. + #[prost(message, repeated, tag="11")] + pub events: ::prost::alloc::vec::Vec, + /// dropped_events_count is the number of dropped events. If the value is 0, then no + /// events were dropped. + #[prost(uint32, tag="12")] + pub dropped_events_count: u32, + /// links is a collection of Links, which are references from this span to a span + /// in the same or different trace. + #[prost(message, repeated, tag="13")] + pub links: ::prost::alloc::vec::Vec, + /// dropped_links_count is the number of dropped links after the maximum size was + /// enforced. If this value is 0, then no links were dropped. + #[prost(uint32, tag="14")] + pub dropped_links_count: u32, + /// An optional final status for this span. Semantically when Status isn't set, it means + /// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + #[prost(message, optional, tag="15")] + pub status: ::core::option::Option, +} +/// Nested message and enum types in `Span`. +pub mod span { + /// Event is a time-stamped annotation of the span, consisting of user-supplied + /// text description and key-value pairs. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Event { + /// time_unix_nano is the time the event occurred. + #[prost(fixed64, tag="1")] + pub time_unix_nano: u64, + /// name of the event. + /// This field is semantically required to be set to non-empty string. + #[prost(string, tag="2")] + pub name: ::prost::alloc::string::String, + /// attributes is a collection of attribute key/value pairs on the event. 
+ /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="3")] + pub attributes: ::prost::alloc::vec::Vec, + /// dropped_attributes_count is the number of dropped attributes. If the value is 0, + /// then no attributes were dropped. + #[prost(uint32, tag="4")] + pub dropped_attributes_count: u32, + } + /// A pointer from the current span to another span in the same trace or in a + /// different trace. For example, this can be used in batching operations, + /// where a single batch handler processes multiple requests from different + /// traces or when the handler receives a request from a different project. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Link { + /// A unique identifier of a trace that this linked span is part of. The ID is a + /// 16-byte array. + #[prost(bytes="vec", tag="1")] + pub trace_id: ::prost::alloc::vec::Vec, + /// A unique identifier for the linked span. The ID is an 8-byte array. + #[prost(bytes="vec", tag="2")] + pub span_id: ::prost::alloc::vec::Vec, + /// The trace_state associated with the link. + #[prost(string, tag="3")] + pub trace_state: ::prost::alloc::string::String, + /// attributes is a collection of attribute key/value pairs on the link. + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag="4")] + pub attributes: ::prost::alloc::vec::Vec, + /// dropped_attributes_count is the number of dropped attributes. If the value is 0, + /// then no attributes were dropped. + #[prost(uint32, tag="5")] + pub dropped_attributes_count: u32, + } + /// SpanKind is the type of span. Can be used to specify additional relationships between spans + /// in addition to a parent/child relationship. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum SpanKind { + /// Unspecified. 
Do NOT use as default. + /// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Unspecified = 0, + /// Indicates that the span represents an internal operation within an application, + /// as opposed to an operation happening at the boundaries. Default value. + Internal = 1, + /// Indicates that the span covers server-side handling of an RPC or other + /// remote network request. + Server = 2, + /// Indicates that the span describes a request to some remote service. + Client = 3, + /// Indicates that the span describes a producer sending a message to a broker. + /// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + /// between producer and consumer spans. A PRODUCER span ends when the message was accepted + /// by the broker while the logical processing of the message might span a much longer time. + Producer = 4, + /// Indicates that the span describes consumer receiving a message from a broker. + /// Like the PRODUCER kind, there is often no direct critical path latency relationship + /// between producer and consumer spans. + Consumer = 5, + } + impl SpanKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SpanKind::Unspecified => "SPAN_KIND_UNSPECIFIED", + SpanKind::Internal => "SPAN_KIND_INTERNAL", + SpanKind::Server => "SPAN_KIND_SERVER", + SpanKind::Client => "SPAN_KIND_CLIENT", + SpanKind::Producer => "SPAN_KIND_PRODUCER", + SpanKind::Consumer => "SPAN_KIND_CONSUMER", + } + } + } +} +/// The Status type defines a logical error model that is suitable for different +/// programming environments, including REST APIs and RPC APIs. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Status { + /// A developer-facing human readable error message. + #[prost(string, tag="2")] + pub message: ::prost::alloc::string::String, + /// The status code. + #[prost(enumeration="status::StatusCode", tag="3")] + pub code: i32, +} +/// Nested message and enum types in `Status`. +pub mod status { + /// For the semantics of status codes see + /// + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum StatusCode { + /// The default status. + Unset = 0, + /// The Span has been validated by an Application developers or Operator to have + /// completed successfully. + Ok = 1, + /// The Span contains an error. + Error = 2, + } + impl StatusCode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + StatusCode::Unset => "STATUS_CODE_UNSET", + StatusCode::Ok => "STATUS_CODE_OK", + StatusCode::Error => "STATUS_CODE_ERROR", + } + } + } +} +/// Global configuration of the trace service. All fields must be specified, or +/// the default (zero) values will be used for each type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TraceConfig { + /// The global default max number of attributes per span. + #[prost(int64, tag="4")] + pub max_number_of_attributes: i64, + /// The global default max number of annotation events per span. + #[prost(int64, tag="5")] + pub max_number_of_timed_events: i64, + /// The global default max number of attributes per timed event. + #[prost(int64, tag="6")] + pub max_number_of_attributes_per_timed_event: i64, + /// The global default max number of link entries per span. 
+ #[prost(int64, tag="7")] + pub max_number_of_links: i64, + /// The global default max number of attributes per span. + #[prost(int64, tag="8")] + pub max_number_of_attributes_per_link: i64, + /// The global default sampler used to make decisions on span sampling. + #[prost(oneof="trace_config::Sampler", tags="1, 2, 3")] + pub sampler: ::core::option::Option, +} +/// Nested message and enum types in `TraceConfig`. +pub mod trace_config { + /// The global default sampler used to make decisions on span sampling. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Sampler { + #[prost(message, tag="1")] + ConstantSampler(super::ConstantSampler), + #[prost(message, tag="2")] + TraceIdRatioBased(super::TraceIdRatioBased), + #[prost(message, tag="3")] + RateLimitingSampler(super::RateLimitingSampler), + } +} +/// Sampler that always makes a constant decision on span sampling. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConstantSampler { + #[prost(enumeration="constant_sampler::ConstantDecision", tag="1")] + pub decision: i32, +} +/// Nested message and enum types in `ConstantSampler`. +pub mod constant_sampler { + /// How spans should be sampled: + /// - Always off + /// - Always on + /// - Always follow the parent Span's decision (off if no parent). + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum ConstantDecision { + AlwaysOff = 0, + AlwaysOn = 1, + AlwaysParent = 2, + } + impl ConstantDecision { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ConstantDecision::AlwaysOff => "ALWAYS_OFF", + ConstantDecision::AlwaysOn => "ALWAYS_ON", + ConstantDecision::AlwaysParent => "ALWAYS_PARENT", + } + } + } +} +/// Sampler that tries to uniformly sample traces with a given ratio. +/// The ratio of sampling a trace is equal to that of the specified ratio. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TraceIdRatioBased { + /// The desired ratio of sampling. Must be within [0.0, 1.0]. + #[prost(double, tag="1")] + pub sampling_ratio: f64, +} +/// Sampler that tries to sample with a rate per time window. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RateLimitingSampler { + /// Rate per second. + #[prost(int64, tag="1")] + pub qps: i64, +} diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index b6a8f53e58..1051abc20c 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -1,58 +1,114 @@ use protobuf_codegen::Customize; use protoc_grpcio::compile_grpc_protos; use std::collections::HashMap; -use std::path::PathBuf; +use std::path::Path; +use tempfile::TempDir; + +const GRPCIO_OUT_DIR: &str = "src/proto/grpcio"; +const GRPCIO_PROTO_FILES: &[&str] = &[ + "src/proto/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace_config.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto", + "src/proto/tracez.proto", +]; +const GRPCIO_INCLUDES: &[&str] = &["src/proto/opentelemetry-proto/", "src/proto"]; + 
+const TONIC_OUT_DIR: &str = "src/proto/tonic"; +const TONIC_PROTO_FILES: &[&str] = &[ + "src/proto/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace_config.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/logs/v1/logs.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/collector/logs/v1/logs_service.proto", +]; +const TONIC_INCLUDES: &[&str] = &["src/proto/opentelemetry-proto"]; // This test helps to keep files generated and used by grpcio update to date. // If the test fails, it means the generated files has been changed. Please commit the change // and rerun test. It should pass at the second time. 
#[test] fn build_grpc() { - let before_build = build_content_map(); + let before_build = build_content_map(GRPCIO_OUT_DIR); + + let out_dir = TempDir::new().expect("failed to create temp dir to store the generated files"); + compile_grpc_protos( - &[ - "src/proto/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/trace/v1/trace_config.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto", - "src/proto/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto", - "src/proto/tracez.proto" - ], - &["src/proto/opentelemetry-proto/", "src/proto"], - "src/proto/grpcio", + GRPCIO_PROTO_FILES, + GRPCIO_INCLUDES, + out_dir.path(), Some(Customize { expose_fields: Some(true), serde_derive: Some(true), ..Default::default() }), ) - .expect("Error generating protobuf"); - let after_build = build_content_map(); - // we cannot use assert_eq! here because it will print both maps when they don't match, which - // makes the error message unreadable. - // If you find the test passed locally but not in CI pipeline. Try update the dependency. It may - // be a new version of protobuf or other dependencies - // DO NOT use assert_eq! here as it will print all generated file when proto changes. 
-    assert!(
-        before_build == after_build,
-        "generated file has changed, please commit the change file and rerun the test"
-    );
+    .expect("error generating protobuf");
+    let after_build = build_content_map(out_dir.path());
+    ensure_files_are_same(before_build, after_build, GRPCIO_OUT_DIR);
 }
 
-fn build_content_map() -> HashMap<PathBuf, String> {
-    std::fs::read_dir("src/proto/grpcio")
-        .expect("cannot open dict of generated grpc files")
+#[test]
+fn build_tonic() {
+    let before_build = build_content_map(TONIC_OUT_DIR);
+
+    let out_dir = TempDir::new().expect("failed to create temp dir to store the generated files");
+
+    // build the generated files into OUT_DIR for now so we don't have to touch the src unless we have to
+    tonic_build::configure()
+        .build_server(true)
+        .build_client(true)
+        .out_dir(out_dir.path())
+        .compile(TONIC_PROTO_FILES, TONIC_INCLUDES)
+        .expect("cannot compile protobuf using tonic");
+
+    let after_build = build_content_map(out_dir.path());
+    ensure_files_are_same(before_build, after_build, TONIC_OUT_DIR);
+}
+
+fn build_content_map(path: impl AsRef<Path>) -> HashMap<String, String> {
+    std::fs::read_dir(path)
+        .expect("cannot open directory of generated files")
         .into_iter()
         .flatten()
         .map(|entry| {
+            let path = entry.path();
+            let file_name = path
+                .file_name()
+                .expect("file name should always exist for generated files");
             (
-                entry.path(),
-                std::fs::read_to_string(entry.path()).unwrap_or_else(|_| {
-                    panic!("cannot read from file {}", entry.path().to_string_lossy())
-                }),
+                file_name.to_string_lossy().to_string(),
+                std::fs::read_to_string(path).expect("cannot read from existing generated file"),
             )
         })
         .collect()
 }
+
+fn ensure_files_are_same(
+    before_build: HashMap<String, String>,
+    after_build: HashMap<String, String>,
+    target_dir: &'static str,
+) {
+    if after_build == before_build {
+        return;
+    }
+
+    if std::env::var("CI").is_ok() {
+        panic!("generated file has changed but it's a CI environment, please rerun this test locally and commit the changes");
+    }
+
+    // if there is at least one change,
 we will just copy the whole directory over
+    for (file_name, content) in after_build {
+        std::fs::write(Path::new(target_dir).join(file_name), content)
+            .expect("cannot write to the generated proto file. If it's happening in CI env, please rerun the test locally and commit the change");
+    }
+
+    panic!("generated file has changed, please commit the changed file and rerun the test");
+}
diff --git a/scripts/lint.sh b/scripts/lint.sh
index c5217f6dc5..e47599d1ec 100755
--- a/scripts/lint.sh
+++ b/scripts/lint.sh
@@ -52,8 +52,8 @@ if rustup component add clippy; then
   cargo_feature opentelemetry-proto "full"
   cargo_feature opentelemetry-proto "gen-tonic,traces"
   cargo_feature opentelemetry-proto "gen-tonic,traces,with-serde"
-  cargo_feature opentelemetry-proto "gen-tonic,traces,build-client"
-  cargo_feature opentelemetry-proto "gen-tonic,metrics,build-client"
+  cargo_feature opentelemetry-proto "gen-tonic,traces"
+  cargo_feature opentelemetry-proto "gen-tonic,metrics"
   cargo_feature opentelemetry-proto "gen-protoc,traces"
   cargo_feature opentelemetry-proto "gen-protoc,traces,with-serde"
   cargo_feature opentelemetry-proto "gen-protoc,zpages"