diff --git a/.cspell.json b/.cspell.json index 23ecd03c8b..be8b4c0cda 100644 --- a/.cspell.json +++ b/.cspell.json @@ -49,7 +49,9 @@ "nocapture", "Ochtman", "opentelemetry", + "OTELCOL", "OTLP", + "periodicreader", "protoc", "quantile", "Redelmeier", @@ -59,6 +61,7 @@ "shoppingcart", "struct", "Tescher", + "testresults", "tracerprovider", "updown", "Zhongyang", diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml index fbe2085b54..9d48b6bc3c 100644 --- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -20,7 +20,7 @@ body: - type: textarea id: api-version attributes: - label: API Version + label: OpenTelemetry API Version (i.e., version of the `opentelemetry` crate) description: What version of the `opentelemetry` crate are you using? placeholder: 0.x, 1.x, etc. validations: @@ -28,7 +28,7 @@ body: - type: textarea id: sdk-version attributes: - label: SDK Version + label: OpenTelemetry SDK Version (i.e., version of the `opentelemetry_sdk` crate) description: What version of the `opentelemetry_sdk` crate are you using? placeholder: 0.x, 1.x, etc. validations: @@ -39,6 +39,7 @@ body: label: What Exporter(s) are you seeing the problem on? multiple: true options: + - stdout - OTLP - Zipkin - Prometheus diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f7002f0c0b..34d979987d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,6 +8,9 @@ on: - main paths-ignore: - '**.md' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: test: strategy: @@ -22,8 +25,9 @@ jobs: - rust: stable os: macos-latest - rust: stable - os: actuated-arm64-4cpu-16gb + os: otel-linux-arm64 runs-on: ${{ matrix.os }} + continue-on-error: ${{ matrix.rust == 'beta' }} steps: - name: Free disk space if: ${{ matrix.os == 'ubuntu-latest'}} @@ -73,39 +77,20 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly with: - toolchain: nightly-2024-05-01 + toolchain: nightly-2024-06-30 components: rustfmt - name: external-type-check run: | - cargo install cargo-check-external-types + cargo install cargo-check-external-types@0.1.13 cd ${{ matrix.example }} cargo check-external-types --config allowed-external-types.toml - non-default-examples: - strategy: - matrix: - os: [ windows-latest, ubuntu-latest ] - example: [opentelemetry-otlp/examples/basic-otlp] - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - uses: dtolnay/rust-toolchain@stable - with: - components: rustfmt - - uses: arduino/setup-protoc@v3 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Build - run: | - cd ${{ matrix.example }} - cargo build --verbose msrv: strategy: matrix: os: [windows-latest, ubuntu-latest] - rust: [1.65.0, 1.70.0] + rust: [1.75.0] runs-on: ${{ matrix.os }} + continue-on-error: true steps: - uses: actions/checkout@v4 with: @@ -131,7 +116,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/rust-toolchain@stable with: components: rustfmt - uses: arduino/setup-protoc@v3 @@ -163,7 +148,7 @@ jobs: if: hashFiles('Cargo.lock') == '' run: cargo generate-lockfile - name: cargo llvm-cov - run: cargo llvm-cov --locked --all-features --workspace --lcov --output-path lcov.info + run: cargo llvm-cov --locked --all-features --workspace --lcov --lib --output-path lcov.info - name: Upload to codecov.io uses: codecov/codecov-action@v4 env: diff --git
a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 2f21b5f3db..badc78fb10 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -24,5 +24,5 @@ jobs: with: components: rustfmt - uses: arduino/setup-protoc@v3 - - name: Run integration tests using docker compose + - name: Run integration tests run: ./scripts/integration_tests.sh diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index ed61eb4556..caf85c03d3 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -4,7 +4,6 @@ env: on: pull_request: types: [ labeled, synchronize, opened, reopened ] - jobs: semver-compliance: # This job uses the latest published crate as baseline for comparison. runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 02a0111fdb..89a02f4cf0 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,6 @@ Cargo.lock /.idea/ -.cosine \ No newline at end of file +.cosine + +opentelemetry-otlp/tests/integration_test/result.json diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 930bf727c3..ae81dc5017 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -140,7 +140,7 @@ For a deeper discussion, see: Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. -The Opentelemetry Rust SDK comes with an error type `opentelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. All errors returned by logs module MUST be wrapped in `opentelemetry::logs::LogsError`. +The Opentelemetry Rust SDK comes with an error type `opentelemetry::Error`. In addition, each signal module defines its own error type. All errors returned by the trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by the metrics module MUST be wrapped in `opentelemetry::metrics::MetricError`. All errors returned by the logs module MUST be wrapped in `opentelemetry::logs::LogsError`. For users that want to implement their own exporters. It's RECOMMENDED to wrap all errors from the exporter into a crate-level error type, and implement `ExporterError` trait. @@ -169,7 +169,7 @@ It's important to regularly review and remove the `otel_unstable` flag from the The potential features include: - Stable and non-experimental features that compliant to specification, and have a feature flag to minimize compilation size. Example: feature flags for signals (like `logs`, `traces`, `metrics`) and runtimes (`rt-tokio`, `rt-tokio-current-thread`, `rt-async-std`). -- Stable and non-experimental features, although not part of the specification, are crucial for enhancing the tracing/log crate's functionality or boosting performance. These features are also subject to discussion and approval by the OpenTelemetry Rust Maintainers. An example of such a feature is `logs_level_enabled`. +- Stable and non-experimental features, although not part of the specification, are crucial for enhancing the tracing/log crate's functionality or boosting performance. These features are also subject to discussion and approval by the OpenTelemetry Rust Maintainers.
All such features should adhere to naming convention `_` diff --git a/Cargo.toml b/Cargo.toml index 37e52f90bc..99d8906132 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,12 +39,15 @@ reqwest = { version = "0.12", default-features = false } serde = { version = "1.0", default-features = false } serde_json = "1.0" temp-env = "0.3.6" -thiserror = { version = "1", default-features = false } -tonic = { version = "0.12", default-features = false } +thiserror = { version = "2", default-features = false } +tonic = { version = "0.12.3", default-features = false } tonic-build = "0.12" tokio = { version = "1", default-features = false } -tokio-stream = "0.1.1" -tracing = { version = "0.1", default-features = false } -tracing-core = { version = "0.1", default-features = false } +tokio-stream = "0.1" +# Using `tracing 0.1.40` because 0.1.39 (which is yanked) introduces the ability to set event names in macros, +# required for OpenTelemetry's internal logging macros. +tracing = { version = ">=0.1.40", default-features = false } +# `tracing-core >=0.1.33` is required for compatibility with `tracing >=0.1.40`. +tracing-core = { version = ">=0.1.33", default-features = false } tracing-subscriber = { version = "0.3", default-features = false } url = { version = "2.5", default-features = false } diff --git a/README.md b/README.md index 67e0fcfbd2..704d6147ca 100644 --- a/README.md +++ b/README.md @@ -17,30 +17,33 @@ analysis in order to understand your software's performance and behavior. You can export and analyze them using [Prometheus], [Jaeger], and other observability tools. -*Compiler support: [requires `rustc` 1.65+][msrv]* +*[Supported Rust Versions](#supported-rust-versions)* [Prometheus]: https://prometheus.io [Jaeger]: https://www.jaegertracing.io -[msrv]: #supported-rust-versions ## Project Status +The table below summarizes the overall status of each component. Some components +include unstable features, which are documented in their respective crate +documentation. + | Signal/Component | Overall Status | | -------------------- | ------------------ | -| Logs-API | Beta* | +| Logs-API | RC* | | Logs-SDK | Beta | | Logs-OTLP Exporter | Beta | | Logs-Appender-Tracing | Beta | -| Metrics-API | Alpha | -| Metrics-SDK | Alpha | -| Metrics-OTLP Exporter | Alpha | +| Metrics-API | RC | +| Metrics-SDK | Beta | +| Metrics-OTLP Exporter | Beta | | Traces-API | Beta | | Traces-SDK | Beta | | Traces-OTLP Exporter | Beta | *OpenTelemetry Rust is not introducing a new end user callable Logging API. Instead, it provides [Logs Bridge -API](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/bridge-api.md), +API](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/api.md), that allows one to write log appenders that can bridge existing logging libraries to the OpenTelemetry log data model. The following log appenders are available: @@ -80,7 +83,7 @@ fn main() { }); // Shutdown trace pipeline - global::shutdown_tracer_provider(); + provider.shutdown().expect("TracerProvider should shutdown successfully") } ``` @@ -159,7 +162,7 @@ Registry](https://opentelemetry.io/ecosystem/registry/?language=rust). ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.65. The current OpenTelemetry version is not guaranteed to build +version is 1.75. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. 
The current stable Rust compiler and the three most recent minor versions @@ -198,12 +201,12 @@ you're more than welcome to participate! * [Harold Dost](https://github.com/hdost) * [Julian Tescher](https://github.com/jtescher) * [Lalit Kumar Bhasin](https://github.com/lalitb) +* [Utkarsh Umesan Pillai](https://github.com/utpilla) * [Zhongyang Wu](https://github.com/TommyCpp) ### Approvers * [Shaun Cox](https://github.com/shaun-cox) -* [Utkarsh Umesan Pillai](https://github.com/utpilla) ### Emeritus diff --git a/deny.toml b/deny.toml index f1c5533733..1d700bde24 100644 --- a/deny.toml +++ b/deny.toml @@ -29,10 +29,5 @@ license-files = [ ] [advisories] -ignore = [ - # unsoundness in indirect dependencies without a safe upgrade below - "RUSTSEC-2021-0145", - "RUSTSEC-2019-0036" -] unmaintained = "allow" yanked = "allow" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index a4e3148c86..eac8a3a779 100644 --- a/examples/README.md +++ b/examples/README.md @@ -7,7 +7,7 @@ This directory contains some examples that should help you get start crates from This example uses following crates from this repo: - opentelemetry(log) -- opentelemetry-appender-log +- opentelemetry-appender-tracing - opentelemetry-stdout Check this example if you want to understand *how to instrument logs using opentelemetry*. diff --git a/examples/logs-basic/Cargo.toml b/examples/logs-basic/Cargo.toml index ae30dc6779..00321af4fc 100644 --- a/examples/logs-basic/Cargo.toml +++ b/examples/logs-basic/Cargo.toml @@ -6,10 +6,8 @@ license = "Apache-2.0" publish = false [dependencies] -opentelemetry = { path = "../../opentelemetry", features = ["logs"] } opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["logs"] } opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["logs"]} -opentelemetry-appender-log = { path = "../../opentelemetry-appender-log", default-features = false} -opentelemetry-semantic-conventions = { path = "../../opentelemetry-semantic-conventions" } -log = { workspace = true } -serde_json = { workspace = true } +opentelemetry-appender-tracing = { path = "../../opentelemetry-appender-tracing", default-features = false} +tracing = { workspace = true, features = ["std"]} +tracing-subscriber = { workspace = true, features = ["registry", "std"] } diff --git a/examples/logs-basic/README.md b/examples/logs-basic/README.md index ce2fd2c284..5a1084aee2 100644 --- a/examples/logs-basic/README.md +++ b/examples/logs-basic/README.md @@ -1,15 +1,15 @@ -# OpenTelemetry Log Appender for log - Example +# OpenTelemetry Log Appender for tracing - Example -This example shows how to use the opentelemetry-appender-log crate, which is a +This example shows how to use the opentelemetry-appender-tracing crate, which is a [logging appender](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/glossary.md#log-appender--bridge) -that bridges logs from the [log crate](https://docs.rs/log/latest/log/) to +that bridges logs from the [tracing crate](https://tracing.rs/tracing/#events) to OpenTelemetry. The example setups a LoggerProvider with stdout exporter, so logs are emitted to stdout. ## Usage -Run the following, and Logs emitted using [log](https://docs.rs/log/latest/log/) +Run the following, and Logs emitted using [tracing](https://docs.rs/tracing/latest/tracing/) will be written out to stdout. 
```shell diff --git a/examples/logs-basic/src/main.rs b/examples/logs-basic/src/main.rs index 8ebd092c80..a7edcc571d 100644 --- a/examples/logs-basic/src/main.rs +++ b/examples/logs-basic/src/main.rs @@ -1,27 +1,22 @@ -use log::{error, Level}; -use opentelemetry::KeyValue; -use opentelemetry_appender_log::OpenTelemetryLogBridge; +use opentelemetry_appender_tracing::layer; use opentelemetry_sdk::logs::LoggerProvider; use opentelemetry_sdk::Resource; -use opentelemetry_semantic_conventions::resource::SERVICE_NAME; +use tracing::error; +use tracing_subscriber::prelude::*; fn main() { - // Setup LoggerProvider with a stdout exporter let exporter = opentelemetry_stdout::LogExporter::default(); - let logger_provider = LoggerProvider::builder() - .with_resource(Resource::new([KeyValue::new( - SERVICE_NAME, - "logs-basic-example", - )])) + let provider: LoggerProvider = LoggerProvider::builder() + .with_resource( + Resource::builder() + .with_service_name("log-appender-tracing-example") + .build(), + ) .with_simple_exporter(exporter) .build(); + let layer = layer::OpenTelemetryTracingBridge::new(&provider); + tracing_subscriber::registry().with(layer).init(); - // Setup Log Appender for the log crate. - let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider); - log::set_boxed_logger(Box::new(otel_log_appender)).unwrap(); - log::set_max_level(Level::Error.to_level_filter()); - - // Emit logs using macros from the log crate. - // These logs gets piped through OpenTelemetry bridge and gets exported to stdout. - error!(target: "my-target", "hello from {}. My price is {}", "apple", 2.99); + error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message"); + let _ = provider.shutdown(); } diff --git a/examples/metrics-advanced/Cargo.toml b/examples/metrics-advanced/Cargo.toml index a5a8a7c489..31cdf73a89 100644 --- a/examples/metrics-advanced/Cargo.toml +++ b/examples/metrics-advanced/Cargo.toml @@ -7,7 +7,6 @@ publish = false [dependencies] opentelemetry = { path = "../../opentelemetry", features = ["metrics"] } -opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["metrics", "rt-tokio"] } -opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["metrics"]} +opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["spec_unstable_metrics_views", "rt-tokio"] } +opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["metrics"] } tokio = { workspace = true, features = ["full"] } -serde_json = { workspace = true } diff --git a/examples/metrics-advanced/src/main.rs b/examples/metrics-advanced/src/main.rs index aa5dbb6cac..334d68c3d7 100644 --- a/examples/metrics-advanced/src/main.rs +++ b/examples/metrics-advanced/src/main.rs @@ -2,9 +2,9 @@ use opentelemetry::global; use opentelemetry::Key; use opentelemetry::KeyValue; use opentelemetry_sdk::metrics::{ - Aggregation, Instrument, PeriodicReader, SdkMeterProvider, Stream, + Aggregation, Instrument, PeriodicReader, SdkMeterProvider, Stream, Temporality, }; -use opentelemetry_sdk::{runtime, Resource}; +use opentelemetry_sdk::Resource; use std::error::Error; fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider { @@ -44,18 +44,20 @@ fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider { } }; - let exporter = opentelemetry_stdout::MetricsExporterBuilder::default() - // uncomment the below lines to pretty print output. 
- // .with_encoder(|writer, data| - // Ok(serde_json::to_writer_pretty(writer, &data).unwrap())) + // Build exporter using Delta Temporality. + let exporter = opentelemetry_stdout::MetricExporterBuilder::default() + .with_temporality(Temporality::Delta) .build(); - let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); + + let reader = PeriodicReader::builder(exporter).build(); + + let resource = Resource::builder() + .with_service_name("metrics-advanced-example") + .build(); + let provider = SdkMeterProvider::builder() .with_reader(reader) - .with_resource(Resource::new([KeyValue::new( - "service.name", - "metrics-advanced-example", - )])) + .with_resource(resource) .with_view(my_view_rename_and_unit) .with_view(my_view_drop_attributes) .with_view(my_view_change_aggregation) @@ -77,7 +79,7 @@ async fn main() -> Result<(), Box<dyn Error>> { .f64_histogram("my_histogram") .with_unit("ms") .with_description("My histogram example description") - .init(); + .build(); // Record measurements using the histogram instrument. histogram.record( @@ -91,7 +93,7 @@ ); // Example 2 - Drop unwanted attributes using view. - let counter = meter.u64_counter("my_counter").init(); + let counter = meter.u64_counter("my_counter").build(); // Record measurements using the Counter instrument. // Though we are passing 4 attributes here, only 1 will be used @@ -115,7 +117,7 @@ .f64_histogram("my_second_histogram") .with_unit("ms") .with_description("My histogram example description") - .init(); + .build(); // Record measurements using the histogram instrument. // The values recorded are in the range of 1.2 to 1.5, warranting diff --git a/examples/metrics-basic/Cargo.toml b/examples/metrics-basic/Cargo.toml index 37b79da140..69a8fc8628 100644 --- a/examples/metrics-basic/Cargo.toml +++ b/examples/metrics-basic/Cargo.toml @@ -6,12 +6,8 @@ license = "Apache-2.0" publish = false [dependencies] -opentelemetry = { path = "../../opentelemetry", features = ["metrics", "otel_unstable"] } +opentelemetry = { path = "../../opentelemetry", features = ["metrics"] } opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["metrics", "rt-tokio"] } opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["metrics"]} tokio = { workspace = true, features = ["full"] } -serde_json = { workspace = true } -[features] -default = ["otel_unstable"] -otel_unstable = ["opentelemetry/otel_unstable"] diff --git a/examples/metrics-basic/src/main.rs b/examples/metrics-basic/src/main.rs index ecd5083d7c..113b4a332e 100644 --- a/examples/metrics-basic/src/main.rs +++ b/examples/metrics-basic/src/main.rs @@ -1,22 +1,22 @@ -use opentelemetry::global; -use opentelemetry::KeyValue; +use opentelemetry::{global, KeyValue}; use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider}; -use opentelemetry_sdk::{runtime, Resource}; +use opentelemetry_sdk::Resource; use std::error::Error; +use std::vec; fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider { - let exporter = opentelemetry_stdout::MetricsExporterBuilder::default() - // uncomment the below lines to pretty print output.
- // .with_encoder(|writer, data| - // Ok(serde_json::to_writer_pretty(writer, &data).unwrap())) + let exporter = opentelemetry_stdout::MetricExporterBuilder::default() + // Build exporter using Delta Temporality (Defaults to Temporality::Cumulative) + // .with_temporality(opentelemetry_sdk::metrics::Temporality::Delta) .build(); - let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); + let reader = PeriodicReader::builder(exporter).build(); let provider = SdkMeterProvider::builder() .with_reader(reader) - .with_resource(Resource::new([KeyValue::new( - "service.name", - "metrics-basic-example", - )])) + .with_resource( + Resource::builder() + .with_service_name("metrics-basic-example") + .build(), + ) .build(); global::set_meter_provider(provider.clone()); provider @@ -31,7 +31,7 @@ async fn main() -> Result<(), Box<dyn Error>> { let meter = global::meter("mylibraryname"); // Create a Counter Instrument. - let counter = meter.u64_counter("my_counter").init(); + let counter = meter.u64_counter("my_counter").build(); // Record measurements using the Counter instrument. counter.add( @@ -56,10 +56,10 @@ ], ) }) - .init(); + .build(); // Create a UpCounter Instrument. - let updown_counter = meter.i64_up_down_counter("my_updown_counter").init(); + let updown_counter = meter.i64_up_down_counter("my_updown_counter").build(); // Record measurements using the UpCounter instrument. updown_counter.add( @@ -84,13 +84,16 @@ ], ) }) - .init(); + .build(); // Create a Histogram Instrument. let histogram = meter .f64_histogram("my_histogram") .with_description("My histogram example description") - .init(); + // Setting boundaries is optional. By default, the boundaries are set to + // [0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, 7500.0, 10000.0] + .with_boundaries(vec![0.0, 5.0, 10.0, 15.0, 20.0, 25.0]) + .build(); // Record measurements using the histogram instrument.
histogram.record( @@ -108,7 +111,7 @@ async fn main() -> Result<(), Box> { .f64_gauge("my_gauge") .with_description("A gauge set to 1.0") .with_unit("myunit") - .init(); + .build(); gauge.record( 1.0, @@ -132,7 +135,7 @@ async fn main() -> Result<(), Box> { ], ) }) - .init(); + .build(); // Metrics are exported by default every 30 seconds when using stdout exporter, // however shutting down the MeterProvider here instantly flushes diff --git a/examples/self-diagnostics/Cargo.toml b/examples/self-diagnostics/Cargo.toml deleted file mode 100644 index 8e8b1cd394..0000000000 --- a/examples/self-diagnostics/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "self-diagnostics" -version = "0.1.0" -edition = "2021" -license = "Apache-2.0" -publish = false - -[dependencies] -opentelemetry = { path = "../../opentelemetry" } -opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"]} -opentelemetry-stdout = { path = "../../opentelemetry-stdout"} -opentelemetry-appender-tracing = { path = "../../opentelemetry-appender-tracing"} -tokio = { workspace = true, features = ["full"] } -tracing = { workspace = true, features = ["std"]} -tracing-core = { workspace = true } -tracing-subscriber = { version = "0.3.18", features = ["env-filter","registry", "std"]} -opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["http-proto", "reqwest-client", "logs"] } -once_cell ={ version = "1.19.0"} -ctrlc = "3.4" diff --git a/examples/self-diagnostics/Dockerfile b/examples/self-diagnostics/Dockerfile deleted file mode 100644 index f88c276a55..0000000000 --- a/examples/self-diagnostics/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM rust:1.51 -COPY . /usr/src/basic-otlp-http/ -WORKDIR /usr/src/basic-otlp-http/ -RUN cargo build --release -RUN cargo install --path . -CMD ["/usr/local/cargo/bin/basic-otlp-http"] diff --git a/examples/self-diagnostics/README.md b/examples/self-diagnostics/README.md deleted file mode 100644 index ad17e57e0c..0000000000 --- a/examples/self-diagnostics/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Basic OpenTelemetry metrics example with custom error handler: - -This example shows how to setup the custom error handler for self-diagnostics. - -## Custom Error Handling: - -A custom error handler is set up to capture and record errors using the `tracing` crate's `error!` macro. These errors are then exported to a collector using the `opentelemetry-appender-tracing` crate, which utilizes the OTLP log exporter over `HTTP/protobuf`. As a result, any errors generated by the configured OTLP metrics pipeline are funneled through this custom error handler for proper recording and export. - -## Filtering logs from external dependencies of OTLP Exporter: - -The example configures a tracing `filter` to restrict logs from external crates (`hyper`, `tonic`, and `reqwest`) used by the OTLP Exporter to the `error` level. This helps prevent an infinite loop of log generation when these crates emit logs that are picked up by the tracing subscriber. - -## Ensure that the internally generated errors are logged only once: - -By using a hashset to track seen errors, the custom error handler ensures that the same error is not logged multiple times. This is particularly useful for handling scenarios where continuous error logging might occur, such as when the OpenTelemetry collector is not running. 
- - -## Usage - -### `docker-compose` - -By default runs against the `otel/opentelemetry-collector:latest` image, and uses `reqwest-client` -as the http client, using http as the transport. - -```shell -docker-compose up -``` - -In another terminal run the application `cargo run` - -The docker-compose terminal will display logs, traces, metrics. - -Press Ctrl+C to stop the collector, and then tear it down: - -```shell -docker-compose down -``` - -### Manual - -If you don't want to use `docker-compose`, you can manually run the `otel/opentelemetry-collector` container -and inspect the logs to see traces being transferred. - -On Unix based systems use: - -```shell -# From the current directory, run `opentelemetry-collector` -docker run --rm -it -p 4318:4318 -v $(pwd):/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml -``` - -On Windows use: - -```shell -# From the current directory, run `opentelemetry-collector` -docker run --rm -it -p 4318:4318 -v "%cd%":/cfg otel/opentelemetry-collector:latest --config=/cfg/otel-collector-config.yaml -``` - -Run the app which exports logs, metrics and traces via OTLP to the collector - -```shell -cargo run -``` - -### Output: - -- If the docker instance for collector is running, below error should be logged into the container. There won't be any logs from the `hyper`, `reqwest` and `tonic` crates. -``` -otel-collector-1 | 2024-06-05T17:09:46.926Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 1, "log records": 1} -otel-collector-1 | 2024-06-05T17:09:46.926Z info ResourceLog #0 -otel-collector-1 | Resource SchemaURL: -otel-collector-1 | Resource attributes: -otel-collector-1 | -> telemetry.sdk.name: Str(opentelemetry) -otel-collector-1 | -> telemetry.sdk.version: Str(0.23.0) -otel-collector-1 | -> telemetry.sdk.language: Str(rust) -otel-collector-1 | -> service.name: Str(unknown_service) -otel-collector-1 | ScopeLogs #0 -otel-collector-1 | ScopeLogs SchemaURL: -otel-collector-1 | InstrumentationScope opentelemetry-appender-tracing 0.4.0 -otel-collector-1 | LogRecord #0 -otel-collector-1 | ObservedTimestamp: 2024-06-05 17:09:45.931951161 +0000 UTC -otel-collector-1 | Timestamp: 1970-01-01 00:00:00 +0000 UTC -otel-collector-1 | SeverityText: ERROR -otel-collector-1 | SeverityNumber: Error(17) -otel-collector-1 | Body: Str(OpenTelemetry metrics error occurred: Metrics error: Warning: Maximum data points for metric stream exceeded. Entry added to overflow. Subsequent overflows to same metric until next collect will not be logged.) -otel-collector-1 | Attributes: -otel-collector-1 | -> name: Str(event examples/self-diagnostics/src/main.rs:42) -otel-collector-1 | Trace ID: -otel-collector-1 | Span ID: -otel-collector-1 | Flags: 0 -otel-collector-1 | {"kind": "exporter", "data_type": "logs", "name": "logging"} -``` - -- The SDK will keep trying to upload metrics at regular intervals if the collector's Docker instance is down. To avoid a logging loop, internal errors like 'Connection refused' will be attempted to be logged only once. 
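The filtering pattern demonstrated by the removed example still applies when capturing SDK self-diagnostics through `tracing`. A minimal sketch, not part of this patch, assuming only the `tracing-subscriber` crate with its default `fmt` feature and the `env-filter` feature enabled:

```rust
// Sketch: route OpenTelemetry self-diagnostics to stdout via `tracing`,
// silencing the HTTP/gRPC crates used by the OTLP exporter so their own
// logs are not fed back into the export pipeline.
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};

fn main() {
    // Allow `info` and above by default; restrict exporter dependencies to `error`.
    let filter = EnvFilter::new("info")
        .add_directive("hyper=error".parse().unwrap())
        .add_directive("tonic=error".parse().unwrap())
        .add_directive("reqwest=error".parse().unwrap());

    tracing_subscriber::registry()
        .with(filter)
        .with(tracing_subscriber::fmt::layer())
        .init();

    // ... build providers and run the application here ...
}
```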
diff --git a/examples/self-diagnostics/docker-compose.yaml b/examples/self-diagnostics/docker-compose.yaml deleted file mode 100644 index b363c459ea..0000000000 --- a/examples/self-diagnostics/docker-compose.yaml +++ /dev/null @@ -1,11 +0,0 @@ -version: "2" -services: - - # Collector - otel-collector: - image: otel/opentelemetry-collector:latest - command: ["--config=/etc/otel-collector-config.yaml", "${OTELCOL_ARGS}"] - volumes: - - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml - ports: - - "4318:4318" # OTLP HTTP receiver diff --git a/examples/self-diagnostics/otel-collector-config.yaml b/examples/self-diagnostics/otel-collector-config.yaml deleted file mode 100644 index 1c6d258426..0000000000 --- a/examples/self-diagnostics/otel-collector-config.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# This is a configuration file for the OpenTelemetry Collector intended to be -# used in conjunction with the opentelemetry-otlp example. -# -# For more information about the OpenTelemetry Collector see: -# https://github.com/open-telemetry/opentelemetry-collector -# -receivers: - otlp: - protocols: - grpc: - http: - -exporters: - debug: - verbosity: detailed - -service: - pipelines: - traces: - receivers: [otlp] - exporters: [debug] - metrics: - receivers: [otlp] - exporters: [debug] - logs: - receivers: [otlp] - exporters: [debug] \ No newline at end of file diff --git a/examples/self-diagnostics/src/main.rs b/examples/self-diagnostics/src/main.rs deleted file mode 100644 index b9e6ebd490..0000000000 --- a/examples/self-diagnostics/src/main.rs +++ /dev/null @@ -1,144 +0,0 @@ -use opentelemetry::global::{self, set_error_handler, Error as OtelError}; -use opentelemetry::KeyValue; -use opentelemetry_appender_tracing::layer; -use opentelemetry_otlp::WithExportConfig; -use tracing_subscriber::prelude::*; -use tracing_subscriber::EnvFilter; - -use std::error::Error; -use tracing::error; - -use once_cell::sync::Lazy; -use std::collections::HashSet; -use std::sync::{Arc, Mutex}; - -use std::sync::mpsc::channel; - -struct ErrorState { - seen_errors: Mutex<HashSet<String>>, -} - -impl ErrorState { - fn new() -> Self { - ErrorState { - seen_errors: Mutex::new(HashSet::new()), - } - } - - fn mark_as_seen(&self, err: &OtelError) -> bool { - let mut seen_errors = self.seen_errors.lock().unwrap(); - seen_errors.insert(err.to_string()) - } -} - -static GLOBAL_ERROR_STATE: Lazy<Arc<ErrorState>> = Lazy::new(|| Arc::new(ErrorState::new())); - -fn custom_error_handler(err: OtelError) { - if GLOBAL_ERROR_STATE.mark_as_seen(&err) { - // log error not already seen - match err { - OtelError::Metric(err) => error!("OpenTelemetry metrics error occurred: {}", err), - OtelError::Trace(err) => error!("OpenTelemetry trace error occurred: {}", err), - OtelError::Log(err) => error!("OpenTelemetry log error occurred: {}", err), - OtelError::Propagation(err) => { - error!("OpenTelemetry propagation error occurred: {}", err) - } - OtelError::Other(err_msg) => error!("OpenTelemetry error occurred: {}", err_msg), - _ => error!("OpenTelemetry error occurred: {:?}", err), - } - } -} - -fn init_logger_provider() -> opentelemetry_sdk::logs::LoggerProvider { - let provider = opentelemetry_otlp::new_pipeline() - .logging() - .with_exporter( - opentelemetry_otlp::new_exporter() - .http() - .with_endpoint("http://localhost:4318/v1/logs"), - ) - .install_batch(opentelemetry_sdk::runtime::Tokio) - .unwrap(); - - // Add a tracing filter to filter events from crates used by opentelemetry-otlp.
- // The filter levels are set as follows: - // - Allow `info` level and above by default. - // - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only. - // This ensures events generated from these crates within the OTLP Exporter are not looped back, - // thus preventing infinite event generation. - // Note: This will also drop events from these crates used outside the OTLP Exporter. - // For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761 - let filter = EnvFilter::new("info") - .add_directive("hyper=error".parse().unwrap()) - .add_directive("tonic=error".parse().unwrap()) - .add_directive("reqwest=error".parse().unwrap()); - let cloned_provider = provider.clone(); - let layer = layer::OpenTelemetryTracingBridge::new(&cloned_provider); - tracing_subscriber::registry() - .with(filter) - .with(layer) - .init(); - provider -} - -fn init_meter_provider() -> opentelemetry_sdk::metrics::SdkMeterProvider { - let provider = opentelemetry_otlp::new_pipeline() - .metrics(opentelemetry_sdk::runtime::Tokio) - .with_period(std::time::Duration::from_secs(1)) - .with_exporter( - opentelemetry_otlp::new_exporter() - .http() - .with_endpoint("http://localhost:4318/v1/metrics"), - ) - .build() - .unwrap(); - let cloned_provider = provider.clone(); - global::set_meter_provider(cloned_provider); - provider -} - -#[tokio::main] -async fn main() -> Result<(), Box<dyn Error>> { - // Set the custom error handler - if let Err(err) = set_error_handler(custom_error_handler) { - eprintln!("Failed to set custom error handler: {}", err); - } - - let logger_provider = init_logger_provider(); - - // Initialize the MeterProvider with the stdout Exporter. - let meter_provider = init_meter_provider(); - - // Create a meter from the above MeterProvider. - let meter = global::meter("example"); - // Create a Counter Instrument. - let counter = meter.u64_counter("my_counter").init(); - - // Record measurements with unique key-value pairs to exceed the cardinality limit - // of 2000 and trigger error message - for i in 0..3000 { - counter.add( - 10, - &[KeyValue::new( - format!("mykey{}", i), - format!("myvalue{}", i), - )], - ); - } - - let (tx, rx) = channel(); - - ctrlc::set_handler(move || tx.send(()).expect("Could not send signal on channel.")) - .expect("Error setting Ctrl-C handler"); - - println!("Press Ctrl-C to continue..."); - rx.recv().expect("Could not receive from channel."); - println!("Got Ctrl-C, Doing shutdown and existing."); - - // MeterProvider is configured with an OTLP Exporter to export metrics every 1 second, - // however shutting down the MeterProvider here instantly flushes - // the metrics, instead of waiting for the 1 sec interval.
- meter_provider.shutdown()?; - let _ = logger_provider.shutdown(); - Ok(()) -} diff --git a/examples/tracing-grpc/Cargo.toml b/examples/tracing-grpc/Cargo.toml index c4ba3e1105..c836904a37 100644 --- a/examples/tracing-grpc/Cargo.toml +++ b/examples/tracing-grpc/Cargo.toml @@ -20,7 +20,6 @@ opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace prost = { workspace = true } tokio = { workspace = true, features = ["full"] } tonic = { workspace = true } -serde_json = { workspace = true } [build-dependencies] tonic-build = { workspace = true } diff --git a/examples/tracing-grpc/src/client.rs b/examples/tracing-grpc/src/client.rs index 0f24e23710..c871e9ca8d 100644 --- a/examples/tracing-grpc/src/client.rs +++ b/examples/tracing-grpc/src/client.rs @@ -1,9 +1,7 @@ use hello_world::greeter_client::GreeterClient; use hello_world::HelloRequest; use opentelemetry::{global, propagation::Injector}; -use opentelemetry_sdk::{ - propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider, -}; +use opentelemetry_sdk::{propagation::TraceContextPropagator, runtime::Tokio, trace as sdktrace}; use opentelemetry_stdout::SpanExporter; use opentelemetry::{ @@ -11,14 +9,15 @@ use opentelemetry::{ Context, KeyValue, }; -fn init_tracer() { +fn init_tracer() -> sdktrace::TracerProvider { global::set_text_map_propagator(TraceContextPropagator::new()); // Install stdout exporter pipeline to be able to retrieve the collected spans. - let provider = TracerProvider::builder() + let provider = sdktrace::TracerProvider::builder() .with_batch_exporter(SpanExporter::default(), Tokio) .build(); - global::set_tracer_provider(provider); + global::set_tracer_provider(provider.clone()); + provider } struct MetadataMap<'a>(&'a mut tonic::metadata::MetadataMap); @@ -75,9 +74,10 @@ async fn greet() -> Result<(), Box<dyn std::error::Error>> { #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { - init_tracer(); + let provider = init_tracer(); greet().await?; - opentelemetry::global::shutdown_tracer_provider(); + + provider.shutdown()?; Ok(()) } diff --git a/examples/tracing-grpc/src/server.rs b/examples/tracing-grpc/src/server.rs index 3831907cf0..aadb77b6e6 100644 --- a/examples/tracing-grpc/src/server.rs +++ b/examples/tracing-grpc/src/server.rs @@ -11,14 +11,15 @@ use opentelemetry_sdk::{ use opentelemetry_stdout::SpanExporter; use tonic::{transport::Server, Request, Response, Status}; -fn init_tracer() { +fn init_tracer() -> TracerProvider { global::set_text_map_propagator(TraceContextPropagator::new()); // Install stdout exporter pipeline to be able to retrieve the collected spans. let provider = TracerProvider::builder() .with_batch_exporter(SpanExporter::default(), Tokio) .build(); - global::set_tracer_provider(provider); + global::set_tracer_provider(provider.clone()); + provider } #[allow(clippy::derive_partial_eq_without_eq)] // tonic don't derive Eq for generated types. We shouldn't manually change it.
@@ -82,7 +83,7 @@ impl Greeter for MyGreeter { #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { - init_tracer(); + let provider = init_tracer(); let addr = "[::1]:50051".parse()?; let greeter = MyGreeter::default(); @@ -92,7 +93,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> .serve(addr) .await?; - opentelemetry::global::shutdown_tracer_provider(); + provider.shutdown()?; Ok(()) } diff --git a/examples/tracing-jaeger/Cargo.toml b/examples/tracing-jaeger/Cargo.toml index 809365c66b..6257f0ebf0 100644 --- a/examples/tracing-jaeger/Cargo.toml +++ b/examples/tracing-jaeger/Cargo.toml @@ -9,5 +9,4 @@ publish = false opentelemetry = { path = "../../opentelemetry" } opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"] } opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["tonic"] } -opentelemetry-semantic-conventions = { path = "../../opentelemetry-semantic-conventions" } tokio = { workspace = true, features = ["full"] } diff --git a/examples/tracing-jaeger/src/main.rs b/examples/tracing-jaeger/src/main.rs index e6c3dfdb2b..e015f9ab9f 100644 --- a/examples/tracing-jaeger/src/main.rs +++ b/examples/tracing-jaeger/src/main.rs @@ -1,30 +1,26 @@ -use opentelemetry::global::shutdown_tracer_provider; use opentelemetry::{ global, trace::{TraceContextExt, TraceError, Tracer}, KeyValue, }; -use opentelemetry_otlp::WithExportConfig; -use opentelemetry_sdk::{runtime, trace as sdktrace, Resource}; -use opentelemetry_semantic_conventions::resource::SERVICE_NAME; +use opentelemetry_sdk::trace::TracerProvider; +use opentelemetry_sdk::{runtime, Resource}; use std::error::Error; fn init_tracer_provider() -> Result<opentelemetry_sdk::trace::TracerProvider, TraceError> { - opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter( - opentelemetry_otlp::new_exporter() - .tonic() - .with_endpoint("http://localhost:4317"), - ) - .with_trace_config( - sdktrace::Config::default().with_resource(Resource::new(vec![KeyValue::new( - SERVICE_NAME, - "tracing-jaeger", - )])), + let exporter = opentelemetry_otlp::SpanExporter::builder() + .with_tonic() + .build()?; + + Ok(TracerProvider::builder() + .with_batch_exporter(exporter, runtime::Tokio) + .with_resource( + Resource::builder() + .with_service_name("tracing-jaeger") + .build(), ) - .install_batch(runtime::Tokio) + .build()) } #[tokio::main] @@ -46,6 +42,7 @@ async fn main() -> Result<(), Box<dyn Error>> { }); }); - shutdown_tracer_provider(); + tracer_provider.shutdown()?; + Ok(()) } diff --git a/opentelemetry-appender-log/CHANGELOG.md b/opentelemetry-appender-log/CHANGELOG.md index e5eb014050..ab8c5aebcc 100644 --- a/opentelemetry-appender-log/CHANGELOG.md +++ b/opentelemetry-appender-log/CHANGELOG.md @@ -2,6 +2,23 @@ ## vNext +- Bump MSRV to 1.75.0. + +## 0.27.0 + +Released 2024-Nov-11 + +- Update `opentelemetry` dependency version to 0.27 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) +- [2193](https://github.com/open-telemetry/opentelemetry-rust/pull/2193) `opentelemetry-appender-log`: Output experimental code attributes +- **Breaking** [2291](https://github.com/open-telemetry/opentelemetry-rust/pull/2291) Rename `logs_level_enabled` flag to `spec_unstable_logs_enabled`. Please enable this updated flag if the feature is needed. This flag will be removed once the feature is stabilized in the specifications.
+ +## v0.26.0 +Released 2024-Sep-30 +- Update `opentelemetry` dependency version to 0.26 + ## v0.25.0 - Update `opentelemetry` dependency version to 0.25 diff --git a/opentelemetry-appender-log/Cargo.toml b/opentelemetry-appender-log/Cargo.toml index 6cfb4c1d66..2cc1e0aa03 100644 --- a/opentelemetry-appender-log/Cargo.toml +++ b/opentelemetry-appender-log/Cargo.toml @@ -1,27 +1,36 @@ [package] name = "opentelemetry-appender-log" -version = "0.25.0" +version = "0.27.0" description = "An OpenTelemetry appender for the log crate" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-log" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-log" readme = "README.md" keywords = ["opentelemetry", "log", "logs"] license = "Apache-2.0" -rust-version = "1.65" +rust-version = "1.75.0" edition = "2021" [dependencies] -opentelemetry = { version = "0.25", path = "../opentelemetry", features = ["logs"]} -log = { workspace = true, features = ["kv", "std"]} +opentelemetry = { version = "0.27", path = "../opentelemetry", features = [ + "logs", +] } +log = { workspace = true, features = ["kv", "std"] } serde = { workspace = true, optional = true, features = ["std"] } +opentelemetry-semantic-conventions = { version = "0.27", path = "../opentelemetry-semantic-conventions", optional = true, features = [ + "semconv_experimental", +] } [features] -logs_level_enabled = ["opentelemetry/logs_level_enabled"] +spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"] with-serde = ["log/kv_serde", "serde"] +experimental_metadata_attributes = ["dep:opentelemetry-semantic-conventions"] [dev-dependencies] -opentelemetry_sdk = { path = "../opentelemetry-sdk", features = [ "testing", "logs_level_enabled" ] } -opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"]} +opentelemetry_sdk = { path = "../opentelemetry-sdk", features = [ + "testing", + "spec_unstable_logs_enabled", +] } +opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] } log = { workspace = true, features = ["kv_serde"] } tokio = { workspace = true } serde = { workspace = true, features = ["std", "derive"] } diff --git a/opentelemetry-appender-log/examples/logs-basic.rs b/opentelemetry-appender-log/examples/logs-basic.rs index dc5bacc813..e1faf255b7 100644 --- a/opentelemetry-appender-log/examples/logs-basic.rs +++ b/opentelemetry-appender-log/examples/logs-basic.rs @@ -7,7 +7,6 @@ use log::{error, info, warn, Level}; use opentelemetry_appender_log::OpenTelemetryLogBridge; use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider}; -use opentelemetry_sdk::runtime; use opentelemetry_stdout::LogExporter; #[tokio::main] @@ -16,7 +15,7 @@ async fn main() { let exporter = LogExporter::default(); //Create a LoggerProvider and register the exporter let logger_provider = LoggerProvider::builder() - .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build()) + .with_log_processor(BatchLogProcessor::builder(exporter).build()) .build(); // Setup Log Appender for the log crate. diff --git a/opentelemetry-appender-log/src/lib.rs b/opentelemetry-appender-log/src/lib.rs index 7aff449410..2cc8b1e0fe 100644 --- a/opentelemetry-appender-log/src/lib.rs +++ b/opentelemetry-appender-log/src/lib.rs @@ -2,6 +2,8 @@ //! //! This library implements a log appender for the [`log`] crate using the [Logs Bridge API]. //! +//! *[Supported Rust Versions](#supported-rust-versions)* +//! //! 
# Getting Started //! //! The bridge requires configuration on both the `log` and OpenTelemetry sides. //! @@ -89,17 +91,32 @@ //! //! This library provides the following Cargo features: //! -//! - `logs_level_enabled`: Allow users to control the log level. +//! - `spec_unstable_logs_enabled`: Allow users to control the log level. //! - `with-serde`: Support complex values as attributes without stringifying them. //! //! [Logs Bridge API]: https://opentelemetry.io/docs/specs/otel/logs/bridge-api/ +//! +//! ## Supported Rust Versions +//! +//! OpenTelemetry is built against the latest stable release. The minimum +//! supported version is 1.75.0. The current OpenTelemetry version is not +//! guaranteed to build on Rust versions earlier than the minimum supported +//! version. +//! +//! The current stable Rust compiler and the three most recent minor versions +//! before it will always be supported. For example, if the current stable +//! compiler version is 1.49, the minimum supported version will not be +//! increased past 1.46, three minor versions prior. Increasing the minimum +//! supported compiler version is not considered a semver breaking change as +//! long as doing so complies with this policy. use log::{Level, Metadata, Record}; use opentelemetry::{ logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity}, - Key, + InstrumentationScope, Key, }; -use std::borrow::Cow; +#[cfg(feature = "experimental_metadata_attributes")] use opentelemetry_semantic_conventions::attribute::{CODE_FILEPATH, CODE_LINENO, CODE_NAMESPACE}; pub struct OpenTelemetryLogBridge<P, L> where @@ -116,11 +133,11 @@ where L: Logger + Send + Sync, { fn enabled(&self, _metadata: &Metadata) -> bool { - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] return self .logger .event_enabled(severity_of_level(_metadata.level()), _metadata.target()); - #[cfg(not(feature = "logs_level_enabled"))] + #[cfg(not(feature = "spec_unstable_logs_enabled"))] true } @@ -130,6 +147,28 @@ where log_record.set_severity_number(severity_of_level(record.level())); log_record.set_severity_text(record.level().as_str()); log_record.set_body(AnyValue::from(record.args().to_string())); + + #[cfg(feature = "experimental_metadata_attributes")] + { + if let Some(filepath) = record.file() { + log_record.add_attribute( + Key::new(CODE_FILEPATH), + AnyValue::from(filepath.to_string()), + ); + } + + if let Some(line_no) = record.line() { + log_record.add_attribute(Key::new(CODE_LINENO), AnyValue::from(line_no)); + } + + if let Some(module) = record.module_path() { + log_record.add_attribute( + Key::new(CODE_NAMESPACE), + AnyValue::from(module.to_string()), + ); + } + } + log_record.add_attributes(log_attributes(record.key_values())); log_record.set_target(record.metadata().target().to_string()); @@ -146,11 +185,12 @@ where L: Logger + Send + Sync, { pub fn new(provider: &P) -> Self { + let scope = InstrumentationScope::builder("opentelemetry-log-appender") + .with_version(env!("CARGO_PKG_VERSION")) + .build(); + OpenTelemetryLogBridge { - logger: provider - .logger_builder("opentelemetry-log-appender") - .with_version(Cow::Borrowed(env!("CARGO_PKG_VERSION"))) - .build(), + logger: provider.logger_with_scope(scope), _phantom: Default::default(), } } @@ -729,13 +769,13 @@ mod tests { use super::OpenTelemetryLogBridge; use opentelemetry::{logs::AnyValue, StringValue}; - use opentelemetry_sdk::{logs::LoggerProvider, testing::logs::InMemoryLogsExporter}; + use opentelemetry_sdk::{logs::LoggerProvider,
testing::logs::InMemoryLogExporter}; use log::Log; #[test] fn logbridge_with_default_metadata_is_enabled() { - let exporter = InMemoryLogsExporter::default(); + let exporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter) @@ -746,15 +786,15 @@ mod tests { // As a result of using `with_simple_exporter` while building the logger provider, // the processor used is a `SimpleLogProcessor` which has an implementation of `event_enabled` // that always returns true. - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] assert!(otel_log_appender.enabled(&log::Metadata::builder().build())); - #[cfg(not(feature = "logs_level_enabled"))] + #[cfg(not(feature = "spec_unstable_logs_enabled"))] assert!(otel_log_appender.enabled(&log::Metadata::builder().build())); } #[test] fn logbridge_with_record_can_log() { - let exporter = InMemoryLogsExporter::default(); + let exporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter.clone()) @@ -806,11 +846,11 @@ mod tests { assert_eq!(logs.len(), 5); for log in logs { - let body: String = match log.record.body.as_ref().unwrap() { + let body: String = match log.record.body().unwrap() { super::AnyValue::String(s) => s.to_string(), _ => panic!("AnyValue::String expected"), }; - assert_eq!(body, log.record.severity_text.unwrap()); + assert_eq!(body, log.record.severity_text().unwrap()); } } @@ -868,7 +908,7 @@ mod tests { } } - let exporter = InMemoryLogsExporter::default(); + let exporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter.clone()) @@ -1127,9 +1167,57 @@ mod tests { } } + #[cfg(feature = "experimental_metadata_attributes")] + #[test] + fn logbridge_code_attributes() { + use opentelemetry_semantic_conventions::attribute::{ + CODE_FILEPATH, CODE_LINENO, CODE_NAMESPACE, + }; + + let exporter = InMemoryLogExporter::default(); + + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider); + + otel_log_appender.log( + &log::RecordBuilder::new() + .level(log::Level::Warn) + .args(format_args!("WARN")) + .file(Some("src/main.rs")) + .module_path(Some("service")) + .line(Some(101)) + .build(), + ); + + let logs = exporter.get_emitted_logs().unwrap(); + + let get = |needle: &str| -> Option<AnyValue> { + logs[0].record.attributes_iter().find_map(|(k, v)| { + if k.as_str() == needle { + Some(v.clone()) + } else { + None + } + }) + }; + + assert_eq!( + Some(AnyValue::String(StringValue::from("src/main.rs"))), + get(CODE_FILEPATH) + ); + assert_eq!( + Some(AnyValue::String(StringValue::from("service"))), + get(CODE_NAMESPACE) + ); + assert_eq!(Some(AnyValue::Int(101)), get(CODE_LINENO)); + } + #[test] fn test_flush() { - let exporter = InMemoryLogsExporter::default(); + let exporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter) diff --git a/opentelemetry-appender-tracing/CHANGELOG.md b/opentelemetry-appender-tracing/CHANGELOG.md index 188bcae044..9fac13a4b2 100644 --- a/opentelemetry-appender-tracing/CHANGELOG.md +++ b/opentelemetry-appender-tracing/CHANGELOG.md @@ -2,8 +2,23 @@ ## vNext -- [2101](https://github.com/open-telemetry/opentelemetry-rust/pull/2101) The `log` events emitted via the `tracing` pipeline using the `log-tracing` crate no longer include the target
metadata as attributes. Exporters or backends that rely on this attribute should now access the target directly from the `LogRecord::target` field. +- Bump MSRV to 1.75.0. + +## 0.27.0 + +Released 2024-Nov-11 +- Update `opentelemetry` dependency version to 0.27 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) +- **Breaking** [2291](https://github.com/open-telemetry/opentelemetry-rust/pull/2291) Rename `logs_level_enabled` flag to `spec_unstable_logs_enabled`. Please enable this updated flag if the feature is needed. This flag will be removed once the feature is stabilized in the specifications. + +## v0.26.0 +Released 2024-Sep-30 + +- Update `opentelemetry` dependency version to 0.26 +- [2101](https://github.com/open-telemetry/opentelemetry-rust/pull/2101) The `log` events emitted via the `tracing` pipeline using the `log-tracing` crate no longer include the target metadata as attributes. Exporters or backends that rely on this attribute should now access the target directly from the `LogRecord::target` field. ## v0.25.0 diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index 322e451f32..e3c682b77d 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-appender-tracing" -version = "0.25.0" +version = "0.27.0" edition = "2021" description = "An OpenTelemetry log appender for the tracing crate" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-appender-tracing" @@ -8,11 +8,11 @@ repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/ope readme = "README.md" keywords = ["opentelemetry", "log", "logs", "tracing"] license = "Apache-2.0" -rust-version = "1.65" +rust-version = "1.75.0" [dependencies] log = { workspace = true, optional = true } -opentelemetry = { version = "0.25", path = "../opentelemetry", features = ["logs"] } +opentelemetry = { version = "0.27", path = "../opentelemetry", features = ["logs"] } tracing = { workspace = true, features = ["std"]} tracing-core = { workspace = true } tracing-log = { version = "0.2", optional = true } @@ -22,19 +22,21 @@ tracing-subscriber = { workspace = true, features = ["registry", "std"] } log = { workspace = true } opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] } opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["logs", "testing"] } +tracing-subscriber = { workspace = true, features = ["registry", "std", "env-filter"] } tracing-log = "0.2" async-trait = { workspace = true } criterion = { workspace = true } +tokio = { workspace = true, features = ["full"]} [target.'cfg(not(target_os = "windows"))'.dev-dependencies] pprof = { version = "0.13", features = ["flamegraph", "criterion"] } [features] experimental_metadata_attributes = ["dep:tracing-log"] -logs_level_enabled = ["opentelemetry/logs_level_enabled"] +spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"] [[bench]] name = "logs" harness = false -required-features = ["logs_level_enabled"] +required-features = ["spec_unstable_logs_enabled"] diff --git a/opentelemetry-appender-tracing/README.md b/opentelemetry-appender-tracing/README.md index b1764d58c3..7f4a35b214 100644 --- a/opentelemetry-appender-tracing/README.md +++ b/opentelemetry-appender-tracing/README.md @@ -32,3 +32,18 @@ management, and export of telemetry.
A major goal of OpenTelemetry is that you can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. + +*[Supported Rust Versions](#supported-rust-versions)* + +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version. + +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.49, the minimum supported version will not be increased past 1.46, +three minor versions prior. Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy. diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index a5ebb83249..1c60bd82d7 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -15,10 +15,10 @@ use async_trait::async_trait; use criterion::{criterion_group, criterion_main, Criterion}; -use opentelemetry::logs::LogResult; -use opentelemetry::{InstrumentationLibrary, KeyValue}; +use opentelemetry::InstrumentationScope; use opentelemetry_appender_tracing::layer as tracing_layer; use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; +use opentelemetry_sdk::logs::LogResult; use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LoggerProvider}; use opentelemetry_sdk::Resource; use pprof::criterion::{Output, PProfProfiler}; @@ -34,7 +34,7 @@ struct NoopExporter { #[async_trait] impl LogExporter for NoopExporter { - async fn export(&mut self, _: LogBatch<'_>) -> LogResult<()> { + async fn export(&self, _: LogBatch<'_>) -> LogResult<()> { LogResult::Ok(()) } @@ -55,7 +55,7 @@ impl NoopProcessor { } impl LogProcessor for NoopProcessor { - fn emit(&self, _: &mut LogRecord, _: &InstrumentationLibrary) { + fn emit(&self, _: &mut LogRecord, _: &InstrumentationScope) { // no-op } @@ -126,10 +126,11 @@ fn benchmark_with_ot_layer(c: &mut Criterion, enabled: bool, bench_name: &str) { let exporter = NoopExporter { enabled }; let processor = NoopProcessor::new(Box::new(exporter)); let provider = LoggerProvider::builder() - .with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "benchmark", - )])) + .with_resource( + Resource::builder_empty() + .with_service_name("benchmark") + .build(), + ) .with_log_processor(processor) .build(); let ot_layer = tracing_layer::OpenTelemetryTracingBridge::new(&provider); diff --git a/opentelemetry-appender-tracing/examples/basic.rs b/opentelemetry-appender-tracing/examples/basic.rs index 0f36d8a930..c4fc9a3fab 100644 --- a/opentelemetry-appender-tracing/examples/basic.rs +++ b/opentelemetry-appender-tracing/examples/basic.rs @@ -1,6 +1,5 @@ //! 
run with `$ cargo run --example basic` -use opentelemetry::KeyValue; use opentelemetry_appender_tracing::layer; use opentelemetry_sdk::{logs::LoggerProvider, Resource}; use tracing::error; @@ -9,10 +8,11 @@ use tracing_subscriber::prelude::*; fn main() { let exporter = opentelemetry_stdout::LogExporter::default(); let provider: LoggerProvider = LoggerProvider::builder() - .with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "log-appender-tracing-example", - )])) + .with_resource( + Resource::builder() + .with_service_name("log-appender-tracing-example") + .build(), + ) .with_simple_exporter(exporter) .build(); let layer = layer::OpenTelemetryTracingBridge::new(&provider); diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs index 8148f75069..a8354822e1 100644 --- a/opentelemetry-appender-tracing/src/layer.rs +++ b/opentelemetry-appender-tracing/src/layer.rs @@ -1,6 +1,6 @@ use opentelemetry::{ logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity}, - Key, + InstrumentationScope, Key, }; use std::borrow::Cow; use tracing_core::Level; @@ -69,7 +69,7 @@ impl<'a, LR: LogRecord> EventVisitor<'a, LR> { } } -impl<'a, LR: LogRecord> tracing::field::Visit for EventVisitor<'a, LR> { +impl<LR: LogRecord> tracing::field::Visit for EventVisitor<'_, LR> { fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { #[cfg(feature = "experimental_metadata_attributes")] if is_duplicated_metadata(field.name()) { @@ -136,11 +136,12 @@ where L: Logger + Send + Sync, { pub fn new(provider: &P) -> Self { + let scope = InstrumentationScope::builder(INSTRUMENTATION_LIBRARY_NAME) + .with_version(Cow::Borrowed(env!("CARGO_PKG_VERSION"))) + .build(); + OpenTelemetryTracingBridge { - logger: provider - .logger_builder(INSTRUMENTATION_LIBRARY_NAME) - .with_version(Cow::Borrowed(env!("CARGO_PKG_VERSION"))) - .build(), + logger: provider.logger_with_scope(scope), _phantom: Default::default(), } } @@ -183,7 +184,7 @@ where self.logger.emit(log_record); } - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] fn event_enabled( &self, _event: &tracing_core::Event<'_>, @@ -208,16 +209,19 @@ const fn severity_of_level(level: &Level) -> Severity { #[cfg(test)] mod tests { use crate::layer; + use async_trait::async_trait; use opentelemetry::logs::Severity; use opentelemetry::trace::TracerProvider as _; use opentelemetry::trace::{TraceContextExt, TraceFlags, Tracer}; use opentelemetry::{logs::AnyValue, Key}; - use opentelemetry_sdk::logs::{LogRecord, LoggerProvider}; - use opentelemetry_sdk::testing::logs::InMemoryLogsExporter; - use opentelemetry_sdk::trace; + use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; + use opentelemetry_sdk::logs::{LogRecord, LogResult, LoggerProvider}; + use opentelemetry_sdk::testing::logs::InMemoryLogExporter; use opentelemetry_sdk::trace::{Sampler, TracerProvider}; - use tracing::error; - use tracing_subscriber::layer::SubscriberExt; + use tracing::{error, warn}; + use tracing_subscriber::prelude::__tracing_subscriber_SubscriberExt; + use tracing_subscriber::util::SubscriberInitExt; + use tracing_subscriber::{EnvFilter, Layer}; pub fn attributes_contains(log_record: &LogRecord, key: &Key, value: &AnyValue) -> bool { log_record .any(|(k, v)| k == key && v == value) } + fn create_tracing_subscriber( + _exporter: InMemoryLogExporter, + logger_provider: &LoggerProvider, + ) -> impl tracing::Subscriber { + let level_filter = 
tracing_subscriber::filter::LevelFilter::WARN; // Capture WARN and ERROR levels + let layer = + layer::OpenTelemetryTracingBridge::new(logger_provider).with_filter(level_filter); // No filter based on target, only based on log level + + tracing_subscriber::registry().with(layer) + } + // cargo test --features=testing + + #[derive(Clone, Debug, Default)] + struct ReentrantLogExporter; + + #[async_trait] + impl LogExporter for ReentrantLogExporter { + async fn export(&self, _batch: LogBatch<'_>) -> LogResult<()> { + // This will cause a deadlock as the export itself creates a log + // while still within the lock of the SimpleLogProcessor. + warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); + Ok(()) + } + } + + #[test] + #[ignore = "See issue: https://github.com/open-telemetry/opentelemetry-rust/issues/1745"] + fn simple_processor_deadlock() { + let exporter: ReentrantLogExporter = ReentrantLogExporter; + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + + // Setting subscriber as global as that is the only way to test this scenario. + tracing_subscriber::registry().with(layer).init(); + warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); + } + + #[test] + #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."] + fn simple_processor_no_deadlock() { + let exporter: ReentrantLogExporter = ReentrantLogExporter; + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + + // This filter will prevent the deadlock as the reentrant log will be + // ignored. + let filter = EnvFilter::new("debug").add_directive("reentrant=error".parse().unwrap()); + // Setting subscriber as global as that is the only way to test this scenario. + tracing_subscriber::registry() + .with(filter) + .with(layer) + .init(); + warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + #[ignore = "While this test runs fine, this uses global subscriber and does not play well with other tests."] + async fn batch_processor_no_deadlock() { + let exporter: ReentrantLogExporter = ReentrantLogExporter; + let logger_provider = LoggerProvider::builder() + .with_batch_exporter(exporter.clone()) + .build(); + + let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); + + tracing_subscriber::registry().with(layer).init(); + warn!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io"); + } + #[test] fn tracing_appender_standalone() { // Arrange - let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter.clone()) .build(); - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - let subscriber = tracing_subscriber::registry().with(layer); + let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider); // avoiding setting tracing subscriber as global as that does not // play well with unit tests. 
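A note on the test pattern above: `create_tracing_subscriber` returns a subscriber that the tests install only for a limited scope via `tracing::subscriber::with_default`, rather than globally. A minimal sketch of that pattern, assuming the `create_tracing_subscriber` helper and `InMemoryLogExporter` shown in this diff are in scope:

```rust
use opentelemetry_sdk::logs::LoggerProvider;
use opentelemetry_sdk::testing::logs::InMemoryLogExporter;
use tracing::error;

fn scoped_subscriber_sketch() {
    let exporter = InMemoryLogExporter::default();
    let logger_provider = LoggerProvider::builder()
        .with_simple_exporter(exporter.clone())
        .build();
    let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider);

    // The subscriber is active only inside this closure, so the global
    // dispatcher used by other tests is never touched.
    tracing::subscriber::with_default(subscriber, || {
        error!(name: "my-event-name", target: "my-system", event_id = 20);
    });
    logger_provider.force_flush();
    // exporter.get_emitted_logs() can now be inspected for assertions.
}
```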
@@ -255,11 +333,11 @@ mod tests { .expect("Atleast one log is expected to be present."); // Validate common fields - assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); - assert_eq!(log.record.severity_number, Some(Severity::Error)); + assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number(), Some(Severity::Error)); // Validate trace context is none. - assert!(log.record.trace_context.is_none()); + assert!(log.record.trace_context().is_none()); // Validate attributes #[cfg(not(feature = "experimental_metadata_attributes"))] @@ -310,13 +388,12 @@ mod tests { #[test] fn tracing_appender_inside_tracing_context() { // Arrange - let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter.clone()) .build(); - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - let subscriber = tracing_subscriber::registry().with(layer); + let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider); // avoiding setting tracing subscriber as global as that does not // play well with unit tests. @@ -324,7 +401,7 @@ mod tests { // setup tracing as well. let tracer_provider = TracerProvider::builder() - .with_config(trace::Config::default().with_sampler(Sampler::AlwaysOn)) + .with_sampler(Sampler::AlwaysOn) .build(); let tracer = tracer_provider.tracer("test-tracer"); @@ -350,26 +427,21 @@ mod tests { .expect("Atleast one log is expected to be present."); // validate common fields. - assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); - assert_eq!(log.record.severity_number, Some(Severity::Error)); + assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number(), Some(Severity::Error)); // validate trace context. - assert!(log.record.trace_context.is_some()); + assert!(log.record.trace_context().is_some()); assert_eq!( - log.record.trace_context.as_ref().unwrap().trace_id, + log.record.trace_context().unwrap().trace_id, trace_id_expected ); assert_eq!( - log.record.trace_context.as_ref().unwrap().span_id, + log.record.trace_context().unwrap().span_id, span_id_expected ); assert_eq!( - log.record - .trace_context - .as_ref() - .unwrap() - .trace_flags - .unwrap(), + log.record.trace_context().unwrap().trace_flags.unwrap(), TraceFlags::SAMPLED ); @@ -422,13 +494,12 @@ mod tests { #[test] fn tracing_appender_standalone_with_tracing_log() { // Arrange - let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter.clone()) .build(); - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - let subscriber = tracing_subscriber::registry().with(layer); + let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider); // avoiding setting tracing subscriber as global as that does not // play well with unit tests. 
@@ -436,7 +507,7 @@ mod tests { drop(tracing_log::LogTracer::init()); // Act - log::error!("log from log crate"); + log::error!(target: "my-system", "log from log crate"); logger_provider.force_flush(); // Assert TODO: move to helper methods @@ -449,11 +520,11 @@ mod tests { .expect("Atleast one log is expected to be present."); // Validate common fields - assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); - assert_eq!(log.record.severity_number, Some(Severity::Error)); + assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number(), Some(Severity::Error)); // Validate trace context is none. - assert!(log.record.trace_context.is_none()); + assert!(log.record.trace_context().is_none()); // Attributes can be polluted when we don't use this feature. #[cfg(feature = "experimental_metadata_attributes")] @@ -488,13 +559,12 @@ mod tests { #[test] fn tracing_appender_inside_tracing_context_with_tracing_log() { // Arrange - let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_simple_exporter(exporter.clone()) .build(); - let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider); - let subscriber = tracing_subscriber::registry().with(layer); + let subscriber = create_tracing_subscriber(exporter.clone(), &logger_provider); // avoiding setting tracing subscriber as global as that does not // play well with unit tests. @@ -503,7 +573,7 @@ mod tests { // setup tracing as well. let tracer_provider = TracerProvider::builder() - .with_config(trace::Config::default().with_sampler(Sampler::AlwaysOn)) + .with_sampler(Sampler::AlwaysOn) .build(); let tracer = tracer_provider.tracer("test-tracer"); @@ -513,7 +583,7 @@ mod tests { let span_id = cx.span().span_context().span_id(); // logging is done inside span context. - log::error!("log from log crate"); + log::error!(target: "my-system", "log from log crate"); (trace_id, span_id) }); @@ -529,26 +599,21 @@ mod tests { .expect("Atleast one log is expected to be present."); // validate common fields. - assert_eq!(log.instrumentation.name, "opentelemetry-appender-tracing"); - assert_eq!(log.record.severity_number, Some(Severity::Error)); + assert_eq!(log.instrumentation.name(), "opentelemetry-appender-tracing"); + assert_eq!(log.record.severity_number(), Some(Severity::Error)); // validate trace context. - assert!(log.record.trace_context.is_some()); + assert!(log.record.trace_context().is_some()); assert_eq!( - log.record.trace_context.as_ref().unwrap().trace_id, + log.record.trace_context().unwrap().trace_id, trace_id_expected ); assert_eq!( - log.record.trace_context.as_ref().unwrap().span_id, + log.record.trace_context().unwrap().span_id, span_id_expected ); assert_eq!( - log.record - .trace_context - .as_ref() - .unwrap() - .trace_flags - .unwrap(), + log.record.trace_context().unwrap().trace_flags.unwrap(), TraceFlags::SAMPLED ); diff --git a/opentelemetry-http/CHANGELOG.md b/opentelemetry-http/CHANGELOG.md index 26101764c8..18f06ca63d 100644 --- a/opentelemetry-http/CHANGELOG.md +++ b/opentelemetry-http/CHANGELOG.md @@ -2,6 +2,22 @@ ## vNext +- Bump msrv to 1.75.0. +- Add "internal-logs" feature flag (enabled by default), and emit internal logs. 
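Because `internal-logs` is in the default feature set, downstream consumers who want to compile these internal logs out entirely can opt out of default features. A sketch of a hypothetical consumer `Cargo.toml` (the re-enabled feature list is illustrative, not prescriptive):

```toml
# Pull in opentelemetry-http without the default "internal-logs" feature,
# re-enabling only the HTTP client features actually needed.
[dependencies]
opentelemetry-http = { version = "0.27", default-features = false, features = ["reqwest"] }
```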
+ +## 0.27.0 + +Released 2024-Nov-08 + +- Update `opentelemetry` dependency version to 0.27 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) + +## v0.26.0 +Released 2024-Sep-30 + +- Update `opentelemetry` dependency version to 0.26 + ## v0.25.0 - Update `opentelemetry` dependency version to 0.25 diff --git a/opentelemetry-http/Cargo.toml b/opentelemetry-http/Cargo.toml index dbde725bb2..fcc8492641 100644 --- a/opentelemetry-http/Cargo.toml +++ b/opentelemetry-http/Cargo.toml @@ -1,18 +1,20 @@ [package] name = "opentelemetry-http" -version = "0.25.0" +version = "0.27.0" description = "Helper implementations for sending HTTP requests. Uses include propagating and extracting context over http, exporting telemetry, requesting sampling strategies." homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" keywords = ["opentelemetry", "tracing", "context", "propagation"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [features] +default = ["internal-logs"] hyper = ["dep:http-body-util", "dep:hyper", "dep:hyper-util", "dep:tokio"] reqwest-rustls = ["reqwest", "reqwest/rustls-tls-native-roots"] reqwest-rustls-webpki-roots = ["reqwest", "reqwest/rustls-tls-webpki-roots"] +internal-logs = ["tracing", "opentelemetry/internal-logs"] [dependencies] async-trait = { workspace = true } @@ -20,7 +22,8 @@ bytes = { workspace = true } http = { workspace = true } http-body-util = { workspace = true, optional = true } hyper = { workspace = true, optional = true } -hyper-util = { workspace = true, features = ["client-legacy", "http2"], optional = true } -opentelemetry = { version = "0.25", path = "../opentelemetry", features = ["trace"] } +hyper-util = { workspace = true, features = ["client-legacy", "http1", "http2"], optional = true } +opentelemetry = { version = "0.27", path = "../opentelemetry", features = ["trace"] } reqwest = { workspace = true, features = ["blocking"], optional = true } tokio = { workspace = true, features = ["time"], optional = true } +tracing = {workspace = true, optional = true} \ No newline at end of file diff --git a/opentelemetry-http/README.md b/opentelemetry-http/README.md index 7ab8fe98bb..58f82210ab 100644 --- a/opentelemetry-http/README.md +++ b/opentelemetry-http/README.md @@ -28,3 +28,18 @@ management, and export of telemetry. A major goal of OpenTelemetry is that you can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. + +*[Supported Rust Versions](#supported-rust-versions)* + +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version. + +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.49, the minimum supported version will not be increased past 1.46, +three minor versions prior. Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy. 
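The `src/lib.rs` changes below replace the `new_with_timeout*` constructors of `HyperClient` with `new` and `with_default_connector`. A minimal usage sketch against those new signatures, assuming the crate's `hyper` feature is enabled (the 5-second timeout is an arbitrary illustrative choice):

```rust
use opentelemetry_http::hyper::HyperClient;
use std::time::Duration;

fn main() {
    // Build a hyper-based client with the default HttpConnector, a
    // 5-second timeout, and no Authorization header.
    let client = HyperClient::with_default_connector(Duration::from_secs(5), None);
    // `client` implements the HttpClient trait and can be handed to any
    // component that accepts a custom HTTP client.
    let _ = client;
}
```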
diff --git a/opentelemetry-http/src/lib.rs b/opentelemetry-http/src/lib.rs index bed95cd389..f272d8d4c5 100644 --- a/opentelemetry-http/src/lib.rs +++ b/opentelemetry-http/src/lib.rs @@ -13,7 +13,7 @@ use opentelemetry::propagation::{Extractor, Injector}; /// for example usage. pub struct HeaderInjector<'a>(pub &'a mut http::HeaderMap); -impl<'a> Injector for HeaderInjector<'a> { +impl Injector for HeaderInjector<'_> { /// Set a key and value in the HeaderMap. Does nothing if the key or value are not valid inputs. fn set(&mut self, key: &str, value: String) { if let Ok(name) = http::header::HeaderName::from_bytes(key.as_bytes()) { @@ -30,7 +30,7 @@ /// for example usage. pub struct HeaderExtractor<'a>(pub &'a http::HeaderMap); -impl<'a> Extractor for HeaderExtractor<'a> { +impl Extractor for HeaderExtractor<'_> { /// Get a value for a key from the HeaderMap. If the value is not valid ASCII, returns None. fn get(&self, key: &str) -> Option<&str> { self.0.get(key).and_then(|value| value.to_str().ok()) @@ -66,11 +66,14 @@ pub trait HttpClient: Debug + Send + Sync { #[cfg(feature = "reqwest")] mod reqwest { + use opentelemetry::otel_debug; + use super::{async_trait, Bytes, HttpClient, HttpError, Request, Response}; #[async_trait] impl HttpClient for reqwest::Client { async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> { + otel_debug!(name: "ReqwestClient.Send"); let request = request.try_into()?; let mut response = self.execute(request).await?.error_for_status()?; let headers = std::mem::take(response.headers_mut()); @@ -87,6 +90,7 @@ #[async_trait] impl HttpClient for reqwest::blocking::Client { async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> { + otel_debug!(name: "ReqwestBlockingClient.Send"); let request = request.try_into()?; let mut response = self.execute(request)?.error_for_status()?; let headers = std::mem::take(response.headers_mut()); @@ -102,13 +106,16 @@ #[cfg(feature = "hyper")] pub mod hyper { - use crate::ResponseExt; - use super::{async_trait, Bytes, HttpClient, HttpError, Request, Response}; + use crate::ResponseExt; use http::HeaderValue; use http_body_util::{BodyExt, Full}; use hyper::body::{Body as HttpBody, Frame}; - use hyper_util::client::legacy::{connect::Connect, Client}; + use hyper_util::client::legacy::{ + connect::{Connect, HttpConnector}, + Client, + }; + use opentelemetry::otel_debug; use std::fmt::Debug; use std::pin::Pin; use std::task::{self, Poll}; @@ -116,40 +123,44 @@ use tokio::time; #[derive(Debug, Clone)] - pub struct HyperClient<C> { + pub struct HyperClient<C> + where + C: Connect + Clone + Send + Sync + 'static, + { inner: Client<C, Body>, timeout: Duration, authorization: Option<HeaderValue>, } - impl<C> HyperClient<C> { - pub fn new_with_timeout(inner: Client<C, Body>, timeout: Duration) -> Self { + impl<C> HyperClient<C> + where + C: Connect + Clone + Send + Sync + 'static, + { + pub fn new(connector: C, timeout: Duration, authorization: Option<HeaderValue>) -> Self { + // TODO - support custom executor + let inner = Client::builder(hyper_util::rt::TokioExecutor::new()).build(connector); Self { inner, timeout, - authorization: None, + authorization, } } + } - pub fn new_with_timeout_and_authorization_header( - inner: Client<C, Body>, + impl HyperClient<HttpConnector> { + /// Creates a new `HyperClient` with a default `HttpConnector`.
+ pub fn with_default_connector( timeout: Duration, - authorization: HeaderValue, + authorization: Option<HeaderValue>, ) -> Self { - Self { - inner, - timeout, - authorization: Some(authorization), - } + Self::new(HttpConnector::new(), timeout, authorization) } } #[async_trait] - impl<C> HttpClient for HyperClient<C> - where - C: Connect + Send + Sync + Clone + Debug + 'static, - { + impl<C: Connect + Clone + Send + Sync + Debug + 'static> HttpClient for HyperClient<C> { async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> { + otel_debug!(name: "HyperClient.Send"); let (parts, body) = request.into_parts(); let mut request = Request::from_parts(parts, Body(Full::from(body))); if let Some(ref authorization) = self.authorization { diff --git a/opentelemetry-jaeger-propagator/CHANGELOG.md b/opentelemetry-jaeger-propagator/CHANGELOG.md index ab69063ed5..04aeb82e72 100644 --- a/opentelemetry-jaeger-propagator/CHANGELOG.md +++ b/opentelemetry-jaeger-propagator/CHANGELOG.md @@ -2,6 +2,22 @@ ## vNext +- Bump msrv to 1.75.0. + + +## 0.27.0 + +Released 2024-Nov-11 + +- Update `opentelemetry` dependency version to 0.27 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) + +## v0.26.0 +Released 2024-Sep-30 + +- Update `opentelemetry` dependency version to 0.26 + ## v0.25.0 - Update `opentelemetry` dependency version to 0.25 diff --git a/opentelemetry-jaeger-propagator/Cargo.toml b/opentelemetry-jaeger-propagator/Cargo.toml index a6aae9df57..bcf525f931 100644 --- a/opentelemetry-jaeger-propagator/Cargo.toml +++ b/opentelemetry-jaeger-propagator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-jaeger-propagator" -version = "0.25.0" +version = "0.27.0" description = "Jaeger propagator for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger-propagator" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger-propagator" @@ -13,19 +13,21 @@ categories = [ keywords = ["opentelemetry", "jaeger", "propagator"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -opentelemetry = { version = "0.25", default-features = false, features = [ +opentelemetry = { version = "0.27", default-features = false, features = [ "trace", ], path = "../opentelemetry" } +tracing = {workspace = true, optional = true} # optional for opentelemetry internal logging [dev-dependencies] opentelemetry = { features = ["testing"], path = "../opentelemetry" } [features] -default = [] +default = ["internal-logs"] +internal-logs = ["tracing"] diff --git a/opentelemetry-jaeger-propagator/README.md b/opentelemetry-jaeger-propagator/README.md index 9098e96a6a..5955df7fc0 100644 --- a/opentelemetry-jaeger-propagator/README.md +++ b/opentelemetry-jaeger-propagator/README.md @@ -28,3 +28,18 @@ management, and export of telemetry. A major goal of OpenTelemetry is that you can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. + +*[Supported Rust Versions](#supported-rust-versions)* + +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version.
+ +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.49, the minimum supported version will not be increased past 1.46, +three minor versions prior. Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy. diff --git a/opentelemetry-jaeger-propagator/src/lib.rs b/opentelemetry-jaeger-propagator/src/lib.rs index c8047eb11b..29921058a3 100644 --- a/opentelemetry-jaeger-propagator/src/lib.rs +++ b/opentelemetry-jaeger-propagator/src/lib.rs @@ -1,10 +1,9 @@ -//! *Compiler support: [requires `rustc` 1.64+][msrv]* +//! *[Supported Rust Versions](#supported-rust-versions)* //! //! [Jaeger Docs]: https://www.jaegertracing.io/docs/ //! [jaeger-deprecation]: https://github.com/open-telemetry/opentelemetry-specification/pull/2858/files //! [jaeger-otlp]: https://www.jaegertracing.io/docs/1.38/apis/#opentelemetry-protocol-stable //! [otlp-exporter]: https://docs.rs/opentelemetry-otlp/latest/opentelemetry_otlp/ -//! [msrv]: #supported-rust-versions //! [jaeger propagation format]: https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format //! //! # Supported Rust Versions diff --git a/opentelemetry-jaeger-propagator/src/propagator.rs b/opentelemetry-jaeger-propagator/src/propagator.rs index 4dbd0b16fc..dfbbf3b05d 100644 --- a/opentelemetry-jaeger-propagator/src/propagator.rs +++ b/opentelemetry-jaeger-propagator/src/propagator.rs @@ -1,8 +1,7 @@ -use opentelemetry::propagation::PropagationError; use opentelemetry::{ - global::{self, Error}, + otel_warn, propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, - trace::{SpanContext, SpanId, TraceContextExt, TraceError, TraceFlags, TraceId, TraceState}, + trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, Context, }; use std::borrow::Cow; @@ -82,10 +81,11 @@ impl Propagator { let parts = header_value.split_terminator(':').collect::<Vec<&str>>(); if parts.len() != 4 { - global::handle_error(Error::Propagation(PropagationError::extract( - "invalid jaeger header format", - "JaegerPropagator", - ))); + otel_warn!( + name: "JaegerPropagator.InvalidHeader", + message = "Invalid jaeger header format", + header_value = header_value.to_string(), + ); return None; } @@ -100,10 +100,11 @@ Some(SpanContext::new(trace_id, span_id, flags, true, state)) } _ => { - global::handle_error(Error::Propagation(PropagationError::extract( - "invalid jaeger header format", - "JaegerPropagator", - ))); + otel_warn!( + name: "JaegerPropagator.InvalidHeader", + message = "Invalid jaeger header format", + header_value = header_value.to_string(), + ); None } } @@ -171,7 +172,11 @@ impl Propagator { match TraceState::from_key_value(baggage_keys) { Ok(trace_state) => Ok(trace_state), Err(trace_state_err) => { - global::handle_error(Error::Trace(TraceError::Other(Box::new(trace_state_err)))); + otel_warn!( + name: "JaegerPropagator.InvalidTraceState", + message = "Invalid trace state", + reason = format!("{:?}", trace_state_err), + ); Err(()) //todo: assign an error type instead of using () } } diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md index 584d80ed7f..2d0676d5bb 100644 --- a/opentelemetry-otlp/CHANGELOG.md +++ b/opentelemetry-otlp/CHANGELOG.md @@ -2,6 +2,77 @@ ## vNext +- Bump msrv to 1.75.0.
+ + +## 0.27.0 + +Released 2024-Nov-11 + +- Update `opentelemetry` dependency version to 0.27 +- Update `opentelemetry_sdk` dependency version to 0.27 +- Update `opentelemetry-http` dependency version to 0.27 +- Update `opentelemetry-proto` dependency version to 0.27 + +- **BREAKING**: + - ([#2217](https://github.com/open-telemetry/opentelemetry-rust/pull/2217)) **Replaced**: The `MetricsExporterBuilder` interface is modified from `with_temporality_selector` to `with_temporality`; an example can be seen below: + Previous Signature: + ```rust + MetricsExporterBuilder::default().with_temporality_selector(DeltaTemporalitySelector::new()) + ``` + Updated Signature: + ```rust + MetricsExporterBuilder::default().with_temporality(opentelemetry_sdk::metrics::Temporality::Delta) + ``` + - ([#2221](https://github.com/open-telemetry/opentelemetry-rust/pull/2221)) **Replaced**: + - The `opentelemetry_otlp::new_pipeline().{trace,logging,metrics}()` interface is now replaced with `{TracerProvider,SdkMeterProvider,LoggerProvider}::builder()`. + - The `opentelemetry_otlp::new_exporter()` interface is now replaced with `{SpanExporter,MetricsExporter,LogExporter}::builder()`. + + Pull request [#2221](https://github.com/open-telemetry/opentelemetry-rust/pull/2221) has a detailed migration guide in the description. See example below, + and [basic-otlp](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-otlp/examples/basic-otlp/src/main.rs) for more details: + + Previous Signature: + ```rust + let logger_provider: LoggerProvider = opentelemetry_otlp::new_pipeline() + .logging() + .with_resource(RESOURCE.clone()) + .with_exporter( + opentelemetry_otlp::new_exporter() + .tonic() + .with_endpoint("http://localhost:4317") + ) + .install_batch(runtime::Tokio)?; + ``` + Updated Signature: + ```rust + let exporter = LogExporter::builder() + .with_tonic() + .with_endpoint("http://localhost:4317") + .build()?; + + Ok(LoggerProvider::builder() + .with_resource(RESOURCE.clone()) + .with_batch_exporter(exporter, runtime::Tokio) + .build()) + ``` + - **Renamed** + - ([#2255](https://github.com/open-telemetry/opentelemetry-rust/pull/2255)): de-pluralize Metric types. + - `MetricsExporter` -> `MetricExporter` + - `MetricsExporterBuilder` -> `MetricExporterBuilder` + + - [#2263](https://github.com/open-telemetry/opentelemetry-rust/pull/2263) + Support `hyper` client for opentelemetry-otlp. This can be enabled using the `hyper-client` feature flag.
+ Refer example: https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp/examples/basic-otlp-http + +## v0.26.0 +Released 2024-Sep-30 + +- Update `opentelemetry` dependency version to 0.26 +- Update `opentelemetry_sdk` dependency version to 0.26 +- Update `opentelemetry-http` dependency version to 0.26 +- Update `opentelemetry-proto` dependency version to 0.26 +- Bump MSRV to 1.71.1 [2140](https://github.com/open-telemetry/opentelemetry-rust/pull/2140) + ## v0.25.0 - Update `opentelemetry` dependency version to 0.25 diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index 859d72529e..0f4599fc03 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-otlp" -version = "0.25.0" +version = "0.27.0" description = "Exporter for the OpenTelemetry Collector" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-otlp" @@ -13,7 +13,7 @@ categories = [ keywords = ["opentelemetry", "otlp", "logging", "tracing", "metrics"] license = "Apache-2.0" edition = "2021" -rust-version = "1.70" +rust-version = "1.75.0" autotests = false [[test]] @@ -28,10 +28,11 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] async-trait = { workspace = true } futures-core = { workspace = true } -opentelemetry = { version = "0.25", default-features = false, path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.25", default-features = false, path = "../opentelemetry-sdk" } -opentelemetry-http = { version = "0.25", path = "../opentelemetry-http", optional = true } -opentelemetry-proto = { version = "0.25", path = "../opentelemetry-proto", default-features = false } +opentelemetry = { version = "0.27", default-features = false, path = "../opentelemetry" } +opentelemetry_sdk = { version = "0.27", default-features = false, path = "../opentelemetry-sdk" } +opentelemetry-http = { version = "0.27", path = "../opentelemetry-http", optional = true } +opentelemetry-proto = { version = "0.27", path = "../opentelemetry-proto", default-features = false } +tracing = {workspace = true, optional = true} prost = { workspace = true, optional = true } tonic = { workspace = true, optional = true } @@ -57,11 +58,12 @@ trace = ["opentelemetry/trace", "opentelemetry_sdk/trace", "opentelemetry-proto/ metrics = ["opentelemetry/metrics", "opentelemetry_sdk/metrics", "opentelemetry-proto/metrics"] logs = ["opentelemetry/logs", "opentelemetry_sdk/logs", "opentelemetry-proto/logs"] populate-logs-event-name = ["opentelemetry-proto/populate-logs-event-name"] +internal-logs = ["tracing", "opentelemetry/internal-logs"] # add ons serialize = ["serde", "serde_json"] -default = ["grpc-tonic", "trace", "metrics", "logs"] +default = ["grpc-tonic", "trace", "metrics", "logs", "internal-logs"] # grpc using tonic grpc-tonic = ["tonic", "prost", "http", "tokio", "opentelemetry-proto/gen-tonic"] @@ -78,6 +80,7 @@ reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest"] reqwest-client = ["reqwest", "opentelemetry-http/reqwest"] reqwest-rustls = ["reqwest", "opentelemetry-http/reqwest-rustls"] reqwest-rustls-webpki-roots = ["reqwest", "opentelemetry-http/reqwest-rustls-webpki-roots"] +hyper-client = ["opentelemetry-http/hyper"] # test -integration-testing = ["tonic", "prost", "tokio/full", "trace"] +integration-testing = ["tonic", "prost", "tokio/full", "trace", "logs"] diff --git 
a/opentelemetry-otlp/README.md b/opentelemetry-otlp/README.md index c9fb019de2..16a54c875b 100644 --- a/opentelemetry-otlp/README.md +++ b/opentelemetry-otlp/README.md @@ -30,11 +30,10 @@ can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. -*Compiler support: [requires `rustc` 1.70+][msrv]* +*[Supported Rust Versions](#supported-rust-versions)* [Prometheus]: https://prometheus.io [Jaeger]: https://www.jaegertracing.io -[msrv]: #supported-rust-versions ## Getting started @@ -43,7 +42,7 @@ See [docs](https://docs.rs/opentelemetry-otlp). ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.70. The current OpenTelemetry version is not guaranteed to build +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. The current stable Rust compiler and the three most recent minor versions diff --git a/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml b/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml index ccbe22e960..c56c5502c3 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml +++ b/opentelemetry-otlp/examples/basic-otlp-http/Cargo.toml @@ -6,27 +6,19 @@ license = "Apache-2.0" publish = false [features] -default = ["reqwest"] +default = ["reqwest", "experimental_metrics_periodicreader_with_async_runtime"] reqwest = ["opentelemetry-otlp/reqwest-client"] -hyper = ["dep:async-trait", "dep:http", "dep:http-body-util", "dep:hyper", "dep:hyper-util", "dep:opentelemetry-http", "dep:bytes"] +hyper = ["opentelemetry-otlp/hyper-client"] +experimental_metrics_periodicreader_with_async_runtime = ["opentelemetry_sdk/experimental_metrics_periodicreader_with_async_runtime"] [dependencies] once_cell = { workspace = true } opentelemetry = { path = "../../../opentelemetry" } -opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "metrics", "logs"] } -opentelemetry-http = { path = "../../../opentelemetry-http", optional = true } -opentelemetry-otlp = { path = "../..", features = ["http-proto", "http-json", "reqwest-client", "logs"] } +opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "experimental_metrics_periodicreader_with_async_runtime"]} +opentelemetry-otlp = { path = "../..", features = ["http-proto", "http-json", "logs", "internal-logs"] , default-features = false} opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false} -opentelemetry-semantic-conventions = { path = "../../../opentelemetry-semantic-conventions" } -async-trait = { workspace = true, optional = true } -bytes = { workspace = true, optional = true } -http = { workspace = true, optional = true } -http-body-util = { workspace = true, optional = true } -hyper = { workspace = true, features = ["client"], optional = true } -hyper-util = { workspace = true, features = ["client-legacy"], optional = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true, features = ["std"]} -tracing-core = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std"] } +tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] } diff --git a/opentelemetry-otlp/examples/basic-otlp-http/README.md 
b/opentelemetry-otlp/examples/basic-otlp-http/README.md index d70a5534a0..eb65c74160 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/README.md +++ b/opentelemetry-otlp/examples/basic-otlp-http/README.md @@ -1,10 +1,24 @@ -# Basic OTLP exporter Example +# Basic OTLP Exporter Example -This example shows how to setup OpenTelemetry OTLP exporter for logs, metrics -and traces to export them to the [OpenTelemetry +This example demonstrates how to set up an OpenTelemetry OTLP exporter for logs, +metrics, and traces to send data to the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) via OTLP -over selected protocol such as HTTP/protobuf or HTTP/json. The Collector then sends the data to the appropriate -backend, in this case, the logging Exporter, which displays data to console. +over HTTP (using `protobuf` encoding by default, which can be changed to +`json`). The Collector then forwards the data to the configured backend, which +in this case is the logging exporter, displaying data on the console. +Additionally, the example configures a `tracing::fmt` layer to output logs +emitted via `tracing` to `stdout`. For demonstration, this layer uses a filter +to display `DEBUG` level logs from various OpenTelemetry components. In real +applications, these filters should be adjusted appropriately. + +The example employs a `BatchExporter` for logs and traces, which is the +recommended approach when using OTLP exporters. While it can be modified to use +a `SimpleExporter`, this requires enabling the `reqwest-blocking-client` feature +flag and making `main()` a regular function rather than `tokio::main`. + +// TODO: Metrics does not work with non tokio main when using `reqwest-blocking-client` today, fix that when switching +// default to use own thread. +// TODO: Document `hyper` feature flag when using SimpleProcessor. ## Usage @@ -52,14 +66,12 @@ Run the app which exports logs, metrics and traces via OTLP to the collector cargo run ``` - By default the app will use a `reqwest` client to send. A hyper 0.14 client can be used with the `hyper` feature enabled ```shell cargo run --no-default-features --features=hyper ``` - ## View results You should be able to see something similar below with different time and ID in the same console that docker runs. @@ -121,7 +133,7 @@ SpanEvent #0 -> Timestamp: 2024-05-14 02:15:56.824201397 +0000 UTC -> DroppedAttributesCount: 0 -> Attributes:: - -> bogons: Int(100) + -> some.key: Int(100) {"kind": "exporter", "data_type": "traces", "name": "logging"} ...
``` diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/hyper.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/hyper.rs deleted file mode 100644 index 80a28ae62d..0000000000 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/hyper.rs +++ /dev/null @@ -1,49 +0,0 @@ -use async_trait::async_trait; -use bytes::Bytes; -use http::{Request, Response}; -use http_body_util::{BodyExt, Full}; -use hyper_util::{ - client::legacy::{ - connect::{Connect, HttpConnector}, - Client, - }, - rt::TokioExecutor, -}; -use opentelemetry_http::{HttpClient, HttpError, ResponseExt}; -pub struct HyperClient { - inner: hyper_util::client::legacy::Client<HttpConnector, Full<Bytes>>, -} -impl Default for HyperClient { - fn default() -> Self { - Self { - inner: Client::builder(TokioExecutor::new()).build_http(), - } - } -} -impl std::fmt::Debug for HyperClient { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("HyperClient") - .field("inner", &self.inner) - .finish() - } -} -#[async_trait] -impl HttpClient for HyperClient { - async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> { - let request = request.map(|body| Full::new(Bytes::from(body))); - let (parts, body) = self - .inner - .request(request) - .await? - .error_for_status()? - .into_parts(); - let body = body.collect().await?.to_bytes(); - Ok(Response::from_parts(parts, body)) - } -} diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 006d8e4e2e..6b3dee3f07 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -1,14 +1,19 @@ +/// To use hyper as the HTTP client - cargo run --features="hyper" --no-default-features use once_cell::sync::Lazy; use opentelemetry::{ global, - metrics::MetricsError, - trace::{TraceContextExt, TraceError, Tracer, TracerProvider as _}, - KeyValue, + trace::{TraceContextExt, TraceError, Tracer}, + InstrumentationScope, KeyValue, }; use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge; -use opentelemetry_otlp::Protocol; -use opentelemetry_otlp::{HttpExporterBuilder, WithExportConfig}; -use opentelemetry_sdk::trace::{self as sdktrace, Config}; +use opentelemetry_otlp::WithExportConfig; +use opentelemetry_otlp::{LogExporter, MetricExporter, Protocol, SpanExporter}; +use opentelemetry_sdk::{ + logs::LoggerProvider, + metrics::{MetricError, SdkMeterProvider}, + runtime, + trace::{self as sdktrace, TracerProvider}, +}; use opentelemetry_sdk::{ logs::{self as sdklogs}, Resource, }; @@ -18,125 +23,122 @@ use tracing::info; use tracing_subscriber::prelude::*; use tracing_subscriber::EnvFilter; -#[cfg(feature = "hyper")] -mod hyper; static RESOURCE: Lazy<Resource> = Lazy::new(|| { - Resource::new(vec![KeyValue::new( - opentelemetry_semantic_conventions::resource::SERVICE_NAME, - "basic-otlp-example", - )]) + Resource::builder() + .with_service_name("basic-otlp-example") + .build() }); -fn http_exporter() -> HttpExporterBuilder { - let exporter = opentelemetry_otlp::new_exporter().http(); - #[cfg(feature = "hyper")] - let exporter = exporter.with_http_client(hyper::HyperClient::default()); - exporter -} +fn init_logs() -> Result<LoggerProvider, LogError> { + let exporter = LogExporter::builder() + .with_http() + .with_endpoint("http://localhost:4318/v1/logs") + .with_protocol(Protocol::HttpBinary) + .build()?; -fn init_logs() -> Result<sdklogs::LoggerProvider, LogError> { - opentelemetry_otlp::new_pipeline() - .logging() + Ok(LoggerProvider::builder() + .with_batch_exporter(exporter)
.with_resource(RESOURCE.clone()) - .with_exporter( - http_exporter() - .with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format - .with_endpoint("http://localhost:4318/v1/logs"), - ) - .install_batch(opentelemetry_sdk::runtime::Tokio) + .build()) } -fn init_tracer_provider() -> Result<sdktrace::TracerProvider, TraceError> { - opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter( - http_exporter() - .with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format - .with_endpoint("http://localhost:4318/v1/traces"), - ) - .with_trace_config(Config::default().with_resource(RESOURCE.clone())) - .install_batch(opentelemetry_sdk::runtime::Tokio) +fn init_traces() -> Result<sdktrace::TracerProvider, TraceError> { + let exporter = SpanExporter::builder() + .with_http() + .with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format + .with_endpoint("http://localhost:4318/v1/traces") + .build()?; + + Ok(TracerProvider::builder() + .with_batch_exporter(exporter, runtime::Tokio) + .with_resource(RESOURCE.clone()) + .build()) } -fn init_metrics() -> Result<opentelemetry_sdk::metrics::SdkMeterProvider, MetricsError> { - opentelemetry_otlp::new_pipeline() - .metrics(opentelemetry_sdk::runtime::Tokio) - .with_exporter( - http_exporter() - .with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format - .with_endpoint("http://localhost:4318/v1/metrics"), +fn init_metrics() -> Result<SdkMeterProvider, MetricError> { + let exporter = MetricExporter::builder() + .with_http() + .with_protocol(Protocol::HttpBinary) //can be changed to `Protocol::HttpJson` to export in JSON format + .with_endpoint("http://localhost:4318/v1/metrics") + .build()?; + + #[cfg(feature = "experimental_metrics_periodicreader_with_async_runtime")] + let reader = + opentelemetry_sdk::metrics::periodic_reader_with_async_runtime::PeriodicReader::builder( + exporter, + runtime::Tokio, ) + .build(); + // TODO: This does not work today. See https://github.com/open-telemetry/opentelemetry-rust/issues/2400 + #[cfg(not(feature = "experimental_metrics_periodicreader_with_async_runtime"))] + let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter).build(); + + Ok(SdkMeterProvider::builder() + .with_reader(reader) .with_resource(RESOURCE.clone()) - .build() + .build()) } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { - let result = init_tracer_provider(); - assert!( - result.is_ok(), - "Init tracer failed with error: {:?}", - result.err() - ); - - let tracer_provider = result.unwrap(); - global::set_tracer_provider(tracer_provider.clone()); - - let result = init_metrics(); - assert!( - result.is_ok(), - "Init metrics failed with error: {:?}", - result.err() - ); - - let meter_provider = result.unwrap(); - global::set_meter_provider(meter_provider.clone()); - - // Opentelemetry will not provide a global API to manage the logger - // provider. Application users must manage the lifecycle of the logger - // provider on their own. Dropping logger providers will disable log - // emitting. - let logger_provider = init_logs().unwrap(); + let logger_provider = init_logs()?; // Create a new OpenTelemetryTracingBridge using the above LoggerProvider. - let layer = OpenTelemetryTracingBridge::new(&logger_provider); + let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider); - // Add a tracing filter to filter events from crates used by opentelemetry-otlp.
- // The filter levels are set as follows: + // For the OpenTelemetry layer, add a tracing filter to filter events from + // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates + // like reqwest/tonic etc.) from being sent back to OTel itself, thus + // preventing infinite telemetry generation. The filter levels are set as + // follows: // - Allow `info` level and above by default. - // - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only. - // This ensures events generated from these crates within the OTLP Exporter are not looped back, - // thus preventing infinite event generation. - // Note: This will also drop events from these crates used outside the OTLP Exporter. - // For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761 - let filter = EnvFilter::new("info") - .add_directive("hyper=error".parse().unwrap()) - .add_directive("tonic=error".parse().unwrap()) - .add_directive("reqwest=error".parse().unwrap()); - + // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. + // Note: This will also drop events from crates like `tonic` etc. even when + // they are used outside the OTLP Exporter. For more details, see: + // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + let filter_otel = EnvFilter::new("info") + .add_directive("hyper=off".parse().unwrap()) + .add_directive("opentelemetry=off".parse().unwrap()) + .add_directive("tonic=off".parse().unwrap()) + .add_directive("h2=off".parse().unwrap()) + .add_directive("reqwest=off".parse().unwrap()); + let otel_layer = otel_layer.with_filter(filter_otel); + + // Create a new tracing::Fmt layer to print the logs to stdout. It has a + // default filter of `info` level and above, and `debug` and above for logs + // from OpenTelemetry crates. The filter levels can be customized as needed. + let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap()); + let fmt_layer = tracing_subscriber::fmt::layer() + .with_thread_names(true) + .with_filter(filter_fmt); + + // Initialize the tracing subscriber with the OpenTelemetry layer and the + // Fmt layer. 
tracing_subscriber::registry() - .with(filter) - .with(layer) + .with(otel_layer) + .with(fmt_layer) .init(); + let tracer_provider = init_traces()?; + global::set_tracer_provider(tracer_provider.clone()); + + let meter_provider = init_metrics()?; + global::set_meter_provider(meter_provider.clone()); + let common_scope_attributes = vec![KeyValue::new("scope-key", "scope-value")]; - let tracer = global::tracer_provider() - .tracer_builder("basic") - .with_attributes(common_scope_attributes.clone()) + let scope = InstrumentationScope::builder("basic") + .with_version("1.0") + .with_attributes(common_scope_attributes) .build(); - let meter = global::meter_with_version( - "basic", - Some("v1.0"), - Some("schema_url"), - Some(common_scope_attributes.clone()), - ); + + let tracer = global::tracer_with_scope(scope.clone()); + let meter = global::meter_with_scope(scope); let counter = meter .u64_counter("test_counter") .with_description("a simple counter for demo purposes.") .with_unit("my_unit") - .init(); + .build(); for _ in 0..10 { counter.add(1, &[KeyValue::new("test_key", "test_value")]); } @@ -146,7 +148,7 @@ async fn main() -> Result<(), Box<dyn Error>> { let span = cx.span(); span.add_event( "Nice operation!".to_string(), - vec![KeyValue::new("bogons", 100)], + vec![KeyValue::new("some.key", 100)], ); span.set_attribute(KeyValue::new("another.key", "yes")); @@ -161,7 +163,7 @@ info!(target: "my-target", "hello from {}. My price is {}", "apple", 1.99); - global::shutdown_tracer_provider(); + tracer_provider.shutdown()?; logger_provider.shutdown()?; meter_provider.shutdown()?; diff --git a/opentelemetry-otlp/examples/basic-otlp/Cargo.toml b/opentelemetry-otlp/examples/basic-otlp/Cargo.toml index 10553f38d1..ad050bc338 100644 --- a/opentelemetry-otlp/examples/basic-otlp/Cargo.toml +++ b/opentelemetry-otlp/examples/basic-otlp/Cargo.toml @@ -7,12 +7,10 @@ publish = false [dependencies] once_cell = { workspace = true } -opentelemetry = { path = "../../../opentelemetry", features = ["metrics", "logs"] } -opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "logs"] } -opentelemetry-otlp = { path = "../../../opentelemetry-otlp", features = ["tonic", "metrics", "logs"] } -opentelemetry-semantic-conventions = { path = "../../../opentelemetry-semantic-conventions" } +opentelemetry = { path = "../../../opentelemetry" } +opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio"] } +opentelemetry-otlp = { path = "../../../opentelemetry-otlp" } tokio = { version = "1.0", features = ["full"] } opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false} tracing = { workspace = true, features = ["std"]} -tracing-core = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std"] } \ No newline at end of file +tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] } \ No newline at end of file diff --git a/opentelemetry-otlp/examples/basic-otlp/README.md b/opentelemetry-otlp/examples/basic-otlp/README.md index 28206793bc..ca02018ad5 100644 --- a/opentelemetry-otlp/examples/basic-otlp/README.md +++ b/opentelemetry-otlp/examples/basic-otlp/README.md @@ -1,10 +1,51 @@ -# Basic OTLP exporter Example - -This example shows how to setup OpenTelemetry OTLP exporter for logs, metrics -and traces to exports them to the [OpenTelemetry -Collector](https://github.com/open-telemetry/opentelemetry-collector) via
OTLP over gRPC. -The Collector then sends the data to the appropriate backend, in this case, -the logging Exporter, which displays data to console. +# Basic OTLP Exporter Example + +This example demonstrates how to set up an OpenTelemetry OTLP exporter for logs, +metrics, and traces to send data to the [OpenTelemetry +Collector](https://github.com/open-telemetry/opentelemetry-collector) via OTLP +over gRPC. The Collector then forwards the data to the configured backend, which +in this case is the logging exporter, displaying data on the console. +Additionally, the example configures a `tracing::fmt` layer to output logs +emitted via `tracing` to `stdout`. For demonstration, this layer uses a filter +to display `DEBUG` level logs from various OpenTelemetry components. In real +applications, these filters should be adjusted appropriately. + +The example employs a `BatchExporter` for logs and traces, which is the +recommended approach when using OTLP exporters. While it can be modified to use +a `SimpleExporter`, this requires the main method to be a `tokio::main` function +since the `tonic` client requires a Tokio runtime. If you prefer not to use +`tokio::main`, then the `init_logs` and `init_traces` functions must be executed +within a Tokio runtime. + +This example uses the default `PeriodicReader` for metrics, which uses its own +thread for background processing/exporting. Since the `tonic` client requires a +Tokio runtime, the main method must be a `tokio::main` function. If you prefer not +to use `tokio::main`, then the `init_metrics` function must be executed within a +Tokio runtime. + +Below is an example of how to use a non-`tokio::main` setup: + +```rust +fn main() -> Result<(), Box<dyn std::error::Error>> { + let rt = tokio::runtime::Runtime::new()?; + let tracer_provider = rt.block_on(async { + init_traces() + })?; + global::set_tracer_provider(tracer_provider.clone()); + + let meter_provider = rt.block_on(async { + init_metrics() + })?; + global::set_meter_provider(meter_provider.clone()); + + let logger_provider = rt.block_on(async { + init_logs() + })?; + + // Ensure the runtime (`rt`) remains active until the program ends + // Additional code goes here...
+} +``` ## Usage @@ -113,14 +154,14 @@ SpanEvent #0 -> Timestamp: 2024-05-22 20:25:42.8770471 +0000 UTC -> DroppedAttributesCount: 0 -> Attributes:: - -> bogons: Int(100) + -> some.key: Int(100) {"kind": "exporter", "data_type": "traces", "name": "logging"} ``` ### Metric ```text -2024-05-22T20:25:42.908Z info MetricsExporter {"kind": "exporter", "data_type": "metrics", "name": "logging", "resource metrics": 1, "metrics": 1, "data points": 1} +2024-05-22T20:25:42.908Z info MetricExporter {"kind": "exporter", "data_type": "metrics", "name": "logging", "resource metrics": 1, "metrics": 1, "data points": 1} 2024-05-22T20:25:42.908Z info ResourceMetrics #0 Resource SchemaURL: Resource attributes: @@ -150,7 +191,7 @@ Value: 10 ### Logs ```text -2024-05-22T20:25:42.914Z info LogsExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 2, "log records": 2} +2024-05-22T20:25:42.914Z info LogExporter {"kind": "exporter", "data_type": "logs", "name": "logging", "resource logs": 2, "log records": 2} 2024-05-22T20:25:42.914Z info ResourceLog #0 Resource SchemaURL: Resource attributes: diff --git a/opentelemetry-otlp/examples/basic-otlp/otel-collector-config.yaml b/opentelemetry-otlp/examples/basic-otlp/otel-collector-config.yaml index 51ef89550f..b17c2607db 100644 --- a/opentelemetry-otlp/examples/basic-otlp/otel-collector-config.yaml +++ b/opentelemetry-otlp/examples/basic-otlp/otel-collector-config.yaml @@ -8,7 +8,9 @@ receivers: otlp: protocols: grpc: + endpoint: 0.0.0.0:4317 http: + endpoint: 0.0.0.0:4318 exporters: debug: diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index f931e592e2..c5425f8a9b 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -1,15 +1,13 @@ use once_cell::sync::Lazy; -use opentelemetry::global; -use opentelemetry::logs::LogError; -use opentelemetry::metrics::MetricsError; -use opentelemetry::trace::{TraceError, TracerProvider}; -use opentelemetry::{ - trace::{TraceContextExt, Tracer}, - KeyValue, -}; +use opentelemetry::trace::{TraceContextExt, TraceError, Tracer}; +use opentelemetry::KeyValue; +use opentelemetry::{global, InstrumentationScope}; use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge; -use opentelemetry_otlp::{ExportConfig, WithExportConfig}; -use opentelemetry_sdk::trace::Config; +use opentelemetry_otlp::{LogExporter, MetricExporter, SpanExporter, WithExportConfig}; +use opentelemetry_sdk::logs::LogError; +use opentelemetry_sdk::logs::LoggerProvider; +use opentelemetry_sdk::metrics::MetricError; +use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider}; use opentelemetry_sdk::{runtime, trace as sdktrace, Resource}; use std::error::Error; use tracing::info; @@ -17,117 +15,104 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::EnvFilter; static RESOURCE: Lazy<Resource> = Lazy::new(|| { - Resource::new(vec![KeyValue::new( - opentelemetry_semantic_conventions::resource::SERVICE_NAME, - "basic-otlp-example", - )]) + Resource::builder() + .with_service_name("basic-otlp-example") + .build() }); -fn init_tracer_provider() -> Result<sdktrace::TracerProvider, TraceError> { - opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter( - opentelemetry_otlp::new_exporter() - .tonic() - .with_endpoint("http://localhost:4317"), - ) - .with_trace_config(Config::default().with_resource(RESOURCE.clone())) - .install_batch(runtime::Tokio) +fn init_traces() -> Result<sdktrace::TracerProvider, TraceError> { + let exporter = SpanExporter::builder() +
.with_tonic() + .with_endpoint("http://localhost:4317") + .build()?; + Ok(sdktrace::TracerProvider::builder() + .with_resource(RESOURCE.clone()) + .with_batch_exporter(exporter, runtime::Tokio) + .build()) } -fn init_metrics() -> Result<opentelemetry_sdk::metrics::SdkMeterProvider, MetricsError> { - let export_config = ExportConfig { - endpoint: "http://localhost:4317".to_string(), - ..ExportConfig::default() - }; - opentelemetry_otlp::new_pipeline() - .metrics(runtime::Tokio) - .with_exporter( - opentelemetry_otlp::new_exporter() - .tonic() - .with_export_config(export_config), - ) +fn init_metrics() -> Result<SdkMeterProvider, MetricError> { + let exporter = MetricExporter::builder().with_tonic().build()?; + let reader = PeriodicReader::builder(exporter).build(); + + Ok(SdkMeterProvider::builder() + .with_reader(reader) .with_resource(RESOURCE.clone()) - .build() + .build()) } fn init_logs() -> Result<opentelemetry_sdk::logs::LoggerProvider, LogError> { - opentelemetry_otlp::new_pipeline() - .logging() + let exporter = LogExporter::builder() + .with_tonic() + .with_endpoint("http://localhost:4317") + .build()?; + + Ok(LoggerProvider::builder() .with_resource(RESOURCE.clone()) - .with_exporter( - opentelemetry_otlp::new_exporter() - .tonic() - .with_endpoint("http://localhost:4317"), - ) - .install_batch(runtime::Tokio) + .with_batch_exporter(exporter) + .build()) } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { - // By binding the result to an unused variable, the lifetime of the variable - // matches the containing block, reporting traces and metrics during the whole - // execution. - - let result = init_tracer_provider(); - assert!( - result.is_ok(), - "Init tracer failed with error: {:?}", - result.err() - ); - let tracer_provider = result.unwrap(); - global::set_tracer_provider(tracer_provider.clone()); - - let result = init_metrics(); - assert!( - result.is_ok(), - "Init metrics failed with error: {:?}", - result.err() - ); - let meter_provider = result.unwrap(); - global::set_meter_provider(meter_provider.clone()); - - // Initialize logs and save the logger_provider. - let logger_provider = init_logs().unwrap(); + let logger_provider = init_logs()?; // Create a new OpenTelemetryTracingBridge using the above LoggerProvider. - let layer = OpenTelemetryTracingBridge::new(&logger_provider); + let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider); - // Add a tracing filter to filter events from crates used by opentelemetry-otlp. - // The filter levels are set as follows: + // For the OpenTelemetry layer, add a tracing filter to filter events from + // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates + // like reqwest/tonic etc.) from being sent back to OTel itself, thus + // preventing infinite telemetry generation. The filter levels are set as + // follows: // - Allow `info` level and above by default. - // - Restrict `hyper`, `tonic`, and `reqwest` to `error` level logs only. - // This ensures events generated from these crates within the OTLP Exporter are not looped back, - // thus preventing infinite event generation. - // Note: This will also drop events from these crates used outside the OTLP Exporter. - // For more details, see: https://github.com/open-telemetry/opentelemetry-rust/issues/761 - let filter = EnvFilter::new("info") - .add_directive("hyper=error".parse().unwrap()) - .add_directive("tonic=error".parse().unwrap()) - .add_directive("reqwest=error".parse().unwrap()); - + // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. + // Note: This will also drop events from crates like `tonic` etc. even when + // they are used outside the OTLP Exporter.
For more details, see: + // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + let filter_otel = EnvFilter::new("info") + .add_directive("hyper=off".parse().unwrap()) + .add_directive("opentelemetry=off".parse().unwrap()) + .add_directive("tonic=off".parse().unwrap()) + .add_directive("h2=off".parse().unwrap()) + .add_directive("reqwest=off".parse().unwrap()); + let otel_layer = otel_layer.with_filter(filter_otel); + + // Create a new tracing::Fmt layer to print the logs to stdout. It has a + // default filter of `info` level and above, and `debug` and above for logs + // from OpenTelemetry crates. The filter levels can be customized as needed. + let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap()); + let fmt_layer = tracing_subscriber::fmt::layer() + .with_thread_names(true) + .with_filter(filter_fmt); + + // Initialize the tracing subscriber with the OpenTelemetry layer and the + // Fmt layer. tracing_subscriber::registry() - .with(filter) - .with(layer) + .with(otel_layer) + .with(fmt_layer) .init(); + let tracer_provider = init_traces()?; + global::set_tracer_provider(tracer_provider.clone()); + + let meter_provider = init_metrics()?; + global::set_meter_provider(meter_provider.clone()); + let common_scope_attributes = vec![KeyValue::new("scope-key", "scope-value")]; - let tracer = global::tracer_provider() - .tracer_builder("basic") - .with_attributes(common_scope_attributes.clone()) + let scope = InstrumentationScope::builder("basic") + .with_version("1.0") + .with_attributes(common_scope_attributes) .build(); - let meter = global::meter_with_version( - "basic", - Some("v1.0"), - Some("schema_url"), - Some(common_scope_attributes.clone()), - ); + + let tracer = global::tracer_with_scope(scope.clone()); + let meter = global::meter_with_scope(scope); let counter = meter .u64_counter("test_counter") .with_description("a simple counter for demo purposes.") .with_unit("my_unit") - .init(); + .build(); for _ in 0..10 { counter.add(1, &[KeyValue::new("test_key", "test_value")]); } @@ -150,8 +135,7 @@ async fn main() -> Result<(), Box> { }); info!(name: "my-event", target: "my-target", "hello from {}. 
My price is {}", "apple", 1.99); - - global::shutdown_tracer_provider(); + tracer_provider.shutdown()?; meter_provider.shutdown()?; logger_provider.shutdown()?; diff --git a/opentelemetry-otlp/src/exporter/http/logs.rs b/opentelemetry-otlp/src/exporter/http/logs.rs index db1932868b..905fb638d0 100644 --- a/opentelemetry-otlp/src/exporter/http/logs.rs +++ b/opentelemetry-otlp/src/exporter/http/logs.rs @@ -2,14 +2,15 @@ use std::sync::Arc; use async_trait::async_trait; use http::{header::CONTENT_TYPE, Method}; -use opentelemetry::logs::{LogError, LogResult}; +use opentelemetry::otel_debug; use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; +use opentelemetry_sdk::logs::{LogError, LogResult}; use super::OtlpHttpClient; #[async_trait] impl LogExporter for OtlpHttpClient { - async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> { + async fn export(&self, batch: LogBatch<'_>) -> LogResult<()> { let client = self .client .lock() @@ -32,6 +33,7 @@ impl LogExporter for OtlpHttpClient { } let request_uri = request.uri().to_string(); + otel_debug!(name: "HttpLogsClient.CallingExport"); let response = client.send(request).await?; if !response.status().is_success() { diff --git a/opentelemetry-otlp/src/exporter/http/metrics.rs b/opentelemetry-otlp/src/exporter/http/metrics.rs index 8fcf1bc362..7b96a7c5ce 100644 --- a/opentelemetry-otlp/src/exporter/http/metrics.rs +++ b/opentelemetry-otlp/src/exporter/http/metrics.rs @@ -2,8 +2,9 @@ use std::sync::Arc; use async_trait::async_trait; use http::{header::CONTENT_TYPE, Method}; -use opentelemetry::metrics::{MetricsError, Result}; +use opentelemetry::otel_debug; use opentelemetry_sdk::metrics::data::ResourceMetrics; +use opentelemetry_sdk::metrics::{MetricError, MetricResult}; use crate::{metric::MetricsClient, Error}; @@ -11,14 +12,14 @@ use super::OtlpHttpClient; #[async_trait] impl MetricsClient for OtlpHttpClient { - async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()> { + async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> { let client = self .client .lock() .map_err(Into::into) .and_then(|g| match &*g { Some(client) => Ok(Arc::clone(client)), - _ => Err(MetricsError::Other("exporter is already shut down".into())), + _ => Err(MetricError::Other("exporter is already shut down".into())), })?; let (body, content_type) = self.build_metrics_export_body(metrics)?; @@ -33,15 +34,16 @@ impl MetricsClient for OtlpHttpClient { request.headers_mut().insert(k.clone(), v.clone()); } + otel_debug!(name: "HttpMetricsClient.CallingExport"); client .send(request) .await - .map_err(|e| MetricsError::ExportErr(Box::new(Error::RequestFailed(e))))?; + .map_err(|e| MetricError::ExportErr(Box::new(Error::RequestFailed(e))))?; Ok(()) } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { let _ = self.client.lock()?.take(); Ok(()) diff --git a/opentelemetry-otlp/src/exporter/http/mod.rs b/opentelemetry-otlp/src/exporter/http/mod.rs index 9110b0c474..e718960686 100644 --- a/opentelemetry-otlp/src/exporter/http/mod.rs +++ b/opentelemetry-otlp/src/exporter/http/mod.rs @@ -17,8 +17,6 @@ use opentelemetry_proto::transform::trace::tonic::group_spans_by_resource_and_sc use opentelemetry_sdk::export::logs::LogBatch; #[cfg(feature = "trace")] use opentelemetry_sdk::export::trace::SpanData; -#[cfg(feature = "metrics")] -use opentelemetry_sdk::metrics::data::ResourceMetrics; use prost::Message; use std::collections::HashMap; use std::env; @@ -35,17 +33,24 @@ mod logs; #[cfg(feature = "trace")] mod trace; 
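+// Only pull in the hyper-based client when `hyper-client` is enabled and
+// neither reqwest client feature is active.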
+#[cfg(all( + not(feature = "reqwest-client"), + not(feature = "reqwest-blocking-client"), + feature = "hyper-client" +))] +use opentelemetry_http::hyper::HyperClient; + /// Configuration of the http transport -#[cfg(any(feature = "http-proto", feature = "http-json"))] #[derive(Debug)] #[cfg_attr( all( not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client") + not(feature = "reqwest-blocking-client"), + not(feature = "hyper-client") ), derive(Default) )] -pub(crate) struct HttpConfig { +pub struct HttpConfig { /// Select the HTTP client client: Option>, @@ -53,19 +58,39 @@ pub(crate) struct HttpConfig { headers: Option>, } -#[cfg(any(feature = "reqwest-blocking-client", feature = "reqwest-client",))] +#[cfg(any( + feature = "reqwest-blocking-client", + feature = "reqwest-client", + feature = "hyper-client" +))] impl Default for HttpConfig { fn default() -> Self { + #[cfg(feature = "reqwest-blocking-client")] + let default_client = std::thread::spawn(|| { + Some(Arc::new(reqwest::blocking::Client::new()) as Arc) + }) + .join() + .expect("creating reqwest::blocking::Client on a new thread not to fail"); + #[cfg(all(not(feature = "reqwest-blocking-client"), feature = "reqwest-client"))] + let default_client = Some(Arc::new(reqwest::Client::new()) as Arc); + #[cfg(all( + not(feature = "reqwest-client"), + not(feature = "reqwest-blocking-client"), + feature = "hyper-client" + ))] + // TODO - support configuring custom connector and executor + let default_client = Some(Arc::new(HyperClient::with_default_connector( + Duration::from_secs(10), + None, + )) as Arc); + #[cfg(all( + not(feature = "reqwest-client"), + not(feature = "reqwest-blocking-client"), + not(feature = "hyper-client") + ))] + let default_client = None; HttpConfig { - #[cfg(feature = "reqwest-blocking-client")] - client: Some(Arc::new(reqwest::blocking::Client::new())), - #[cfg(all(not(feature = "reqwest-blocking-client"), feature = "reqwest-client"))] - client: Some(Arc::new(reqwest::Client::new())), - #[cfg(all( - not(feature = "reqwest-client"), - not(feature = "reqwest-blocking-client") - ))] - client: None, + client: default_client, headers: None, } } @@ -77,26 +102,23 @@ impl Default for HttpConfig { /// /// ``` /// # #[cfg(feature="metrics")] -/// use opentelemetry_sdk::metrics::reader::{ -/// DefaultTemporalitySelector, -/// }; +/// use opentelemetry_sdk::metrics::Temporality; /// /// # fn main() -> Result<(), Box> { /// // Create a span exporter you can use to when configuring tracer providers /// # #[cfg(feature="trace")] -/// let span_exporter = opentelemetry_otlp::new_exporter().http().build_span_exporter()?; +/// let span_exporter = opentelemetry_otlp::SpanExporter::builder().with_http().build()?; /// /// // Create a metrics exporter you can use when configuring meter providers /// # #[cfg(feature="metrics")] -/// let metrics_exporter = opentelemetry_otlp::new_exporter() -/// .http() -/// .build_metrics_exporter( -/// Box::new(DefaultTemporalitySelector::new()), -/// )?; +/// let metrics_exporter = opentelemetry_otlp::MetricExporter::builder() +/// .with_http() +/// .with_temporality(Temporality::default()) +/// .build()?; /// /// // Create a log exporter you can use when configuring logger providers /// # #[cfg(feature="logs")] -/// let log_exporter = opentelemetry_otlp::new_exporter().http().build_log_exporter()?; +/// let log_exporter = opentelemetry_otlp::LogExporter::builder().with_http().build()?; /// # Ok(()) /// # } /// ``` @@ -123,31 +145,6 @@ impl Default for HttpExporterBuilder { } impl 
HttpExporterBuilder { - /// Specify the OTLP protocol to be used by the exporter - pub fn with_protocol(mut self, protocol: Protocol) -> Self { - self.exporter_config.protocol = protocol; - self - } - - /// Assign client implementation - pub fn with_http_client(mut self, client: T) -> Self { - self.http_config.client = Some(Arc::new(client)); - self - } - - /// Set additional headers to send to the collector. - pub fn with_headers(mut self, headers: HashMap) -> Self { - // headers will be wrapped, so we must do some logic to unwrap first. - let mut inst_headers = self.http_config.headers.unwrap_or_default(); - inst_headers.extend( - headers - .into_iter() - .map(|(key, value)| (key, super::url_decode(&value).unwrap_or(value))), - ); - self.http_config.headers = Some(inst_headers); - self - } - fn build_client( &mut self, signal_endpoint_var: &str, @@ -158,7 +155,7 @@ impl HttpExporterBuilder { let endpoint = resolve_http_endpoint( signal_endpoint_var, signal_endpoint_path, - self.exporter_config.endpoint.as_str(), + self.exporter_config.endpoint.clone(), )?; let timeout = match env::var(signal_timeout_var) @@ -171,13 +168,11 @@ impl HttpExporterBuilder { }, None => self.exporter_config.timeout, }; - let http_client = self .http_config .client .take() .ok_or(crate::Error::NoHttpClient)?; - #[allow(clippy::mutable_key_type)] // http headers are not mutated let mut headers: HashMap = self .http_config @@ -231,7 +226,7 @@ impl HttpExporterBuilder { /// Create a log exporter with the current configuration #[cfg(feature = "logs")] - pub fn build_log_exporter(mut self) -> opentelemetry::logs::LogResult { + pub fn build_log_exporter(mut self) -> opentelemetry_sdk::logs::LogResult { use crate::{ OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, @@ -251,8 +246,8 @@ impl HttpExporterBuilder { #[cfg(feature = "metrics")] pub fn build_metrics_exporter( mut self, - temporality_selector: Box, - ) -> opentelemetry::metrics::Result { + temporality: opentelemetry_sdk::metrics::Temporality, + ) -> opentelemetry_sdk::metrics::MetricResult { use crate::{ OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, @@ -265,7 +260,7 @@ impl HttpExporterBuilder { OTEL_EXPORTER_OTLP_METRICS_HEADERS, )?; - Ok(crate::MetricsExporter::new(client, temporality_selector)) + Ok(crate::MetricExporter::new(client, temporality)) } } @@ -323,7 +318,7 @@ impl OtlpHttpClient { fn build_logs_export_body( &self, logs: LogBatch<'_>, - ) -> opentelemetry::logs::LogResult<(Vec, &'static str)> { + ) -> opentelemetry_sdk::logs::LogResult<(Vec, &'static str)> { use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest; let resource_logs = group_logs_by_resource_and_scope(logs, &self.resource); let req = ExportLogsServiceRequest { resource_logs }; @@ -332,7 +327,7 @@ impl OtlpHttpClient { #[cfg(feature = "http-json")] Protocol::HttpJson => match serde_json::to_string_pretty(&req) { Ok(json) => Ok((json.into(), "application/json")), - Err(e) => Err(opentelemetry::logs::LogError::from(e.to_string())), + Err(e) => Err(opentelemetry_sdk::logs::LogError::from(e.to_string())), }, _ => Ok((req.encode_to_vec(), "application/x-protobuf")), } @@ -341,8 +336,8 @@ impl OtlpHttpClient { #[cfg(feature = "metrics")] fn build_metrics_export_body( &self, - metrics: &mut ResourceMetrics, - ) -> opentelemetry::metrics::Result<(Vec, &'static str)> { + metrics: &mut opentelemetry_sdk::metrics::data::ResourceMetrics, + ) -> 
opentelemetry_sdk::metrics::MetricResult<(Vec, &'static str)> { use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest; let req: ExportMetricsServiceRequest = (&*metrics).into(); @@ -351,7 +346,9 @@ impl OtlpHttpClient { #[cfg(feature = "http-json")] Protocol::HttpJson => match serde_json::to_string_pretty(&req) { Ok(json) => Ok((json.into(), "application/json")), - Err(e) => Err(opentelemetry::metrics::MetricsError::Other(e.to_string())), + Err(e) => Err(opentelemetry_sdk::metrics::MetricError::Other( + e.to_string(), + )), }, _ => Ok((req.encode_to_vec(), "application/x-protobuf")), } @@ -371,7 +368,7 @@ fn build_endpoint_uri(endpoint: &str, path: &str) -> Result { fn resolve_http_endpoint( signal_endpoint_var: &str, signal_endpoint_path: &str, - provided_endpoint: &str, + provided_endpoint: Option, ) -> Result { // per signal env var is not modified if let Some(endpoint) = env::var(signal_endpoint_var) @@ -389,14 +386,14 @@ fn resolve_http_endpoint( return Ok(endpoint); } - if provided_endpoint.is_empty() { - build_endpoint_uri( - OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT, - signal_endpoint_path, - ) - } else { - provided_endpoint.parse().map_err(From::from) - } + provided_endpoint + .map(|e| e.parse().map_err(From::from)) + .unwrap_or_else(|| { + build_endpoint_uri( + OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT, + signal_endpoint_path, + ) + }) } #[allow(clippy::mutable_key_type)] // http headers are not mutated @@ -409,11 +406,64 @@ fn add_header_from_string(input: &str, headers: &mut HashMap &mut HttpConfig; +} + +/// Expose interface for modifying builder config. +impl HasHttpConfig for HttpExporterBuilder { + fn http_client_config(&mut self) -> &mut HttpConfig { + &mut self.http_config + } +} + +/// This trait will be implemented for every struct that implemented [`HasHttpConfig`] trait. +/// +/// ## Examples +/// ``` +/// # #[cfg(all(feature = "trace", feature = "grpc-tonic"))] +/// # { +/// use crate::opentelemetry_otlp::WithHttpConfig; +/// let exporter_builder = opentelemetry_otlp::SpanExporter::builder() +/// .with_http() +/// .with_headers(std::collections::HashMap::new()); +/// # } +/// ``` +pub trait WithHttpConfig { + /// Assign client implementation + fn with_http_client(self, client: T) -> Self; + + /// Set additional headers to send to the collector. + fn with_headers(self, headers: HashMap) -> Self; +} + +impl WithHttpConfig for B { + fn with_http_client(mut self, client: T) -> Self { + self.http_client_config().client = Some(Arc::new(client)); + self + } + + fn with_headers(mut self, headers: HashMap) -> Self { + // headers will be wrapped, so we must do some logic to unwrap first. 
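+        // Incoming values are URL-decoded when possible; if decoding fails,
+        // the raw value is kept as-is.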
+ self.http_client_config() + .headers + .iter_mut() + .zip(headers) + .for_each(|(http_client_headers, (key, value))| { + http_client_headers.insert(key, super::url_decode(&value).unwrap_or(value)); + }); + self + } +} + #[cfg(test)] mod tests { + use crate::exporter::http::HttpConfig; use crate::exporter::tests::run_env_test; use crate::{ - new_exporter, WithExportConfig, OTEL_EXPORTER_OTLP_ENDPOINT, + HttpExporterBuilder, WithExportConfig, WithHttpConfig, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, }; @@ -427,7 +477,7 @@ mod tests { let endpoint = resolve_http_endpoint( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", - "http://localhost:4317", + Some("http://localhost:4317".to_string()), ) .unwrap(); assert_eq!(endpoint, "http://example.com/v1/traces"); @@ -443,7 +493,7 @@ mod tests { let endpoint = super::resolve_http_endpoint( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", - "http://localhost:4317", + Some("http://localhost:4317".to_string()), ) .unwrap(); assert_eq!(endpoint, "http://example.com"); @@ -462,7 +512,7 @@ mod tests { let endpoint = super::resolve_http_endpoint( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", - "http://localhost:4317", + Some("http://localhost:4317".to_string()), ) .unwrap(); assert_eq!(endpoint, "http://example.com"); @@ -476,7 +526,7 @@ mod tests { let endpoint = super::resolve_http_endpoint( "NON_EXISTENT_VAR", "/v1/traces", - "http://localhost:4317", + Some("http://localhost:4317".to_string()), ) .unwrap(); assert_eq!(endpoint, "http://localhost:4317/"); @@ -511,7 +561,7 @@ mod tests { let endpoint = super::resolve_http_endpoint( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", - "http://localhost:4317", + Some("http://localhost:4317".to_string()), ) .unwrap(); assert_eq!(endpoint, "http://example.com/v1/traces"); @@ -525,7 +575,7 @@ mod tests { let result = super::resolve_http_endpoint( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", - "-*/*-/*-//-/-/yet-another-invalid-uri", + Some("-*/*-/*-//-/-/yet-another-invalid-uri".to_string()), ); assert!(result.is_err()); // You may also want to assert on the specific error type if applicable @@ -618,16 +668,51 @@ mod tests { } } + #[test] + fn test_http_exporter_builder_with_header() { + use std::collections::HashMap; + // Arrange + let initial_headers = HashMap::from([("k1".to_string(), "v1".to_string())]); + let extra_headers = HashMap::from([("k2".to_string(), "v2".to_string())]); + let expected_headers = initial_headers.iter().chain(extra_headers.iter()).fold( + HashMap::new(), + |mut acc, (k, v)| { + acc.insert(k.clone(), v.clone()); + acc + }, + ); + let builder = HttpExporterBuilder { + http_config: HttpConfig { + client: None, + headers: Some(initial_headers), + }, + exporter_config: crate::ExportConfig::default(), + }; + + // Act + let builder = builder.with_headers(extra_headers); + + // Assert + assert_eq!( + builder + .http_config + .headers + .clone() + .expect("headers should always be Some"), + expected_headers, + ); + } + #[test] fn test_http_exporter_endpoint() { // default endpoint should add signal path run_env_test(vec![], || { - let exporter = new_exporter().http(); + let exporter = HttpExporterBuilder::default(); let url = resolve_http_endpoint( OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, "/v1/traces", - exporter.exporter_config.endpoint.as_str(), + exporter.exporter_config.endpoint, ) .unwrap(); @@ -636,14 +721,13 @@ mod tests { // if builder endpoint is set, it should not add signal path run_env_test(vec![], || { - let exporter = new_exporter() - .http() + let 
exporter = HttpExporterBuilder::default()
             .with_endpoint("http://localhost:4318/v1/tracesbutnotreally");

             let url = resolve_http_endpoint(
                 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
                 "/v1/traces",
-                exporter.exporter_config.endpoint.as_str(),
+                exporter.exporter_config.endpoint,
             )
             .unwrap();
diff --git a/opentelemetry-otlp/src/exporter/http/trace.rs b/opentelemetry-otlp/src/exporter/http/trace.rs
index cc7894c266..d188dc8911 100644
--- a/opentelemetry-otlp/src/exporter/http/trace.rs
+++ b/opentelemetry-otlp/src/exporter/http/trace.rs
@@ -2,7 +2,7 @@ use std::sync::Arc;

 use futures_core::future::BoxFuture;
 use http::{header::CONTENT_TYPE, Method};
-use opentelemetry::trace::TraceError;
+use opentelemetry::{otel_debug, trace::TraceError};
 use opentelemetry_sdk::export::trace::{ExportResult, SpanData, SpanExporter};

 use super::OtlpHttpClient;
@@ -47,6 +47,7 @@ impl SpanExporter for OtlpHttpClient {

         Box::pin(async move {
             let request_uri = request.uri().to_string();
+            otel_debug!(name: "HttpTracesClient.CallingExport");
             let response = client.send(request).await?;

             if !response.status().is_success() {
diff --git a/opentelemetry-otlp/src/exporter/mod.rs b/opentelemetry-otlp/src/exporter/mod.rs
index d138527d8f..bec1c809bb 100644
--- a/opentelemetry-otlp/src/exporter/mod.rs
+++ b/opentelemetry-otlp/src/exporter/mod.rs
@@ -69,7 +69,7 @@ pub struct ExportConfig {
     /// The address of the OTLP collector. If it's not provided via builder or environment variables,
     /// the default address will be used based on the protocol.
-    pub endpoint: String,
+    pub endpoint: Option<String>,

     /// The protocol to use when communicating with the collector.
     pub protocol: Protocol,
@@ -83,7 +83,7 @@ impl Default for ExportConfig {
         let protocol = default_protocol();

         ExportConfig {
-            endpoint: "".to_string(),
+            endpoint: None,
             // don't use default_endpoint(protocol) here otherwise we
             // won't know if user provided a value
             protocol,
@@ -144,12 +144,13 @@ fn default_headers() -> std::collections::HashMap {
     headers
 }

-/// Provide access to the export config field within the exporter builders.
+/// Provide access to the [ExportConfig] field within the exporter builders.
 pub trait HasExportConfig {
-    /// Return a mutable reference to the export config within the exporter builders.
+    /// Return a mutable reference to the [ExportConfig] within the exporter builders.
     fn export_config(&mut self) -> &mut ExportConfig;
 }

+/// Provide [ExportConfig] access to the [TonicExporterBuilder].
 #[cfg(feature = "grpc-tonic")]
 impl HasExportConfig for TonicExporterBuilder {
     fn export_config(&mut self) -> &mut ExportConfig {
@@ -157,6 +158,7 @@ impl HasExportConfig for TonicExporterBuilder {
     }
 }

+/// Provide [ExportConfig] access to the [HttpExporterBuilder].
 #[cfg(any(feature = "http-proto", feature = "http-json"))]
 impl HasExportConfig for HttpExporterBuilder {
     fn export_config(&mut self) -> &mut ExportConfig {
@@ -164,7 +166,7 @@ impl HasExportConfig for HttpExporterBuilder {
     }
 }

-/// Expose methods to override export configuration.
+/// Expose methods to override [ExportConfig].
 ///
 /// This trait will be implemented for every struct that implemented [`HasExportConfig`] trait.
///
@@ -173,8 +175,8 @@ impl HasExportConfig for HttpExporterBuilder {
 /// # #[cfg(all(feature = "trace", feature = "grpc-tonic"))]
 /// # {
 /// use crate::opentelemetry_otlp::WithExportConfig;
-/// let exporter_builder = opentelemetry_otlp::new_exporter()
-///     .tonic()
+/// let exporter_builder = opentelemetry_otlp::SpanExporter::builder()
+///     .with_tonic()
 ///     .with_endpoint("http://localhost:7201");
 /// # }
 /// ```
@@ -183,11 +185,11 @@ pub trait WithExportConfig {
     fn with_endpoint<T: Into<String>>(self, endpoint: T) -> Self;
     /// Set the protocol to use when communicating with the collector.
     ///
-    /// Note that protocols that are not supported by exporters will be ignore. The exporter
+    /// Note that protocols that are not supported by exporters will be ignored. The exporter
     /// will use the default protocol in this case.
     ///
     /// ## Note
-    /// All exporters in this crate are only support one protocol thus choosing the protocol is an no-op at the moment
+    /// All exporters in this crate only support one protocol, thus choosing the protocol is a no-op at the moment.
     fn with_protocol(self, protocol: Protocol) -> Self;
     /// Set the timeout to the collector.
     fn with_timeout(self, timeout: Duration) -> Self;
@@ -197,7 +199,7 @@ impl<B: HasExportConfig> WithExportConfig for B {
     fn with_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
-        self.export_config().endpoint = endpoint.into();
+        self.export_config().endpoint = Some(endpoint.into());
         self
     }

@@ -291,17 +293,17 @@ mod tests {
     #[cfg(any(feature = "http-proto", feature = "http-json"))]
     #[test]
     fn test_default_http_endpoint() {
-        let exporter_builder = crate::new_exporter().http();
+        let exporter_builder = crate::HttpExporterBuilder::default();

-        assert_eq!(exporter_builder.exporter_config.endpoint, "");
+        assert_eq!(exporter_builder.exporter_config.endpoint, None);
     }

     #[cfg(feature = "grpc-tonic")]
     #[test]
     fn test_default_tonic_endpoint() {
-        let exporter_builder = crate::new_exporter().tonic();
+        let exporter_builder = crate::TonicExporterBuilder::default();

-        assert_eq!(exporter_builder.exporter_config.endpoint, "");
+        assert_eq!(exporter_builder.exporter_config.endpoint, None);
     }

     #[test]
diff --git a/opentelemetry-otlp/src/exporter/tonic/logs.rs b/opentelemetry-otlp/src/exporter/tonic/logs.rs
index bf9b6c9ed3..2737c2bd99 100644
--- a/opentelemetry-otlp/src/exporter/tonic/logs.rs
+++ b/opentelemetry-otlp/src/exporter/tonic/logs.rs
@@ -1,15 +1,17 @@
 use async_trait::async_trait;
 use core::fmt;
-use opentelemetry::logs::{LogError, LogResult};
+use opentelemetry::otel_debug;
 use opentelemetry_proto::tonic::collector::logs::v1::{
     logs_service_client::LogsServiceClient, ExportLogsServiceRequest,
 };
 use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
+use opentelemetry_sdk::logs::{LogError, LogResult};
 use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request};

 use opentelemetry_proto::transform::logs::tonic::group_logs_by_resource_and_scope;

 use super::BoxInterceptor;
+use tokio::sync::Mutex;

 pub(crate) struct TonicLogsClient {
     inner: Option<ClientInner>,
@@ -20,7 +22,7 @@ pub(crate) struct TonicLogsClient {

 struct ClientInner {
     client: LogsServiceClient<Channel>,
-    interceptor: BoxInterceptor,
+    interceptor: Mutex<BoxInterceptor>,
 }

 impl fmt::Debug for TonicLogsClient {
@@ -42,10 +44,12 @@ impl TonicLogsClient {
                 .accept_compressed(compression);
         }

+        otel_debug!(name: "TonicsLogsClientBuilt");
+
         TonicLogsClient {
             inner: Some(ClientInner {
                 client,
-                interceptor,
+                interceptor: Mutex::new(interceptor),
             }),
             resource: Default::default(),
         }
@@ -54,11 +58,13 @@
impl TonicLogsClient { #[async_trait] impl LogExporter for TonicLogsClient { - async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> { - let (mut client, metadata, extensions) = match &mut self.inner { + async fn export(&self, batch: LogBatch<'_>) -> LogResult<()> { + let (mut client, metadata, extensions) = match &self.inner { Some(inner) => { let (m, e, _) = inner .interceptor + .lock() + .await // tokio::sync::Mutex doesn't return a poisoned error, so we can safely use the interceptor here .call(Request::new(())) .map_err(|e| LogError::Other(Box::new(e)))? .into_parts(); @@ -69,6 +75,8 @@ impl LogExporter for TonicLogsClient { let resource_logs = group_logs_by_resource_and_scope(batch, &self.resource); + otel_debug!(name: "TonicsLogsClient.CallingExport"); + client .export(Request::from_parts( metadata, diff --git a/opentelemetry-otlp/src/exporter/tonic/metrics.rs b/opentelemetry-otlp/src/exporter/tonic/metrics.rs index 97040d3201..8a938885a3 100644 --- a/opentelemetry-otlp/src/exporter/tonic/metrics.rs +++ b/opentelemetry-otlp/src/exporter/tonic/metrics.rs @@ -2,11 +2,12 @@ use core::fmt; use std::sync::Mutex; use async_trait::async_trait; -use opentelemetry::metrics::{MetricsError, Result}; +use opentelemetry::otel_debug; use opentelemetry_proto::tonic::collector::metrics::v1::{ metrics_service_client::MetricsServiceClient, ExportMetricsServiceRequest, }; use opentelemetry_sdk::metrics::data::ResourceMetrics; +use opentelemetry_sdk::metrics::{MetricError, MetricResult}; use tonic::{codegen::CompressionEncoding, service::Interceptor, transport::Channel, Request}; use super::BoxInterceptor; @@ -40,6 +41,8 @@ impl TonicMetricsClient { .accept_compressed(compression); } + otel_debug!(name: "TonicsMetricsClientBuilt"); + TonicMetricsClient { inner: Mutex::new(Some(ClientInner { client, @@ -51,7 +54,7 @@ impl TonicMetricsClient { #[async_trait] impl MetricsClient for TonicMetricsClient { - async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()> { + async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> { let (mut client, metadata, extensions) = self.inner .lock() @@ -62,16 +65,18 @@ impl MetricsClient for TonicMetricsClient { .interceptor .call(Request::new(())) .map_err(|e| { - MetricsError::Other(format!( + MetricError::Other(format!( "unexpected status while exporting {e:?}" )) })? 
.into_parts(); Ok((inner.client.clone(), m, e)) } - None => Err(MetricsError::Other("exporter is already shut down".into())), + None => Err(MetricError::Other("exporter is already shut down".into())), })?; + otel_debug!(name: "TonicsMetricsClient.CallingExport"); + client .export(Request::from_parts( metadata, @@ -84,7 +89,7 @@ impl MetricsClient for TonicMetricsClient { Ok(()) } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { let _ = self.inner.lock()?.take(); Ok(()) diff --git a/opentelemetry-otlp/src/exporter/tonic/mod.rs b/opentelemetry-otlp/src/exporter/tonic/mod.rs index af5a5ca7de..99d2e8fa15 100644 --- a/opentelemetry-otlp/src/exporter/tonic/mod.rs +++ b/opentelemetry-otlp/src/exporter/tonic/mod.rs @@ -4,6 +4,7 @@ use std::str::FromStr; use std::time::Duration; use http::{HeaderMap, HeaderName, HeaderValue}; +use opentelemetry::otel_debug; use tonic::codec::CompressionEncoding; use tonic::metadata::{KeyAndValueRef, MetadataMap}; use tonic::service::Interceptor; @@ -25,7 +26,7 @@ mod logs; mod metrics; #[cfg(feature = "trace")] -mod trace; +pub(crate) mod trace; /// Configuration for [tonic] /// @@ -34,14 +35,14 @@ mod trace; #[non_exhaustive] pub struct TonicConfig { /// Custom metadata entries to send to the collector. - pub metadata: Option, - + pub(crate) metadata: Option, /// TLS settings for the collector endpoint. #[cfg(feature = "tls")] - pub tls_config: Option, - + pub(crate) tls_config: Option, /// The compression algorithm to use when communicating with the collector. - pub compression: Option, + pub(crate) compression: Option, + pub(crate) channel: Option, + pub(crate) interceptor: Option, } impl TryFrom for tonic::codec::CompressionEncoding { @@ -67,21 +68,6 @@ impl TryFrom for tonic::codec::CompressionEncoding { } } -fn resolve_compression( - tonic_config: &TonicConfig, - env_override: &str, -) -> Result, crate::Error> { - if let Some(compression) = tonic_config.compression { - Ok(Some(compression.try_into()?)) - } else if let Ok(compression) = env::var(env_override) { - Ok(Some(compression.parse::()?.try_into()?)) - } else if let Ok(compression) = env::var(OTEL_EXPORTER_OTLP_COMPRESSION) { - Ok(Some(compression.parse::()?.try_into()?)) - } else { - Ok(None) - } -} - /// Configuration for the [tonic] OTLP GRPC exporter. 
/// /// It allows you to @@ -96,35 +82,30 @@ fn resolve_compression( /// /// ```no_run /// # #[cfg(feature="metrics")] -/// use opentelemetry_sdk::metrics::reader::{ -/// DefaultTemporalitySelector, -/// }; +/// use opentelemetry_sdk::metrics::Temporality; /// /// # fn main() -> Result<(), Box> { /// // Create a span exporter you can use to when configuring tracer providers /// # #[cfg(feature="trace")] -/// let span_exporter = opentelemetry_otlp::new_exporter().tonic().build_span_exporter()?; +/// let span_exporter = opentelemetry_otlp::SpanExporter::builder().with_tonic().build()?; /// -/// // Create a metrics exporter you can use when configuring meter providers +/// // Create a metric exporter you can use when configuring meter providers /// # #[cfg(feature="metrics")] -/// let metrics_exporter = opentelemetry_otlp::new_exporter() -/// .tonic() -/// .build_metrics_exporter( -/// Box::new(DefaultTemporalitySelector::new()), -/// )?; +/// let metric_exporter = opentelemetry_otlp::MetricExporter::builder() +/// .with_tonic() +/// .with_temporality(Temporality::default()) +/// .build()?; /// /// // Create a log exporter you can use when configuring logger providers /// # #[cfg(feature="logs")] -/// let log_exporter = opentelemetry_otlp::new_exporter().tonic().build_log_exporter()?; +/// let log_exporter = opentelemetry_otlp::LogExporter::builder().with_tonic().build()?; /// # Ok(()) /// # } /// ``` #[derive(Debug)] pub struct TonicExporterBuilder { - pub(crate) exporter_config: ExportConfig, pub(crate) tonic_config: TonicConfig, - pub(crate) channel: Option, - pub(crate) interceptor: Option, + pub(crate) exporter_config: ExportConfig, } pub(crate) struct BoxInterceptor(Box); @@ -142,80 +123,28 @@ impl Debug for BoxInterceptor { impl Default for TonicExporterBuilder { fn default() -> Self { - let tonic_config = TonicConfig { - metadata: Some(MetadataMap::from_headers( - (&default_headers()) - .try_into() - .expect("Invalid tonic headers"), - )), - #[cfg(feature = "tls")] - tls_config: None, - compression: None, - }; - TonicExporterBuilder { + tonic_config: TonicConfig { + metadata: Some(MetadataMap::from_headers( + (&default_headers()) + .try_into() + .expect("Invalid tonic headers"), + )), + #[cfg(feature = "tls")] + tls_config: None, + compression: None, + channel: Option::default(), + interceptor: Option::default(), + }, exporter_config: ExportConfig { protocol: crate::Protocol::Grpc, ..Default::default() }, - tonic_config, - channel: Option::default(), - interceptor: Option::default(), } } } impl TonicExporterBuilder { - /// Set the TLS settings for the collector endpoint. - #[cfg(feature = "tls")] - pub fn with_tls_config(mut self, tls_config: ClientTlsConfig) -> Self { - self.tonic_config.tls_config = Some(tls_config); - self - } - - /// Set custom metadata entries to send to the collector. - pub fn with_metadata(mut self, metadata: MetadataMap) -> Self { - // extending metadata maps is harder than just casting back/forth - let incoming_headers = metadata.into_headers(); - let mut existing_headers = self - .tonic_config - .metadata - .unwrap_or_default() - .into_headers(); - existing_headers.extend(incoming_headers); - - self.tonic_config.metadata = Some(MetadataMap::from_headers(existing_headers)); - self - } - - /// Set the compression algorithm to use when communicating with the collector. - pub fn with_compression(mut self, compression: Compression) -> Self { - self.tonic_config.compression = Some(compression); - self - } - - /// Use `channel` as tonic's transport channel. 
- /// this will override tls config and should only be used - /// when working with non-HTTP transports. - /// - /// Users MUST make sure the [`ExportConfig::timeout`] is - /// the same as the channel's timeout. - pub fn with_channel(mut self, channel: tonic::transport::Channel) -> Self { - self.channel = Some(channel); - self - } - - /// Use a custom `interceptor` to modify each outbound request. - /// this can be used to modify the grpc metadata, for example - /// to inject auth tokens. - pub fn with_interceptor(mut self, interceptor: I) -> Self - where - I: tonic::service::Interceptor + Clone + Send + Sync + 'static, - { - self.interceptor = Some(BoxInterceptor(Box::new(interceptor))); - self - } - fn build_channel( self, signal_endpoint_var: &str, @@ -223,12 +152,11 @@ impl TonicExporterBuilder { signal_compression_var: &str, signal_headers_var: &str, ) -> Result<(Channel, BoxInterceptor, Option), crate::Error> { - let tonic_config = self.tonic_config; - let compression = resolve_compression(&tonic_config, signal_compression_var)?; + let compression = self.resolve_compression(signal_compression_var)?; - let headers_from_env = parse_headers_from_env(signal_headers_var); + let (headers_from_env, headers_for_logging) = parse_headers_from_env(signal_headers_var); let metadata = merge_metadata_with_headers_from_env( - tonic_config.metadata.unwrap_or_default(), + self.tonic_config.metadata.unwrap_or_default(), headers_from_env, ); @@ -247,7 +175,7 @@ impl TonicExporterBuilder { Ok(req) }; - let interceptor = match self.interceptor { + let interceptor = match self.tonic_config.interceptor { Some(mut interceptor) => { BoxInterceptor(Box::new(move |req| interceptor.call(add_metadata(req)?))) } @@ -255,30 +183,16 @@ impl TonicExporterBuilder { }; // If a custom channel was provided, use that channel instead of creating one - if let Some(channel) = self.channel { + if let Some(channel) = self.tonic_config.channel { return Ok((channel, interceptor, compression)); } let config = self.exporter_config; - // resolving endpoint string - // grpc doesn't have a "path" like http(See https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md) - // the path of grpc calls are based on the protobuf service definition - // so we won't append one for default grpc endpoints - // If users for some reason want to use a custom path, they can use env var or builder to pass it - let endpoint = match env::var(signal_endpoint_var) - .ok() - .or(env::var(OTEL_EXPORTER_OTLP_ENDPOINT).ok()) - { - Some(val) => val, - None => { - if config.endpoint.is_empty() { - OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT.to_string() - } else { - config.endpoint - } - } - }; + let endpoint = Self::resolve_endpoint(signal_endpoint_var, config.endpoint); + + // Used for logging the endpoint + let endpoint_clone = endpoint.clone(); let endpoint = Channel::from_shared(endpoint).map_err(crate::Error::from)?; let timeout = match env::var(signal_timeout_var) @@ -293,7 +207,7 @@ impl TonicExporterBuilder { }; #[cfg(feature = "tls")] - let channel = match tonic_config.tls_config { + let channel = match self.tonic_config.tls_config { Some(tls_config) => endpoint .tls_config(tls_config) .map_err(crate::Error::from)?, @@ -305,16 +219,51 @@ impl TonicExporterBuilder { #[cfg(not(feature = "tls"))] let channel = endpoint.timeout(timeout).connect_lazy(); + otel_debug!(name: "TonicChannelBuilt", endpoint = endpoint_clone, timeout_in_millisecs = timeout.as_millis(), compression = format!("{:?}", compression), headers = format!("{:?}", 
headers_for_logging)); Ok((channel, interceptor, compression)) } + fn resolve_endpoint(default_endpoint_var: &str, provided_endpoint: Option) -> String { + // resolving endpoint string + // grpc doesn't have a "path" like http(See https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md) + // the path of grpc calls are based on the protobuf service definition + // so we won't append one for default grpc endpoints + // If users for some reason want to use a custom path, they can use env var or builder to pass it + match env::var(default_endpoint_var) + .ok() + .or(env::var(OTEL_EXPORTER_OTLP_ENDPOINT).ok()) + { + Some(val) => val, + None => { + provided_endpoint.unwrap_or(OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT.to_string()) + } + } + } + + fn resolve_compression( + &self, + env_override: &str, + ) -> Result, crate::Error> { + if let Some(compression) = self.tonic_config.compression { + Ok(Some(compression.try_into()?)) + } else if let Ok(compression) = env::var(env_override) { + Ok(Some(compression.parse::()?.try_into()?)) + } else if let Ok(compression) = env::var(OTEL_EXPORTER_OTLP_COMPRESSION) { + Ok(Some(compression.parse::()?.try_into()?)) + } else { + Ok(None) + } + } + /// Build a new tonic log exporter #[cfg(feature = "logs")] - pub fn build_log_exporter( + pub(crate) fn build_log_exporter( self, - ) -> Result { + ) -> Result { use crate::exporter::tonic::logs::TonicLogsClient; + otel_debug!(name: "LogsTonicChannelBuilding"); + let (channel, interceptor, compression) = self.build_channel( crate::logs::OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, crate::logs::OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, @@ -329,13 +278,15 @@ impl TonicExporterBuilder { /// Build a new tonic metrics exporter #[cfg(feature = "metrics")] - pub fn build_metrics_exporter( + pub(crate) fn build_metrics_exporter( self, - temporality_selector: Box, - ) -> opentelemetry::metrics::Result { - use crate::MetricsExporter; + temporality: opentelemetry_sdk::metrics::Temporality, + ) -> opentelemetry_sdk::metrics::MetricResult { + use crate::MetricExporter; use metrics::TonicMetricsClient; + otel_debug!(name: "MetricsTonicChannelBuilding"); + let (channel, interceptor, compression) = self.build_channel( crate::metric::OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, crate::metric::OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, @@ -345,16 +296,18 @@ impl TonicExporterBuilder { let client = TonicMetricsClient::new(channel, interceptor, compression); - Ok(MetricsExporter::new(client, temporality_selector)) + Ok(MetricExporter::new(client, temporality)) } /// Build a new tonic span exporter #[cfg(feature = "trace")] - pub fn build_span_exporter( + pub(crate) fn build_span_exporter( self, ) -> Result { use crate::exporter::tonic::trace::TonicTracesClient; + otel_debug!(name: "TracesTonicChannelBuilding"); + let (channel, interceptor, compression) = self.build_channel( crate::span::OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, crate::span::OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, @@ -382,28 +335,130 @@ fn merge_metadata_with_headers_from_env( } } -fn parse_headers_from_env(signal_headers_var: &str) -> HeaderMap { - env::var(signal_headers_var) - .or_else(|_| env::var(OTEL_EXPORTER_OTLP_HEADERS)) - .map(|input| { - parse_header_string(&input) - .filter_map(|(key, value)| { - Some(( - HeaderName::from_str(key).ok()?, - HeaderValue::from_str(&value).ok()?, - )) - }) - .collect::() - }) - .unwrap_or_default() +fn parse_headers_from_env(signal_headers_var: &str) -> (HeaderMap, Vec<(String, String)>) { + let mut headers = Vec::new(); + + ( + env::var(signal_headers_var) + 
.or_else(|_| env::var(OTEL_EXPORTER_OTLP_HEADERS)) + .map(|input| { + parse_header_string(&input) + .filter_map(|(key, value)| { + headers.push((key.to_owned(), value.clone())); + Some(( + HeaderName::from_str(key).ok()?, + HeaderValue::from_str(&value).ok()?, + )) + }) + .collect::() + }) + .unwrap_or_default(), + headers, + ) +} + +/// Expose interface for modifying [TonicConfig] fields within the exporter builders. +pub trait HasTonicConfig { + /// Return a mutable reference to the export config within the exporter builders. + fn tonic_config(&mut self) -> &mut TonicConfig; +} + +/// Expose interface for modifying [TonicConfig] fields within the [TonicExporterBuilder]. +impl HasTonicConfig for TonicExporterBuilder { + fn tonic_config(&mut self) -> &mut TonicConfig { + &mut self.tonic_config + } +} + +/// Expose methods to override [TonicConfig]. +/// +/// This trait will be implemented for every struct that implemented [`HasTonicConfig`] trait. +/// +/// ## Examples +/// ``` +/// # #[cfg(all(feature = "trace", feature = "grpc-tonic"))] +/// # { +/// use crate::opentelemetry_otlp::{WithExportConfig, WithTonicConfig}; +/// let exporter_builder = opentelemetry_otlp::SpanExporter::builder() +/// .with_tonic() +/// .with_compression(opentelemetry_otlp::Compression::Gzip); +/// # } +/// ``` +pub trait WithTonicConfig { + /// Set the TLS settings for the collector endpoint. + #[cfg(feature = "tls")] + fn with_tls_config(self, tls_config: ClientTlsConfig) -> Self; + + /// Set custom metadata entries to send to the collector. + fn with_metadata(self, metadata: MetadataMap) -> Self; + + /// Set the compression algorithm to use when communicating with the collector. + fn with_compression(self, compression: Compression) -> Self; + + /// Use `channel` as tonic's transport channel. + /// this will override tls config and should only be used + /// when working with non-HTTP transports. + /// + /// Users MUST make sure the [`ExportConfig::timeout`] is + /// the same as the channel's timeout. + fn with_channel(self, channel: tonic::transport::Channel) -> Self; + + /// Use a custom `interceptor` to modify each outbound request. + /// this can be used to modify the grpc metadata, for example + /// to inject auth tokens. + fn with_interceptor(self, interceptor: I) -> Self + where + I: tonic::service::Interceptor + Clone + Send + Sync + 'static; +} + +impl WithTonicConfig for B { + #[cfg(feature = "tls")] + fn with_tls_config(mut self, tls_config: ClientTlsConfig) -> Self { + self.tonic_config().tls_config = Some(tls_config); + self + } + + /// Set custom metadata entries to send to the collector. 
+ fn with_metadata(mut self, metadata: MetadataMap) -> Self { + // extending metadata maps is harder than just casting back/forth + let mut existing_headers = self + .tonic_config() + .metadata + .clone() + .unwrap_or_default() + .into_headers(); + existing_headers.extend(metadata.into_headers()); + + self.tonic_config().metadata = Some(MetadataMap::from_headers(existing_headers)); + self + } + + fn with_compression(mut self, compression: Compression) -> Self { + self.tonic_config().compression = Some(compression); + self + } + + fn with_channel(mut self, channel: tonic::transport::Channel) -> Self { + self.tonic_config().channel = Some(channel); + self + } + + fn with_interceptor(mut self, interceptor: I) -> Self + where + I: tonic::service::Interceptor + Clone + Send + Sync + 'static, + { + self.tonic_config().interceptor = Some(BoxInterceptor(Box::new(interceptor))); + self + } } #[cfg(test)] mod tests { use crate::exporter::tests::run_env_test; + use crate::exporter::tonic::WithTonicConfig; #[cfg(feature = "grpc-tonic")] use crate::exporter::Compression; - use crate::TonicExporterBuilder; + use crate::{TonicExporterBuilder, WithExportConfig, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT}; use crate::{OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS}; use http::{HeaderMap, HeaderName, HeaderValue}; use tonic::metadata::{MetadataMap, MetadataValue}; @@ -415,7 +470,9 @@ mod tests { metadata.insert("foo", "bar".parse().unwrap()); let builder = TonicExporterBuilder::default().with_metadata(metadata); let result = builder.tonic_config.metadata.unwrap(); - let foo = result.get("foo").unwrap(); + let foo = result + .get("foo") + .expect("there to always be an entry for foo"); assert_eq!(foo, &MetadataValue::try_from("bar").unwrap()); assert!(result.get("User-Agent").is_some()); @@ -456,7 +513,6 @@ mod tests { } #[test] - #[cfg(feature = "grpc-tonic")] fn test_convert_compression() { #[cfg(feature = "gzip-tonic")] assert!(tonic::codec::CompressionEncoding::try_from(Compression::Gzip).is_ok()); @@ -477,7 +533,7 @@ mod tests { ], || { assert_eq!( - super::parse_headers_from_env(OTEL_EXPORTER_OTLP_TRACES_HEADERS), + super::parse_headers_from_env(OTEL_EXPORTER_OTLP_TRACES_HEADERS).0, HeaderMap::from_iter([ ( HeaderName::from_static("k1"), @@ -491,7 +547,7 @@ mod tests { ); assert_eq!( - super::parse_headers_from_env("EMPTY_ENV"), + super::parse_headers_from_env("EMPTY_ENV").0, HeaderMap::from_iter([( HeaderName::from_static("k3"), HeaderValue::from_static("v3") @@ -514,7 +570,7 @@ mod tests { metadata.insert("k1", "v0".parse().unwrap()); let result = - super::merge_metadata_with_headers_from_env(metadata, headers_from_env); + super::merge_metadata_with_headers_from_env(metadata, headers_from_env.0); assert_eq!( result.get("foo").unwrap(), @@ -525,4 +581,31 @@ mod tests { }, ); } + + #[test] + fn test_tonic_exporter_endpoint() { + // default endpoint for grpc should not add signal path. + run_env_test(vec![], || { + let exporter = TonicExporterBuilder::default(); + + let url = TonicExporterBuilder::resolve_endpoint( + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, + exporter.exporter_config.endpoint, + ); + + assert_eq!(url, "http://localhost:4317"); + }); + + // if builder endpoint is set, it should not use default. 
+ run_env_test(vec![], || { + let exporter = TonicExporterBuilder::default().with_endpoint("http://localhost:1234"); + + let url = TonicExporterBuilder::resolve_endpoint( + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, + exporter.exporter_config.endpoint, + ); + + assert_eq!(url, "http://localhost:1234"); + }); + } } diff --git a/opentelemetry-otlp/src/exporter/tonic/trace.rs b/opentelemetry-otlp/src/exporter/tonic/trace.rs index a4d12ebde7..998acafad5 100644 --- a/opentelemetry-otlp/src/exporter/tonic/trace.rs +++ b/opentelemetry-otlp/src/exporter/tonic/trace.rs @@ -1,7 +1,7 @@ use core::fmt; use futures_core::future::BoxFuture; -use opentelemetry::trace::TraceError; +use opentelemetry::{otel_debug, trace::TraceError}; use opentelemetry_proto::tonic::collector::trace::v1::{ trace_service_client::TraceServiceClient, ExportTraceServiceRequest, }; @@ -43,6 +43,8 @@ impl TonicTracesClient { .accept_compressed(compression); } + otel_debug!(name: "TonicsTracesClientBuilt"); + TonicTracesClient { inner: Some(ClientInner { client, @@ -74,6 +76,8 @@ impl SpanExporter for TonicTracesClient { let resource_spans = group_spans_by_resource_and_scope(batch, &self.resource); + otel_debug!(name: "TonicsTracesClient.CallingExport"); + Box::pin(async move { client .export(Request::from_parts( diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index 1aad8f8677..e25638e48a 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -21,12 +21,15 @@ //! $ docker run -p 4317:4317 otel/opentelemetry-collector:latest //! ``` //! -//! Then install a new pipeline with the recommended defaults to start exporting -//! telemetry. You will have to build a OTLP exporter first. +//! Then create a new `Exporter`, and `Provider` with the recommended defaults to start exporting +//! telemetry. //! -//! Exporting pipelines can be started with `new_pipeline().tracing()` and -//! `new_pipeline().metrics()`, and `new_pipeline().logging()` respectively for -//! traces, metrics and logs. +//! You will have to build a OTLP exporter first. Create the correct exporter based on the signal +//! you are looking to export `SpanExporter::builder()`, `MetricExporter::builder()`, +//! `LogExporter::builder()` respectively for traces, metrics, and logs. +//! +//! Once you have the exporter, you can create your `Provider` by starting with `TracerProvider::builder()`, +//! `SdkMeterProvider::builder()`, and `LoggerProvider::builder()` respectively for traces, metrics, and logs. //! //! ```no_run //! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))] @@ -36,12 +39,11 @@ //! //! fn main() -> Result<(), Box> { //! // First, create a OTLP exporter builder. Configure it as you need. -//! let otlp_exporter = opentelemetry_otlp::new_exporter().tonic(); -//! // Then pass it into pipeline builder -//! let _ = opentelemetry_otlp::new_pipeline() -//! .tracing() -//! .with_exporter(otlp_exporter) -//! .install_simple()?; +//! let otlp_exporter = opentelemetry_otlp::SpanExporter::builder().with_tonic().build()?; +//! // Then pass it into provider builder +//! let _ = opentelemetry_sdk::trace::TracerProvider::builder() +//! .with_simple_exporter(otlp_exporter) +//! .build(); //! let tracer = global::tracer("my_tracer"); //! tracer.in_span("doing_work", |cx| { //! // Traced app logic here... @@ -70,10 +72,15 @@ //! # #[cfg(all(feature = "trace", feature = "grpc-tonic"))] //! # { //! # fn main() -> Result<(), opentelemetry::trace::TraceError> { -//! let tracer = opentelemetry_otlp::new_pipeline() -//! .tracing() -//! 
.with_exporter(opentelemetry_otlp::new_exporter().tonic()) -//! .install_batch(opentelemetry_sdk::runtime::AsyncStd)?; +//! let tracer = opentelemetry_sdk::trace::TracerProvider::builder() +//! .with_batch_exporter( +//! opentelemetry_otlp::SpanExporter::builder() +//! .with_tonic() +//! .build()?, +//! opentelemetry_sdk::runtime::Tokio, +//! ) +//! .build(); +//! //! # Ok(()) //! # } //! # } @@ -100,7 +107,6 @@ //! * `grpc-tonic`: Use `tonic` as grpc layer. This is enabled by default. //! * `gzip-tonic`: Use gzip compression for `tonic` grpc layer. //! * `zstd-tonic`: Use zstd compression for `tonic` grpc layer. -//! * `tls-tonic`: Enable TLS. //! * `tls-roots`: Adds system trust roots to rustls-based gRPC clients using the rustls-native-certs crate //! * `tls-webkpi-roots`: Embeds Mozilla's trust roots to rustls-based gRPC clients using the webkpi-roots crate //! @@ -116,18 +122,18 @@ //! //! Example showing how to override all configuration options. //! -//! Generally there are two parts of configuration. One is metrics config -//! or tracing config. Users can config it via [`OtlpTracePipeline`] -//! or [`OtlpMetricPipeline`]. The other is exporting configuration. -//! Users can set those configurations using [`OtlpExporterPipeline`] based -//! on the choice of exporters. +//! Generally there are two parts of configuration. One is the exporter, the other is the provider. +//! Users can configure the exporter using [SpanExporter::builder()] for traces, +//! and [MetricExporter::builder()] + [opentelemetry_sdk::metrics::PeriodicReader::builder()] for metrics. +//! Once you have an exporter, you can add it to either a [opentelemetry_sdk::trace::TracerProvider::builder()] for traces, +//! or [opentelemetry_sdk::metrics::SdkMeterProvider::builder()] for metrics. //! //! ```no_run //! use opentelemetry::{global, KeyValue, trace::Tracer}; //! use opentelemetry_sdk::{trace::{self, RandomIdGenerator, Sampler}, Resource}; //! # #[cfg(feature = "metrics")] -//! use opentelemetry_sdk::metrics::reader::DefaultTemporalitySelector; -//! use opentelemetry_otlp::{Protocol, WithExportConfig, ExportConfig}; +//! use opentelemetry_sdk::metrics::Temporality; +//! use opentelemetry_otlp::{Protocol, WithExportConfig, WithTonicConfig}; //! use std::time::Duration; //! # #[cfg(feature = "grpc-tonic")] //! use tonic::metadata::*; @@ -140,51 +146,47 @@ //! map.insert("x-host", "example.com".parse().unwrap()); //! map.insert("x-number", "123".parse().unwrap()); //! map.insert_bin("trace-proto-bin", MetadataValue::from_bytes(b"[binary data]")); -//! -//! let tracer_provider = opentelemetry_otlp::new_pipeline() -//! .tracing() -//! .with_exporter( -//! opentelemetry_otlp::new_exporter() -//! .tonic() -//! .with_endpoint("http://localhost:4317") -//! .with_timeout(Duration::from_secs(3)) -//! .with_metadata(map) -//! ) -//! .with_trace_config( -//! trace::config() +//! let exporter = opentelemetry_otlp::SpanExporter::builder() +//! .with_tonic() +//! .with_endpoint("http://localhost:4317") +//! .with_timeout(Duration::from_secs(3)) +//! .with_metadata(map) +//! .build()?; +//! +//! let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder() +//! .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio) +//! .with_config( +//! trace::Config::default() //! .with_sampler(Sampler::AlwaysOn) //! .with_id_generator(RandomIdGenerator::default()) //! .with_max_events_per_span(64) //! .with_max_attributes_per_span(16) //! .with_max_events_per_span(16) -//! 
.with_resource(Resource::new(vec![KeyValue::new("service.name", "example")])), -//! ) -//! .install_batch(opentelemetry_sdk::runtime::Tokio)?; -//! global::set_tracer_provider(tracer_provider); -//! let tracer = global::tracer("tracer-name"); +//! .with_resource(Resource::builder_empty().with_attributes([KeyValue::new("service.name", "example")]).build()), +//! ).build(); +//! global::set_tracer_provider(tracer_provider); +//! let tracer = global::tracer("tracer-name"); //! # tracer //! # }; //! //! # #[cfg(all(feature = "metrics", feature = "grpc-tonic"))] //! # { -//! let export_config = ExportConfig { -//! endpoint: "http://localhost:4317".to_string(), -//! timeout: Duration::from_secs(3), -//! protocol: Protocol::Grpc -//! }; -//! -//! let meter = opentelemetry_otlp::new_pipeline() -//! .metrics(opentelemetry_sdk::runtime::Tokio) -//! .with_exporter( -//! opentelemetry_otlp::new_exporter() -//! .tonic() -//! .with_export_config(export_config), -//! // can also config it using with_* functions like the tracing part above. -//! ) -//! .with_resource(Resource::new(vec![KeyValue::new("service.name", "example")])) -//! .with_period(Duration::from_secs(3)) +//! let exporter = opentelemetry_otlp::MetricExporter::builder() +//! .with_tonic() +//! .with_endpoint("http://localhost:4318/v1/metrics") +//! .with_protocol(Protocol::Grpc) +//! .with_timeout(Duration::from_secs(3)) +//! .build() +//! .unwrap(); +//! +//! let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter) +//! .with_interval(std::time::Duration::from_secs(3)) //! .with_timeout(Duration::from_secs(10)) -//! .with_temporality_selector(DefaultTemporalitySelector::new()) +//! .build(); +//! +//! let provider = opentelemetry_sdk::metrics::SdkMeterProvider::builder() +//! .with_reader(reader) +//! .with_resource(Resource::builder_empty().with_attributes([KeyValue::new("service.name", "example")]).build()) //! .build(); //! # } //! 
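The trace and metrics examples above have a direct logs counterpart. A minimal sketch of the equivalent logs setup, assuming the `logs` and `grpc-tonic` features are enabled; it uses the `LogExporter` builder introduced later in this diff, and the `LoggerProvider` API that the removed pipeline code already calls into:

```rust
use opentelemetry_otlp::LogExporter;
use opentelemetry_sdk::logs::LoggerProvider;

fn init_logs() -> Result<LoggerProvider, Box<dyn std::error::Error>> {
    // Build the OTLP log exporter over gRPC/tonic.
    let exporter = LogExporter::builder().with_tonic().build()?;

    // A simple exporter processes each record synchronously; prefer the
    // batch processor in production, as in the trace example above.
    let provider = LoggerProvider::builder()
        .with_simple_exporter(exporter)
        .build();

    // Loggers are created by appenders such as opentelemetry-appender-tracing
    // or opentelemetry-appender-log.
    Ok(provider)
}
```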
@@ -217,35 +219,44 @@ mod exporter;
 
 #[cfg(feature = "logs")]
+#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
 mod logs;
 
 #[cfg(feature = "metrics")]
+#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
 mod metric;
 
 #[cfg(feature = "trace")]
+#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
 mod span;
 
 pub use crate::exporter::Compression;
 pub use crate::exporter::ExportConfig;
 #[cfg(feature = "trace")]
+#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
 pub use crate::span::{
-    OtlpTracePipeline, SpanExporter, SpanExporterBuilder, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,
-    OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS,
-    OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
+    SpanExporter, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
+    OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,
 };
 
 #[cfg(feature = "metrics")]
+#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
 pub use crate::metric::{
-    MetricsExporter, MetricsExporterBuilder, OtlpMetricPipeline,
-    OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
+    MetricExporter, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
     OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT,
 };
 
 #[cfg(feature = "logs")]
+#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))]
 pub use crate::logs::{
-    LogExporter, LogExporterBuilder, OtlpLogPipeline, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION,
-    OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS,
-    OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
+    LogExporter, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT,
+    OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
 };
 
+#[cfg(any(feature = "http-proto", feature = "http-json"))]
+pub use crate::exporter::http::{HasHttpConfig, WithHttpConfig};
+
+#[cfg(feature = "grpc-tonic")]
+pub use crate::exporter::tonic::{HasTonicConfig, WithTonicConfig};
+
 pub use crate::exporter::{
     HasExportConfig, WithExportConfig, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT,
     OTEL_EXPORTER_OTLP_ENDPOINT_DEFAULT, OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_PROTOCOL,
@@ -255,6 +266,24 @@ pub use crate::exporter::{
 
 use opentelemetry_sdk::export::ExportError;
 
+/// Type to indicate the builder does not have a client set.
+#[derive(Debug, Default, Clone)]
+pub struct NoExporterBuilderSet;
+
+/// Type to hold the [TonicExporterBuilder] and indicate it has been set.
+///
+/// This allows access to [TonicExporterBuilder]-specific configuration methods.
+#[cfg(feature = "grpc-tonic")]
+#[derive(Debug, Default)]
+pub struct TonicExporterBuilderSet(TonicExporterBuilder);
+
+/// Type to hold the [HttpExporterBuilder] and indicate it has been set.
+///
+/// This allows access to [HttpExporterBuilder]-specific configuration methods.
+#[cfg(any(feature = "http-proto", feature = "http-json"))]
+#[derive(Debug, Default)]
+pub struct HttpExporterBuilderSet(HttpExporterBuilder);
+
 #[cfg(any(feature = "http-proto", feature = "http-json"))]
 pub use crate::exporter::http::HttpExporterBuilder;
 
@@ -264,55 +293,6 @@ pub use crate::exporter::tonic::{TonicConfig, TonicExporterBuilder};
 #[cfg(feature = "serialize")]
 use serde::{Deserialize, Serialize};
 
-/// General builder for both tracing and metrics.
-#[derive(Debug)]
-pub struct OtlpPipeline;
-
-/// Build a OTLP metrics or tracing exporter builder. See functions below to understand
-/// what's currently supported.
-#[derive(Debug)]
-pub struct OtlpExporterPipeline;
-
-impl OtlpExporterPipeline {
-    /// Use tonic as grpc layer, return a `TonicExporterBuilder` to config tonic and build the exporter.
-    ///
-    /// This exporter can be used in both `tracing` and `metrics` pipeline.
-    #[cfg(feature = "grpc-tonic")]
-    pub fn tonic(self) -> TonicExporterBuilder {
-        TonicExporterBuilder::default()
-    }
-
-    /// Use HTTP as transport layer, return a `HttpExporterBuilder` to config the http transport
-    /// and build the exporter.
-    ///
-    /// This exporter can be used in both `tracing` and `metrics` pipeline.
-    #[cfg(any(feature = "http-proto", feature = "http-json"))]
-    pub fn http(self) -> HttpExporterBuilder {
-        HttpExporterBuilder::default()
-    }
-}
-
-/// Create a new pipeline builder with the recommended configuration.
-///
-/// ## Examples
-///
-/// ```no_run
-/// fn main() -> Result<(), Box<dyn std::error::Error>> {
-///     # #[cfg(feature = "trace")]
-///     let tracing_builder = opentelemetry_otlp::new_pipeline().tracing();
-///
-///     Ok(())
-/// }
-/// ```
-pub fn new_pipeline() -> OtlpPipeline {
-    OtlpPipeline
-}
-
-/// Create a builder to build OTLP metrics exporter or tracing exporter.
-pub fn new_exporter() -> OtlpExporterPipeline {
-    OtlpExporterPipeline
-}
-
 /// Wrap type for errors from this crate.
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
@@ -411,6 +391,12 @@ impl ExportError for Error {
     }
 }
 
+impl opentelemetry::trace::ExportError for Error {
+    fn exporter_name(&self) -> &'static str {
+        "otlp"
+    }
+}
+
 /// The communication protocol to use when exporting data.
 #[cfg_attr(feature = "serialize", derive(Deserialize, Serialize))]
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs
index 71f5a34b3d..bb643cb095 100644
--- a/opentelemetry-otlp/src/logs.rs
+++ b/opentelemetry-otlp/src/logs.rs
@@ -2,20 +2,21 @@
 //!
 //! Defines a [LogExporter] to send logs via the OpenTelemetry Protocol (OTLP)
 
-#[cfg(feature = "grpc-tonic")]
-use crate::exporter::tonic::TonicExporterBuilder;
-
-#[cfg(feature = "http-proto")]
-use crate::exporter::http::HttpExporterBuilder;
-
-use crate::{NoExporterConfig, OtlpPipeline};
 use async_trait::async_trait;
+use opentelemetry::otel_debug;
 use std::fmt::Debug;
 
-use opentelemetry::logs::{LogError, LogResult};
+use opentelemetry_sdk::logs::LogResult;
 use opentelemetry_sdk::export::logs::LogBatch;
-use opentelemetry_sdk::{runtime::RuntimeChannel, Resource};
+
+use crate::{HasExportConfig, NoExporterBuilderSet};
+
+#[cfg(feature = "grpc-tonic")]
+use crate::{HasTonicConfig, TonicExporterBuilder, TonicExporterBuilderSet};
+
+#[cfg(any(feature = "http-proto", feature = "http-json"))]
+use crate::{HasHttpConfig, HttpExporterBuilder, HttpExporterBuilderSet};
 
 /// Compression algorithm to use, defaults to none.
 pub const OTEL_EXPORTER_OTLP_LOGS_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION";
@@ -32,53 +33,75 @@ pub const OTEL_EXPORTER_OTLP_LOGS_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_LOGS_TIMEO
 /// Note: this is only supported for HTTP.
 pub const OTEL_EXPORTER_OTLP_LOGS_HEADERS: &str = "OTEL_EXPORTER_OTLP_LOGS_HEADERS";
 
-impl OtlpPipeline {
-    /// Create a OTLP logging pipeline.
-    pub fn logging(self) -> OtlpLogPipeline<NoExporterConfig> {
-        OtlpLogPipeline {
-            resource: None,
-            exporter_builder: NoExporterConfig(()),
-            batch_config: None,
+#[derive(Debug, Default, Clone)]
+pub struct LogExporterBuilder<C> {
+    client: C,
+    endpoint: Option<String>,
+}
+
+impl LogExporterBuilder<NoExporterBuilderSet> {
+    pub fn new() -> Self {
+        LogExporterBuilder::default()
+    }
+
+    #[cfg(feature = "grpc-tonic")]
+    pub fn with_tonic(self) -> LogExporterBuilder<TonicExporterBuilderSet> {
+        LogExporterBuilder {
+            client: TonicExporterBuilderSet(TonicExporterBuilder::default()),
+            endpoint: self.endpoint,
+        }
+    }
+
+    #[cfg(any(feature = "http-proto", feature = "http-json"))]
+    pub fn with_http(self) -> LogExporterBuilder<HttpExporterBuilderSet> {
+        LogExporterBuilder {
+            client: HttpExporterBuilderSet(HttpExporterBuilder::default()),
+            endpoint: self.endpoint,
         }
     }
 }
 
-/// OTLP log exporter builder
-#[derive(Debug)]
-#[allow(clippy::large_enum_variant)]
-#[non_exhaustive]
-pub enum LogExporterBuilder {
-    /// Tonic log exporter builder
-    #[cfg(feature = "grpc-tonic")]
-    Tonic(TonicExporterBuilder),
-    /// Http log exporter builder
-    #[cfg(feature = "http-proto")]
-    Http(HttpExporterBuilder),
+#[cfg(feature = "grpc-tonic")]
+impl LogExporterBuilder<TonicExporterBuilderSet> {
+    pub fn build(self) -> Result<LogExporter, opentelemetry_sdk::logs::LogError> {
+        let result = self.client.0.build_log_exporter();
+        otel_debug!(name: "LogExporterBuilt", result = format!("{:?}", &result));
+        result
+    }
 }
 
-impl LogExporterBuilder {
-    /// Build a OTLP log exporter using the given configuration.
-    pub fn build_log_exporter(self) -> Result<LogExporter, LogError> {
-        match self {
-            #[cfg(feature = "grpc-tonic")]
-            LogExporterBuilder::Tonic(builder) => builder.build_log_exporter(),
-            #[cfg(feature = "http-proto")]
-            LogExporterBuilder::Http(builder) => builder.build_log_exporter(),
-        }
+#[cfg(any(feature = "http-proto", feature = "http-json"))]
+impl LogExporterBuilder<HttpExporterBuilderSet> {
+    pub fn build(self) -> Result<LogExporter, opentelemetry_sdk::logs::LogError> {
+        self.client.0.build_log_exporter()
+    }
+}
+
+#[cfg(feature = "grpc-tonic")]
+impl HasExportConfig for LogExporterBuilder<TonicExporterBuilderSet> {
+    fn export_config(&mut self) -> &mut crate::ExportConfig {
+        &mut self.client.0.exporter_config
+    }
+}
+
+#[cfg(any(feature = "http-proto", feature = "http-json"))]
+impl HasExportConfig for LogExporterBuilder<HttpExporterBuilderSet> {
+    fn export_config(&mut self) -> &mut crate::ExportConfig {
+        &mut self.client.0.exporter_config
     }
 }
 
 #[cfg(feature = "grpc-tonic")]
-impl From<TonicExporterBuilder> for LogExporterBuilder {
-    fn from(exporter: TonicExporterBuilder) -> Self {
-        LogExporterBuilder::Tonic(exporter)
+impl HasTonicConfig for LogExporterBuilder<TonicExporterBuilderSet> {
+    fn tonic_config(&mut self) -> &mut crate::TonicConfig {
+        &mut self.client.0.tonic_config
    }
 }
 
-#[cfg(feature = "http-proto")]
-impl From<HttpExporterBuilder> for LogExporterBuilder {
-    fn from(exporter: HttpExporterBuilder) -> Self {
-        LogExporterBuilder::Http(exporter)
+#[cfg(any(feature = "http-proto", feature = "http-json"))]
+impl HasHttpConfig for LogExporterBuilder<HttpExporterBuilderSet> {
+    fn http_client_config(&mut self) -> &mut crate::exporter::http::HttpConfig {
+        &mut self.client.0.http_config
     }
 }
 
@@ -89,6 +112,11 @@ pub struct LogExporter {
 }
 
 impl LogExporter {
+    /// Obtain a builder to configure a [LogExporter].
+    pub fn builder() -> LogExporterBuilder<NoExporterBuilderSet> {
+        LogExporterBuilder::default()
+    }
+
     /// Create a new log exporter
     pub fn new(client: impl opentelemetry_sdk::export::logs::LogExporter + 'static) -> Self {
         LogExporter {
@@ -99,7 +127,7 @@ impl LogExporter {
 
 #[async_trait]
 impl opentelemetry_sdk::export::logs::LogExporter for LogExporter {
-    async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> {
+    async fn export(&self, batch: LogBatch<'_>) -> LogResult<()> {
         self.client.export(batch).await
     }
 
@@ -107,106 +135,3 @@ impl opentelemetry_sdk::export::logs::LogExporter for LogExporter {
         self.client.set_resource(resource);
     }
 }
-
-/// Recommended configuration for an OTLP exporter pipeline.
-#[derive(Debug)]
-pub struct OtlpLogPipeline<EB> {
-    exporter_builder: EB,
-    resource: Option<Resource>,
-    batch_config: Option<opentelemetry_sdk::logs::BatchConfig>,
-}
-
-impl<EB> OtlpLogPipeline<EB> {
-    /// Set the Resource associated with log provider.
-    pub fn with_resource(self, resource: Resource) -> Self {
-        OtlpLogPipeline {
-            resource: Some(resource),
-            ..self
-        }
-    }
-
-    /// Set the batch log processor configuration, and it will override the env vars.
-    pub fn with_batch_config(mut self, batch_config: opentelemetry_sdk::logs::BatchConfig) -> Self {
-        self.batch_config = Some(batch_config);
-        self
-    }
-}
-
-impl OtlpLogPipeline<NoExporterConfig> {
-    /// Set the OTLP log exporter builder.
-    pub fn with_exporter<B: Into<LogExporterBuilder>>(
-        self,
-        pipeline: B,
-    ) -> OtlpLogPipeline<LogExporterBuilder> {
-        OtlpLogPipeline {
-            exporter_builder: pipeline.into(),
-            resource: self.resource,
-            batch_config: self.batch_config,
-        }
-    }
-}
-
-impl OtlpLogPipeline<LogExporterBuilder> {
-    /// Install the configured log exporter.
-    ///
-    /// Returns a [`LoggerProvider`].
-    ///
-    /// [`LoggerProvider`]: opentelemetry_sdk::logs::LoggerProvider
-    pub fn install_simple(self) -> Result<opentelemetry_sdk::logs::LoggerProvider, LogError> {
-        Ok(build_simple_with_exporter(
-            self.exporter_builder.build_log_exporter()?,
-            self.resource,
-        ))
-    }
-
-    /// Install the configured log exporter and a batch log processor using the
-    /// specified runtime.
-    ///
-    /// Returns a [`LoggerProvider`].
-    ///
-    /// [`LoggerProvider`]: opentelemetry_sdk::logs::LoggerProvider
-    pub fn install_batch<R: RuntimeChannel>(
-        self,
-        runtime: R,
-    ) -> Result<opentelemetry_sdk::logs::LoggerProvider, LogError> {
-        Ok(build_batch_with_exporter(
-            self.exporter_builder.build_log_exporter()?,
-            self.resource,
-            runtime,
-            self.batch_config,
-        ))
-    }
-}
-
-fn build_simple_with_exporter(
-    exporter: LogExporter,
-    resource: Option<Resource>,
-) -> opentelemetry_sdk::logs::LoggerProvider {
-    let mut provider_builder =
-        opentelemetry_sdk::logs::LoggerProvider::builder().with_simple_exporter(exporter);
-    if let Some(resource) = resource {
-        provider_builder = provider_builder.with_resource(resource);
-    }
-    // logger would be created in the appenders like
-    // opentelemetry-appender-tracing, opentelemetry-appender-log etc.
- provider_builder.build() -} - -fn build_batch_with_exporter( - exporter: LogExporter, - resource: Option, - runtime: R, - batch_config: Option, -) -> opentelemetry_sdk::logs::LoggerProvider { - let mut provider_builder = opentelemetry_sdk::logs::LoggerProvider::builder(); - let batch_processor = opentelemetry_sdk::logs::BatchLogProcessor::builder(exporter, runtime) - .with_batch_config(batch_config.unwrap_or_default()) - .build(); - provider_builder = provider_builder.with_log_processor(batch_processor); - - if let Some(resource) = resource { - provider_builder = provider_builder.with_resource(resource); - } - // logger would be created in the tracing appender - provider_builder.build() -} diff --git a/opentelemetry-otlp/src/metric.rs b/opentelemetry-otlp/src/metric.rs index 83474e94ff..c8bdb38d9e 100644 --- a/opentelemetry-otlp/src/metric.rs +++ b/opentelemetry-otlp/src/metric.rs @@ -1,30 +1,27 @@ //! OTEL metric exporter //! -//! Defines a [MetricsExporter] to send metric data to backend via OTLP protocol. +//! Defines a [MetricExporter] to send metric data to backend via OTLP protocol. //! -use crate::{NoExporterConfig, OtlpPipeline}; +#[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))] +use crate::HasExportConfig; + +#[cfg(any(feature = "http-proto", feature = "http-json"))] +use crate::{exporter::http::HttpExporterBuilder, HasHttpConfig, HttpExporterBuilderSet}; + +#[cfg(feature = "grpc-tonic")] +use crate::{exporter::tonic::TonicExporterBuilder, HasTonicConfig, TonicExporterBuilderSet}; + +use crate::NoExporterBuilderSet; + use async_trait::async_trait; use core::fmt; -use opentelemetry::metrics::Result; +use opentelemetry_sdk::metrics::MetricResult; -#[cfg(feature = "grpc-tonic")] -use crate::exporter::tonic::TonicExporterBuilder; -use opentelemetry_sdk::{ - metrics::{ - data::{ResourceMetrics, Temporality}, - exporter::PushMetricsExporter, - reader::{DefaultTemporalitySelector, TemporalitySelector}, - InstrumentKind, PeriodicReader, SdkMeterProvider, - }, - runtime::Runtime, - Resource, +use opentelemetry_sdk::metrics::{ + data::ResourceMetrics, exporter::PushMetricExporter, Temporality, }; use std::fmt::{Debug, Formatter}; -use std::time; - -#[cfg(feature = "http-proto")] -use crate::exporter::http::HttpExporterBuilder; /// Target to which the exporter is going to send metrics, defaults to https://localhost:4317/v1/metrics. /// Learn about the relationship between this constant and default/spans/logs at @@ -39,283 +36,139 @@ pub const OTEL_EXPORTER_OTLP_METRICS_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_MET /// Example: `k1=v1,k2=v2` /// Note: this is only supported for HTTP. pub const OTEL_EXPORTER_OTLP_METRICS_HEADERS: &str = "OTEL_EXPORTER_OTLP_METRICS_HEADERS"; -impl OtlpPipeline { - /// Create a OTLP metrics pipeline. - pub fn metrics(self, rt: RT) -> OtlpMetricPipeline - where - RT: Runtime, - { - OtlpMetricPipeline { - rt, - temporality_selector: None, - exporter_pipeline: NoExporterConfig(()), - resource: None, - period: None, - timeout: None, - } - } -} - -/// OTLP metrics exporter builder. 
-#[derive(Debug)] -#[non_exhaustive] -pub enum MetricsExporterBuilder { - /// Tonic metrics exporter builder - #[cfg(feature = "grpc-tonic")] - Tonic(TonicExporterBuilder), - /// Http metrics exporter builder - #[cfg(feature = "http-proto")] - Http(HttpExporterBuilder), - /// Missing exporter builder - #[doc(hidden)] - #[cfg(not(any(feature = "http-proto", feature = "grpc-tonic")))] - Unconfigured, +#[derive(Debug, Default, Clone)] +pub struct MetricExporterBuilder { + client: C, + temporality: Temporality, } -impl MetricsExporterBuilder { - /// Build a OTLP metrics exporter with given configuration. - pub fn build_metrics_exporter( - self, - temporality_selector: Box, - ) -> Result { - match self { - #[cfg(feature = "grpc-tonic")] - MetricsExporterBuilder::Tonic(builder) => { - builder.build_metrics_exporter(temporality_selector) - } - #[cfg(feature = "http-proto")] - MetricsExporterBuilder::Http(builder) => { - builder.build_metrics_exporter(temporality_selector) - } - #[cfg(not(any(feature = "http-proto", feature = "grpc-tonic")))] - MetricsExporterBuilder::Unconfigured => { - drop(temporality_selector); - Err(opentelemetry::metrics::MetricsError::Other( - "no configured metrics exporter, enable `http-proto` or `grpc-tonic` feature to configure a metrics exporter".into(), - )) - } - } - } -} - -#[cfg(feature = "grpc-tonic")] -impl From for MetricsExporterBuilder { - fn from(exporter: TonicExporterBuilder) -> Self { - MetricsExporterBuilder::Tonic(exporter) - } -} - -#[cfg(feature = "http-proto")] -impl From for MetricsExporterBuilder { - fn from(exporter: HttpExporterBuilder) -> Self { - MetricsExporterBuilder::Http(exporter) +impl MetricExporterBuilder { + pub fn new() -> Self { + MetricExporterBuilder::default() } } -/// Pipeline to build OTLP metrics exporter -/// -/// Note that currently the OTLP metrics exporter only supports tonic as it's grpc layer and tokio as -/// runtime. -pub struct OtlpMetricPipeline { - rt: RT, - temporality_selector: Option>, - exporter_pipeline: EB, - resource: Option, - period: Option, - timeout: Option, -} - -impl OtlpMetricPipeline -where - RT: Runtime, -{ - /// Build with resource key value pairs. 
- pub fn with_resource(self, resource: Resource) -> Self { - OtlpMetricPipeline { - resource: Some(resource), - ..self +impl MetricExporterBuilder { + #[cfg(feature = "grpc-tonic")] + pub fn with_tonic(self) -> MetricExporterBuilder { + MetricExporterBuilder { + client: TonicExporterBuilderSet(TonicExporterBuilder::default()), + temporality: self.temporality, } } - /// Build with timeout - pub fn with_timeout(self, timeout: time::Duration) -> Self { - OtlpMetricPipeline { - timeout: Some(timeout), - ..self + #[cfg(any(feature = "http-proto", feature = "http-json"))] + pub fn with_http(self) -> MetricExporterBuilder { + MetricExporterBuilder { + client: HttpExporterBuilderSet(HttpExporterBuilder::default()), + temporality: self.temporality, } } - /// Build with period, your metrics will be exported with this period - pub fn with_period(self, period: time::Duration) -> Self { - OtlpMetricPipeline { - period: Some(period), - ..self + pub fn with_temporality(self, temporality: Temporality) -> MetricExporterBuilder { + MetricExporterBuilder { + client: self.client, + temporality, } } +} - /// Build with the given temporality selector - pub fn with_temporality_selector(self, selector: T) -> Self { - OtlpMetricPipeline { - temporality_selector: Some(Box::new(selector)), - ..self - } +#[cfg(feature = "grpc-tonic")] +impl MetricExporterBuilder { + pub fn build(self) -> MetricResult { + let exporter = self.client.0.build_metrics_exporter(self.temporality)?; + opentelemetry::otel_debug!(name: "MetricExporterBuilt"); + Ok(exporter) } +} - /// Build with delta temporality selector. - /// - /// This temporality selector is equivalent to OTLP Metrics Exporter's - /// `Delta` temporality preference (see [its documentation][exporter-docs]). - /// - /// [exporter-docs]: https://github.com/open-telemetry/opentelemetry-specification/blob/a1c13d59bb7d0fb086df2b3e1eaec9df9efef6cc/specification/metrics/sdk_exporters/otlp.md#additional-configuration - pub fn with_delta_temporality(self) -> Self { - self.with_temporality_selector(DeltaTemporalitySelector) +#[cfg(any(feature = "http-proto", feature = "http-json"))] +impl MetricExporterBuilder { + pub fn build(self) -> MetricResult { + let exporter = self.client.0.build_metrics_exporter(self.temporality)?; + Ok(exporter) } } -impl OtlpMetricPipeline -where - RT: Runtime, -{ - /// Build with the exporter - pub fn with_exporter>( - self, - pipeline: B, - ) -> OtlpMetricPipeline { - OtlpMetricPipeline { - exporter_pipeline: pipeline.into(), - rt: self.rt, - temporality_selector: self.temporality_selector, - resource: self.resource, - period: self.period, - timeout: self.timeout, - } +#[cfg(feature = "grpc-tonic")] +impl HasExportConfig for MetricExporterBuilder { + fn export_config(&mut self) -> &mut crate::ExportConfig { + &mut self.client.0.exporter_config } } -impl OtlpMetricPipeline -where - RT: Runtime, -{ - /// Build MeterProvider - pub fn build(self) -> Result { - let exporter = self.exporter_pipeline.build_metrics_exporter( - self.temporality_selector - .unwrap_or_else(|| Box::new(DefaultTemporalitySelector::new())), - )?; - - let mut builder = PeriodicReader::builder(exporter, self.rt); - - if let Some(period) = self.period { - builder = builder.with_interval(period); - } - if let Some(timeout) = self.timeout { - builder = builder.with_timeout(timeout) - } - - let reader = builder.build(); - - let mut provider = SdkMeterProvider::builder().with_reader(reader); - - if let Some(resource) = self.resource { - provider = provider.with_resource(resource); - } - - 
let provider = provider.build(); - Ok(provider) +#[cfg(any(feature = "http-proto", feature = "http-json"))] +impl HasExportConfig for MetricExporterBuilder { + fn export_config(&mut self) -> &mut crate::ExportConfig { + &mut self.client.0.exporter_config } } -impl Debug for OtlpMetricPipeline { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("OtlpMetricPipeline") - .field("exporter_pipeline", &self.exporter_pipeline) - .field("resource", &self.resource) - .field("period", &self.period) - .field("timeout", &self.timeout) - .finish() +#[cfg(feature = "grpc-tonic")] +impl HasTonicConfig for MetricExporterBuilder { + fn tonic_config(&mut self) -> &mut crate::TonicConfig { + &mut self.client.0.tonic_config } } -/// A temporality selector that returns [`Delta`][Temporality::Delta] for all -/// instruments except `UpDownCounter` and `ObservableUpDownCounter`. -/// -/// This temporality selector is equivalent to OTLP Metrics Exporter's -/// `Delta` temporality preference (see [its documentation][exporter-docs]). -/// -/// [exporter-docs]: https://github.com/open-telemetry/opentelemetry-specification/blob/a1c13d59bb7d0fb086df2b3e1eaec9df9efef6cc/specification/metrics/sdk_exporters/otlp.md#additional-configuration -#[derive(Debug)] -struct DeltaTemporalitySelector; - -impl TemporalitySelector for DeltaTemporalitySelector { - #[rustfmt::skip] - fn temporality(&self, kind: InstrumentKind) -> Temporality { - match kind { - InstrumentKind::Counter - | InstrumentKind::Histogram - | InstrumentKind::ObservableCounter - | InstrumentKind::Gauge - | InstrumentKind::ObservableGauge => { - Temporality::Delta - } - InstrumentKind::UpDownCounter - | InstrumentKind::ObservableUpDownCounter => { - Temporality::Cumulative - } - } +#[cfg(any(feature = "http-proto", feature = "http-json"))] +impl HasHttpConfig for MetricExporterBuilder { + fn http_client_config(&mut self) -> &mut crate::exporter::http::HttpConfig { + &mut self.client.0.http_config } } /// An interface for OTLP metrics clients #[async_trait] pub trait MetricsClient: fmt::Debug + Send + Sync + 'static { - async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()>; - fn shutdown(&self) -> Result<()>; + async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()>; + fn shutdown(&self) -> MetricResult<()>; } /// Export metrics in OTEL format. 
-pub struct MetricsExporter { +pub struct MetricExporter { client: Box, - temporality_selector: Box, + temporality: Temporality, } -impl Debug for MetricsExporter { +impl Debug for MetricExporter { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MetricsExporter").finish() - } -} - -impl TemporalitySelector for MetricsExporter { - fn temporality(&self, kind: InstrumentKind) -> Temporality { - self.temporality_selector.temporality(kind) + f.debug_struct("MetricExporter").finish() } } #[async_trait] -impl PushMetricsExporter for MetricsExporter { - async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()> { +impl PushMetricExporter for MetricExporter { + async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> { self.client.export(metrics).await } - async fn force_flush(&self) -> Result<()> { + async fn force_flush(&self) -> MetricResult<()> { // this component is stateless Ok(()) } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { self.client.shutdown() } + + fn temporality(&self) -> Temporality { + self.temporality + } } -impl MetricsExporter { +impl MetricExporter { + /// Obtain a builder to configure a [MetricExporter]. + pub fn builder() -> MetricExporterBuilder { + MetricExporterBuilder::default() + } + /// Create a new metrics exporter - pub fn new( - client: impl MetricsClient, - temporality_selector: Box, - ) -> MetricsExporter { - MetricsExporter { + pub fn new(client: impl MetricsClient, temporality: Temporality) -> MetricExporter { + MetricExporter { client: Box::new(client), - temporality_selector, + temporality, } } } diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs index 6e61cfd1a2..190e3fdfce 100644 --- a/opentelemetry-otlp/src/span.rs +++ b/opentelemetry-otlp/src/span.rs @@ -5,20 +5,21 @@ use std::fmt::Debug; use futures_core::future::BoxFuture; -use opentelemetry::trace::TraceError; -use opentelemetry_sdk::{ - self as sdk, - export::trace::{ExportResult, SpanData}, -}; -use sdk::runtime::RuntimeChannel; +use opentelemetry_sdk::export::trace::{ExportResult, SpanData}; #[cfg(feature = "grpc-tonic")] -use crate::exporter::tonic::TonicExporterBuilder; +use crate::{ + exporter::tonic::{HasTonicConfig, TonicExporterBuilder}, + TonicExporterBuilderSet, +}; #[cfg(any(feature = "http-proto", feature = "http-json"))] -use crate::exporter::http::HttpExporterBuilder; +use crate::{ + exporter::http::{HasHttpConfig, HttpExporterBuilder}, + HttpExporterBuilderSet, +}; -use crate::{NoExporterConfig, OtlpPipeline}; +use crate::{exporter::HasExportConfig, NoExporterBuilderSet}; /// Target to which the exporter is going to send spans, defaults to https://localhost:4317/v1/traces. /// Learn about the relationship between this constant and default/metrics/logs at @@ -34,167 +35,73 @@ pub const OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: &str = "OTEL_EXPORTER_OTLP_TRAC /// Note: this is only supported for HTTP. pub const OTEL_EXPORTER_OTLP_TRACES_HEADERS: &str = "OTEL_EXPORTER_OTLP_TRACES_HEADERS"; -impl OtlpPipeline { - /// Create a OTLP tracing pipeline. - pub fn tracing(self) -> OtlpTracePipeline { - OtlpTracePipeline { - exporter_builder: NoExporterConfig(()), - trace_config: None, - batch_config: None, - } - } -} - -/// Recommended configuration for an OTLP exporter pipeline. 
-/// -/// ## Examples -/// -/// ```no_run -/// let tracing_pipeline = opentelemetry_otlp::new_pipeline().tracing(); -/// ``` -#[derive(Debug)] -pub struct OtlpTracePipeline { - exporter_builder: EB, - trace_config: Option, - batch_config: Option, +#[derive(Debug, Default, Clone)] +pub struct SpanExporterBuilder { + client: C, } -impl OtlpTracePipeline { - /// Set the trace provider configuration. - pub fn with_trace_config(mut self, trace_config: sdk::trace::Config) -> Self { - self.trace_config = Some(trace_config); - self +impl SpanExporterBuilder { + pub fn new() -> Self { + SpanExporterBuilder::default() } - /// Set the batch span processor configuration, and it will override the env vars. - pub fn with_batch_config(mut self, batch_config: sdk::trace::BatchConfig) -> Self { - self.batch_config = Some(batch_config); - self + #[cfg(feature = "grpc-tonic")] + pub fn with_tonic(self) -> SpanExporterBuilder { + SpanExporterBuilder { + client: TonicExporterBuilderSet(TonicExporterBuilder::default()), + } } -} -impl OtlpTracePipeline { - /// Set the OTLP span exporter builder. - /// - /// Note that the pipeline will not build the exporter until [`install_batch`] or [`install_simple`] - /// is called. - /// - /// [`install_batch`]: OtlpTracePipeline::install_batch - /// [`install_simple`]: OtlpTracePipeline::install_simple - pub fn with_exporter>( - self, - pipeline: B, - ) -> OtlpTracePipeline { - OtlpTracePipeline { - exporter_builder: pipeline.into(), - trace_config: self.trace_config, - batch_config: self.batch_config, + #[cfg(any(feature = "http-proto", feature = "http-json"))] + pub fn with_http(self) -> SpanExporterBuilder { + SpanExporterBuilder { + client: HttpExporterBuilderSet(HttpExporterBuilder::default()), } } } -impl OtlpTracePipeline { - /// Install the configured span exporter. - /// - /// Returns a [`TracerProvider`]. - /// - /// [`TracerProvider`]: opentelemetry::trace::TracerProvider - pub fn install_simple(self) -> Result { - Ok(build_simple_with_exporter( - self.exporter_builder.build_span_exporter()?, - self.trace_config, - )) - } - - /// Install the configured span exporter and a batch span processor using the - /// specified runtime. - /// - /// Returns a [`TracerProvider`]. 
- /// - /// `install_batch` will panic if not called within a tokio runtime - /// - /// [`TracerProvider`]: opentelemetry::trace::TracerProvider - pub fn install_batch( - self, - runtime: R, - ) -> Result { - Ok(build_batch_with_exporter( - self.exporter_builder.build_span_exporter()?, - self.trace_config, - runtime, - self.batch_config, - )) +#[cfg(feature = "grpc-tonic")] +impl SpanExporterBuilder { + pub fn build(self) -> Result { + let span_exporter = self.client.0.build_span_exporter()?; + opentelemetry::otel_debug!(name: "SpanExporterBuilt"); + Ok(SpanExporter::new(span_exporter)) } } -fn build_simple_with_exporter( - exporter: SpanExporter, - trace_config: Option, -) -> sdk::trace::TracerProvider { - let mut provider_builder = sdk::trace::TracerProvider::builder().with_simple_exporter(exporter); - if let Some(config) = trace_config { - provider_builder = provider_builder.with_config(config); +#[cfg(any(feature = "http-proto", feature = "http-json"))] +impl SpanExporterBuilder { + pub fn build(self) -> Result { + let span_exporter = self.client.0.build_span_exporter()?; + Ok(SpanExporter::new(span_exporter)) } - - provider_builder.build() } -fn build_batch_with_exporter( - exporter: SpanExporter, - trace_config: Option, - runtime: R, - batch_config: Option, -) -> sdk::trace::TracerProvider { - let mut provider_builder = sdk::trace::TracerProvider::builder(); - let batch_processor = sdk::trace::BatchSpanProcessor::builder(exporter, runtime) - .with_batch_config(batch_config.unwrap_or_default()) - .build(); - provider_builder = provider_builder.with_span_processor(batch_processor); - - if let Some(config) = trace_config { - provider_builder = provider_builder.with_config(config); +#[cfg(feature = "grpc-tonic")] +impl HasExportConfig for SpanExporterBuilder { + fn export_config(&mut self) -> &mut crate::ExportConfig { + &mut self.client.0.exporter_config } - provider_builder.build() } -/// OTLP span exporter builder. -#[derive(Debug)] -// This enum only used during initialization stage of application. The overhead should be OK. -// Users can also disable the unused features to make the overhead on object size smaller. -#[allow(clippy::large_enum_variant)] -#[non_exhaustive] -pub enum SpanExporterBuilder { - /// Tonic span exporter builder - #[cfg(feature = "grpc-tonic")] - Tonic(TonicExporterBuilder), - /// Http span exporter builder - #[cfg(any(feature = "http-proto", feature = "http-json"))] - Http(HttpExporterBuilder), -} - -impl SpanExporterBuilder { - /// Build a OTLP span exporter using the given tonic configuration and exporter configuration. 
- pub fn build_span_exporter(self) -> Result { - match self { - #[cfg(feature = "grpc-tonic")] - SpanExporterBuilder::Tonic(builder) => builder.build_span_exporter(), - #[cfg(any(feature = "http-proto", feature = "http-json"))] - SpanExporterBuilder::Http(builder) => builder.build_span_exporter(), - } +#[cfg(any(feature = "http-proto", feature = "http-json"))] +impl HasExportConfig for SpanExporterBuilder { + fn export_config(&mut self) -> &mut crate::ExportConfig { + &mut self.client.0.exporter_config } } #[cfg(feature = "grpc-tonic")] -impl From for SpanExporterBuilder { - fn from(exporter: TonicExporterBuilder) -> Self { - SpanExporterBuilder::Tonic(exporter) +impl HasTonicConfig for SpanExporterBuilder { + fn tonic_config(&mut self) -> &mut crate::TonicConfig { + &mut self.client.0.tonic_config } } #[cfg(any(feature = "http-proto", feature = "http-json"))] -impl From for SpanExporterBuilder { - fn from(exporter: HttpExporterBuilder) -> Self { - SpanExporterBuilder::Http(exporter) +impl HasHttpConfig for SpanExporterBuilder { + fn http_client_config(&mut self) -> &mut crate::exporter::http::HttpConfig { + &mut self.client.0.http_config } } @@ -203,6 +110,11 @@ impl From for SpanExporterBuilder { pub struct SpanExporter(Box); impl SpanExporter { + /// Obtain a builder to configure a [SpanExporter]. + pub fn builder() -> SpanExporterBuilder { + SpanExporterBuilder::default() + } + /// Build a new span exporter from a client pub fn new(client: impl opentelemetry_sdk::export::trace::SpanExporter + 'static) -> Self { SpanExporter(Box::new(client)) diff --git a/opentelemetry-otlp/tests/integration_test/.gitignore b/opentelemetry-otlp/tests/integration_test/.gitignore new file mode 100644 index 0000000000..059fd6dce2 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/.gitignore @@ -0,0 +1,2 @@ +lcov.info +actual/*.json diff --git a/opentelemetry-otlp/tests/integration_test/Cargo.toml b/opentelemetry-otlp/tests/integration_test/Cargo.toml index d7ef8de3d3..5314c1fe61 100644 --- a/opentelemetry-otlp/tests/integration_test/Cargo.toml +++ b/opentelemetry-otlp/tests/integration_test/Cargo.toml @@ -4,17 +4,31 @@ version = "0.1.0" edition = "2021" publish = false - [dependencies] -opentelemetry = { path = "../../../opentelemetry", features = ["metrics", "logs"] } -opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "logs", "testing"] } -opentelemetry-proto = { path = "../../../opentelemetry-proto", features = ["gen-tonic-messages", "trace", "logs", "with-serde"] } -log = { workspace = true } +opentelemetry = { path = "../../../opentelemetry", features = [] } +opentelemetry_sdk = { path = "../../../opentelemetry-sdk", features = ["rt-tokio", "testing"] } +opentelemetry-proto = { path = "../../../opentelemetry-proto", features = ["gen-tonic-messages", "trace", "logs", "metrics", "with-serde"] } tokio = { version = "1.0", features = ["full"] } serde_json = "1" -testcontainers = "0.15.0" +testcontainers = { version = "0.23.1", features = ["http_wait"]} +once_cell.workspace = true +anyhow = "1.0.94" +ctor = "0.2.9" +tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] } +tracing = {workspace = true} [target.'cfg(unix)'.dependencies] -opentelemetry-appender-log = { path = "../../../opentelemetry-appender-log", default-features = false} -opentelemetry-otlp = { path = "../../../opentelemetry-otlp", features = ["tonic", "metrics", "logs"] } -opentelemetry-semantic-conventions = { path = 
"../../../opentelemetry-semantic-conventions" } \ No newline at end of file +opentelemetry-appender-tracing = { path = "../../../opentelemetry-appender-tracing", default-features = false} +opentelemetry-otlp = { path = "../../../opentelemetry-otlp", default-features = false } +opentelemetry-semantic-conventions = { path = "../../../opentelemetry-semantic-conventions" } + +[features] +hyper-client = ["opentelemetry-otlp/hyper-client", "opentelemetry-otlp/http-proto", "opentelemetry-otlp/trace", "opentelemetry-otlp/logs", "opentelemetry-otlp/metrics", "internal-logs"] +reqwest-client = ["opentelemetry-otlp/reqwest-client", "opentelemetry-otlp/http-proto", "opentelemetry-otlp/trace","opentelemetry-otlp/logs", "opentelemetry-otlp/metrics", "internal-logs"] +reqwest-blocking-client = ["opentelemetry-otlp/reqwest-blocking-client", "opentelemetry-otlp/http-proto", "opentelemetry-otlp/trace","opentelemetry-otlp/logs", "opentelemetry-otlp/metrics", "internal-logs"] +tonic-client = ["opentelemetry-otlp/grpc-tonic", "opentelemetry-otlp/trace", "opentelemetry-otlp/logs", "opentelemetry-otlp/metrics", "internal-logs"] +internal-logs = ["opentelemetry-otlp/internal-logs"] + +# Keep tonic as the default client +default = ["tonic-client", "internal-logs"] + diff --git a/opentelemetry-otlp/tests/integration_test/README.md b/opentelemetry-otlp/tests/integration_test/README.md new file mode 100644 index 0000000000..10e34df6a3 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/README.md @@ -0,0 +1,17 @@ +# OTLP - Integration Tests + +This directory contains integration tests for `opentelemetry-otlp`. It uses +[testcontainers](https://testcontainers.com/) to start an instance of the OTEL +collector using [otel-collector-config.yaml](otel-collector-config.yaml), which +then uses a file exporter per signal to write the output it receives back to the +host machine. + +The tests connect directly to the collector on `localhost:4317` and +`localhost:4318`, push data through, and then check that what they expect has +popped back out into the files output by the collector. + +## Pre-requisites + +* Docker, for the test container +* TCP/4317 and TCP/4318 free on your local machine. If you are running another + collector, you'll need to stop it for the tests to run. diff --git a/opentelemetry-otlp/tests/integration_test/actual/README.md b/opentelemetry-otlp/tests/integration_test/actual/README.md new file mode 100644 index 0000000000..9380bd7807 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/actual/README.md @@ -0,0 +1 @@ +Output from the otel-collector goes here. 
diff --git a/opentelemetry-otlp/tests/integration_test/expected/different_metrics.json b/opentelemetry-otlp/tests/integration_test/expected/different_metrics.json new file mode 100644 index 0000000000..5b9bcdba0a --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/different_metrics.json @@ -0,0 +1,133 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "meter" + }, + "metrics": [ + { + "name": "counter_u64", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey1", + "value": { + "stringValue": "mydifferentval" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" + } + } + ], + "startTimeUnixNano": "1734094309366798000", + "timeUnixNano": "1734094317871514000", + "asInt": "15" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + }, + { + "name": "example_histogram", + "histogram": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey3", + "value": { + "stringValue": "myvalue4" + } + } + ], + "startTimeUnixNano": "1734094309366875000", + "timeUnixNano": "1734094317871537000", + "count": "1", + "sum": 42, + "bucketCounts": [ + "0", + "0", + "0", + "0", + "1", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0" + ], + "explicitBounds": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "min": 42, + "max": 42 + } + ], + "aggregationTemporality": 2 + } + }, + { + "name": "example_up_down_counter", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey5", + "value": { + "stringValue": "myvalue5" + } + } + ], + "startTimeUnixNano": "1734094309366941000", + "timeUnixNano": "1734094317871548000", + "asInt": "-1" + } + ], + "aggregationTemporality": 2 + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json b/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json index 923316dfed..97d5fc407d 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json +++ b/opentelemetry-otlp/tests/integration_test/expected/failed_logs.json @@ -14,8 +14,8 @@ "scopeLogs": [ { "scope": { - "name": "opentelemetry-log-appender", - "version": "0.3.0" + "name": "my-target", + "version": "" }, "logRecords": [ { diff --git a/opentelemetry-otlp/tests/integration_test/expected/failed_traces.json b/opentelemetry-otlp/tests/integration_test/expected/failed_traces.json index 1d065f4b2d..6b60b58ba8 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/failed_traces.json +++ b/opentelemetry-otlp/tests/integration_test/expected/failed_traces.json @@ -21,6 +21,7 @@ "traceId": "9b458af7378cba65253d7042d34fc72e", "spanId": "cd7cf7bf939930b7", "parentSpanId": "", + "flags": 1, "name": "Sub operation...", "kind": 1, "startTimeUnixNano": "1703985537070566698", @@ -40,32 +41,12 @@ } ], "status": {} - } - ] - } - ] - }, - { - "resource": { - "attributes": [ - { - "key": "service.name", - "value": { - "stringValue": "basic-otlp-tracing-example" - } - } - ] - }, - "scopeSpans": [ - { - "scope": { - "name": "ex.com/basic" - }, - "spans": [ + }, { "traceId": "9b458af7378cba65253d7042d34fc72e", "spanId": "d58cf2d702a061e0", "parentSpanId": "cd7cf7bf939930b7", + "flags": 1, "name": "operation", "kind": 1, "startTimeUnixNano": "1703985537070558635", diff --git 
a/opentelemetry-otlp/tests/integration_test/expected/logs.json b/opentelemetry-otlp/tests/integration_test/expected/logs.json index 4653189c82..5c1ee6cf2b 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/logs.json +++ b/opentelemetry-otlp/tests/integration_test/expected/logs.json @@ -14,8 +14,8 @@ "scopeLogs": [ { "scope": { - "name": "opentelemetry-log-appender", - "version": "0.3.0" + "name": "my-target", + "version": "" }, "logRecords": [ { diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics.json b/opentelemetry-otlp/tests/integration_test/expected/metrics.json index fa713b8ea3..f1711d889e 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/metrics.json +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics.json @@ -6,7 +6,7 @@ { "key": "service.name", "value": { - "stringValue": "my.service" + "stringValue": "metrics-integration-test" } } ] @@ -14,106 +14,120 @@ "scopeMetrics": [ { "scope": { - "name": "my.library", - "version": "1.0.0", - "attributes": [ - { - "key": "my.scope.attribute", - "value": { - "stringValue": "some scope attribute" - } - } - ] + "name": "meter" }, "metrics": [ { - "name": "my.counter", - "unit": "1", - "description": "I am a Counter", - "metadata": [], + "name": "counter_u64", "sum": { - "aggregationTemporality": 1, - "isMonotonic": true, "dataPoints": [ { - "asDouble": 5, - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", "attributes": [ { - "key": "my.counter.attr", + "key": "mykey1", "value": { - "stringValue": "some value" + "stringValue": "myvalue1" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" } } ], - "exemplars": [], - "flags": 0 + "startTimeUnixNano": "1734094309366798000", + "timeUnixNano": "1734094317871514000", + "asInt": "10" } - ] + ], + "aggregationTemporality": 2, + "isMonotonic": true } }, { - "name": "my.gauge", - "unit": "1", - "description": "I am a Gauge", - "metadata": [], - "gauge": { + "name": "example_histogram", + "histogram": { "dataPoints": [ { - "asDouble": 10, - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", "attributes": [ { - "key": "my.gauge.attr", + "key": "mykey3", "value": { - "stringValue": "some value" + "stringValue": "myvalue4" } } ], - "exemplars": [], - "flags": 0 + "startTimeUnixNano": "1734094309366875000", + "timeUnixNano": "1734094317871537000", + "count": "1", + "sum": 42, + "bucketCounts": [ + "0", + "0", + "0", + "0", + "1", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0" + ], + "explicitBounds": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "min": 42, + "max": 42 } - ] + ], + "aggregationTemporality": 2 } }, { - "name": "my.histogram", - "unit": "1", - "description": "I am a Histogram", - "metadata": [], - "histogram": { - "aggregationTemporality": 1, + "name": "example_up_down_counter", + "sum": { "dataPoints": [ { - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", - "count": 2, - "sum": 2, - "bucketCounts": [1,1], - "explicitBounds": [1], - "min": 0, - "max": 2, "attributes": [ { - "key": "my.histogram.attr", + "key": "mykey5", "value": { - "stringValue": "some value" + "stringValue": "myvalue5" } } ], - "exemplars": [], - "flags": 0 + "startTimeUnixNano": "1734094309366941000", + "timeUnixNano": "1734094317871548000", + "asInt": "-1" } - ] + ], + "aggregationTemporality": 2 } } - ], - "schemaUrl": "whatever" + ] } - ], - 
"schemaUrl": "whatever" + ] } ] -} \ No newline at end of file +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_flush_on_shutdown.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_flush_on_shutdown.json new file mode 100644 index 0000000000..c390a70664 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_flush_on_shutdown.json @@ -0,0 +1,39 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_flush_on_shutdown" + }, + "metrics": [ + { + "name": "counter_", + "sum": { + "dataPoints": [ + { + "startTimeUnixNano": "1734370440803831000", + "timeUnixNano": "1734370440803905000", + "asInt": "123" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_histogram_meter.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_histogram_meter.json new file mode 100644 index 0000000000..9ca8a5a49e --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_histogram_meter.json @@ -0,0 +1,84 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_histogram_meter" + }, + "metrics": [ + { + "name": "example_histogram", + "histogram": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey3", + "value": { + "stringValue": "myvalue4" + } + } + ], + "startTimeUnixNano": "1734259947902842000", + "timeUnixNano": "1734259949551023000", + "count": "1", + "sum": 42, + "bucketCounts": [ + "0", + "0", + "0", + "0", + "1", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0", + "0" + ], + "explicitBounds": [ + 0, + 5, + 10, + 25, + 50, + 75, + 100, + 250, + 500, + 750, + 1000, + 2500, + 5000, + 7500, + 10000 + ], + "min": 42, + "max": 42 + } + ], + "aggregationTemporality": 2 + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_u64_counter_meter.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_u64_counter_meter.json new file mode 100644 index 0000000000..aeb3da7b20 --- /dev/null +++ b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_u64_counter_meter.json @@ -0,0 +1,53 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_u64_counter_meter" + }, + "metrics": [ + { + "name": "counter_u64", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey1", + "value": { + "stringValue": "myvalue1" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" + } + } + ], + "startTimeUnixNano": "1734255506254812000", + "timeUnixNano": "1734255533415552000", + "asInt": "10" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/metrics/test_up_down_meter.json b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_up_down_meter.json new file mode 100755 index 0000000000..a82cd63acf --- /dev/null +++ 
b/opentelemetry-otlp/tests/integration_test/expected/metrics/test_up_down_meter.json @@ -0,0 +1,46 @@ +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "metrics-integration-test" + } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "test_up_down_meter" + }, + "metrics": [ + { + "name": "example_up_down_counter", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "mykey5", + "value": { + "stringValue": "myvalue5" + } + } + ], + "startTimeUnixNano": "1734259947902844000", + "timeUnixNano": "1734259952816822000", + "asInt": "-1" + } + ], + "aggregationTemporality": 2 + } + } + ] + } + ] + } + ] +} diff --git a/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json b/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json index 4910e128a2..de13fb3cbf 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json +++ b/opentelemetry-otlp/tests/integration_test/expected/serialized_metrics.json @@ -6,7 +6,7 @@ { "key": "service.name", "value": { - "stringValue": "my.service" + "stringValue": "metrics-integration-test" } } ], @@ -15,112 +15,81 @@ "scopeMetrics": [ { "scope": { - "name": "my.library", - "version": "1.0.0", - "attributes": [ - { - "key": "my.scope.attribute", - "value": { - "stringValue": "some scope attribute" - } - } - ], + "name": "meter", + "version": "", + "attributes": [], "droppedAttributesCount": 0 }, "metrics": [ { - "name": "my.counter", - "description": "I am a Counter", - "unit": "1", + "name": "counter_u64", + "description": "", + "unit": "", "metadata": [], "sum": { "dataPoints": [ { "attributes": [ { - "key": "my.counter.attr", + "key": "mykey1", "value": { - "stringValue": "some value" + "stringValue": "myvalue1" + } + }, + { + "key": "mykey2", + "value": { + "stringValue": "myvalue2" } } ], - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", + "startTimeUnixNano": "1734094309366798000", + "timeUnixNano": "1734094317871514000", "exemplars": [], - "flags": 0, - "asDouble": 5.0 + "flags": 0 } ], - "aggregationTemporality": 1, + "aggregationTemporality": 2, "isMonotonic": true } }, { - "name": "my.gauge", - "description": "I am a Gauge", - "unit": "1", - "metadata": [], - "gauge": { - "dataPoints": [ - { - "attributes": [ - { - "key": "my.gauge.attr", - "value": { - "stringValue": "some value" - } - } - ], - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", - "exemplars": [], - "flags": 0, - "asDouble": 10.0 - } - ] - } + "name": "example_histogram", + "description": "", + "unit": "", + "metadata": [] }, { - "name": "my.histogram", - "description": "I am a Histogram", - "unit": "1", + "name": "example_up_down_counter", + "description": "", + "unit": "", "metadata": [], - "histogram": { + "sum": { "dataPoints": [ { "attributes": [ { - "key": "my.histogram.attr", + "key": "mykey5", "value": { - "stringValue": "some value" + "stringValue": "myvalue5" } } ], - "startTimeUnixNano": "1544712660300000000", - "timeUnixNano": "1544712660300000000", - "count": 2, - "sum": 2.0, - "bucketCounts": [ - 1, - 1 - ], - "explicitBounds": [ - 1.0 - ], + "startTimeUnixNano": "1734094309366941000", + "timeUnixNano": "1734094317871548000", "exemplars": [], - "flags": 0, - "min": 0.0, - "max": 2.0 + "flags": 0 } ], - "aggregationTemporality": 1 + "aggregationTemporality": 2, + "isMonotonic": false } } ], - "schemaUrl": "whatever" + "schemaUrl": "" } ], - 
"schemaUrl": "whatever" + "schemaUrl": "" } ] } \ No newline at end of file diff --git a/opentelemetry-otlp/tests/integration_test/expected/serialized_traces.json b/opentelemetry-otlp/tests/integration_test/expected/serialized_traces.json index e5982877cf..f1d64108ad 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/serialized_traces.json +++ b/opentelemetry-otlp/tests/integration_test/expected/serialized_traces.json @@ -26,7 +26,7 @@ "spanId": "cd7cf7bf939930b7", "traceState": "", "parentSpanId": "d58cf2d702a061e0", - "flags": 0, + "flags": 1, "name": "Sub operation...", "kind": 1, "startTimeUnixNano": "1703985537070566698", @@ -55,40 +55,13 @@ "message": "", "code": 0 } - } - ], - "schemaUrl": "" - } - ], - "schemaUrl": "" - }, - { - "resource": { - "attributes": [ - { - "key": "service.name", - "value": { - "stringValue": "basic-otlp-tracing-example" - } - } - ], - "droppedAttributesCount": 0 - }, - "scopeSpans": [ - { - "scope": { - "name": "ex.com/basic", - "version": "", - "attributes": [], - "droppedAttributesCount": 0 - }, - "spans": [ + }, { "traceId": "9b458af7378cba65253d7042d34fc72e", "spanId": "d58cf2d702a061e0", "traceState": "", "parentSpanId": "", - "flags": 0, + "flags": 1, "name": "operation", "kind": 1, "startTimeUnixNano": "1703985537070558635", @@ -112,12 +85,6 @@ "value": { "intValue": "100" } - }, - { - "key": "number/int", - "value": { - "intValue": "100" - } } ], "droppedAttributesCount": 0 diff --git a/opentelemetry-otlp/tests/integration_test/expected/traces.json b/opentelemetry-otlp/tests/integration_test/expected/traces.json index 97e2b04d4d..faa581dd3b 100644 --- a/opentelemetry-otlp/tests/integration_test/expected/traces.json +++ b/opentelemetry-otlp/tests/integration_test/expected/traces.json @@ -21,6 +21,7 @@ "traceId": "9b458af7378cba65253d7042d34fc72e", "spanId": "cd7cf7bf939930b7", "parentSpanId": "d58cf2d702a061e0", + "flags": 1, "name": "Sub operation...", "kind": 1, "startTimeUnixNano": "1703985537070566698", @@ -40,32 +41,12 @@ } ], "status": {} - } - ] - } - ] - }, - { - "resource": { - "attributes": [ - { - "key": "service.name", - "value": { - "stringValue": "basic-otlp-tracing-example" - } - } - ] - }, - "scopeSpans": [ - { - "scope": { - "name": "ex.com/basic" - }, - "spans": [ + }, { "traceId": "9b458af7378cba65253d7042d34fc72e", "spanId": "d58cf2d702a061e0", "parentSpanId": "", + "flags": 1, "name": "operation", "kind": 1, "startTimeUnixNano": "1703985537070558635", @@ -88,12 +69,6 @@ "value": { "intValue": "100" } - }, - { - "key": "number/int", - "value": { - "intValue": 100 - } } ] } diff --git a/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml b/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml index 2c5a321993..548d6fa44a 100644 --- a/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml +++ b/opentelemetry-otlp/tests/integration_test/otel-collector-config.yaml @@ -2,17 +2,28 @@ receivers: otlp: protocols: grpc: + endpoint: 0.0.0.0:4317 http: + endpoint: 0.0.0.0:4318 exporters: - file: - path: /testresults/result.json + file/traces: + path: /testresults/traces.json + file/logs: + path: /testresults/logs.json + rotation: + file/metrics: + path: /testresults/metrics.json service: pipelines: traces: receivers: [otlp] - exporters: [file] + exporters: [file/traces] logs: receivers: [otlp] - exporters: [file] + exporters: [file/logs] + metrics: + receivers: [otlp] + exporters: [file/metrics] + diff --git a/opentelemetry-otlp/tests/integration_test/src/images.rs 
b/opentelemetry-otlp/tests/integration_test/src/images.rs
deleted file mode 100644
index 37a9c1b38b..0000000000
--- a/opentelemetry-otlp/tests/integration_test/src/images.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-use std::collections::HashMap;
-use testcontainers::core::WaitFor;
-use testcontainers::Image;
-
-pub struct Collector {
-    volumes: HashMap<String, String>,
-}
-
-impl Image for Collector {
-    type Args = ();
-
-    fn name(&self) -> String {
-        "otel/opentelemetry-collector".to_string()
-    }
-
-    fn tag(&self) -> String {
-        "latest".to_string()
-    }
-
-    fn ready_conditions(&self) -> Vec<WaitFor> {
-        vec![WaitFor::Nothing]
-    }
-
-    fn volumes(&self) -> Box<dyn Iterator<Item = (&String, &String)> + '_> {
-        Box::new(self.volumes.iter())
-    }
-
-    fn expose_ports(&self) -> Vec<u16> {
-        vec![
-            // 4317, // gRPC port, defined in Dockerfile
-            // 4318, // HTTP port, defined in Dockerfile
-        ]
-    }
-}
-
-impl Default for Collector {
-    fn default() -> Self {
-        Collector {
-            volumes: HashMap::from([(
-                "./otel-collector-config.yaml".into(),
-                "/etc/otelcol/config.yaml".into(),
-            )]),
-        }
-    }
-}
-
-impl Collector {
-    pub fn with_volume(mut self, src: &str, dst: &str) -> Self {
-        self.volumes.insert(src.into(), dst.into());
-        self
-    }
-}
diff --git a/opentelemetry-otlp/tests/integration_test/src/lib.rs b/opentelemetry-otlp/tests/integration_test/src/lib.rs
index e6bc88c742..65faf81bf4 100644
--- a/opentelemetry-otlp/tests/integration_test/src/lib.rs
+++ b/opentelemetry-otlp/tests/integration_test/src/lib.rs
@@ -1,4 +1,4 @@
-pub mod images;
 pub mod logs_asserter;
 pub mod metrics_asserter;
+pub mod test_utils;
 pub mod trace_asserter;
diff --git a/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs b/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs
index da045691f5..8caa49393d 100644
--- a/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs
+++ b/opentelemetry-otlp/tests/integration_test/src/logs_asserter.rs
@@ -43,6 +43,8 @@ impl LogsAsserter {
             let result_scope_logs = &result_resource_logs.scope_logs[i];
             let expected_scope_logs = &expected_resource_logs.scope_logs[i];
 
+            assert_eq!(result_scope_logs.scope, expected_scope_logs.scope);
+
             results_logs.extend(result_scope_logs.log_records.clone());
             expected_logs.extend(expected_scope_logs.log_records.clone());
         }
diff --git a/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs b/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs
index 4845270999..f370df8a62 100644
--- a/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs
+++ b/opentelemetry-otlp/tests/integration_test/src/metrics_asserter.rs
@@ -1,40 +1,64 @@
+use anyhow::Result;
+use serde_json::Value;
 use std::fs::File;
+use std::io::{BufReader, Read};
 
-use opentelemetry_proto::tonic::metrics::v1::{MetricsData, ResourceMetrics};
+pub fn read_metrics_from_json(file: File) -> Result<Value> {
+    // Create a buffered reader for the file
+    let mut reader = BufReader::new(file);
+    let mut contents = String::new();
+
+    // Read the file contents into a string
+    reader
+        .read_to_string(&mut contents)
+        .expect("Failed to read json file");
+
+    // Parse the contents into a JSON Value
+    let metrics_data: Value = serde_json::from_str(&contents)?;
+    Ok(metrics_data)
+}
 
 pub struct MetricsAsserter {
-    results: Vec<ResourceMetrics>,
-    expected: Vec<ResourceMetrics>,
+    results: Value,
+    expected: Value,
 }
 
 impl MetricsAsserter {
-    pub fn new(results: Vec<ResourceMetrics>, expected: Vec<ResourceMetrics>) -> Self {
+    pub fn new(results: Value, expected: Value) -> Self {
         MetricsAsserter { results, expected }
     }
 
-    pub fn assert(self) {
-        self.assert_resource_metrics_eq(&self.results, &self.expected);
+    pub fn assert(mut self) {
+        // Normalize JSON by cleaning out timestamps
+        Self::zero_out_timestamps(&mut self.results);
+        Self::zero_out_timestamps(&mut self.expected);
+
+        // Perform the assertion
+        assert_eq!(
+            self.results, self.expected,
+            "Metrics did not match. Results: {:#?}, Expected: {:#?}",
+            self.results, self.expected
+        );
     }
 
-    fn assert_resource_metrics_eq(
-        &self,
-        results: &[ResourceMetrics],
-        expected: &[ResourceMetrics],
-    ) {
-        assert_eq!(results.len(), expected.len());
-        for i in 0..results.len() {
-            let result_resource_metrics = &results[i];
-            let expected_resource_metrics = &expected[i];
-            assert_eq!(result_resource_metrics, expected_resource_metrics);
+    /// Recursively removes or zeros out timestamp fields in the JSON
+    fn zero_out_timestamps(value: &mut Value) {
+        match value {
+            Value::Object(map) => {
+                for (key, val) in map.iter_mut() {
+                    if key == "startTimeUnixNano" || key == "timeUnixNano" {
+                        *val = Value::String("0".to_string());
+                    } else {
+                        Self::zero_out_timestamps(val);
+                    }
+                }
+            }
+            Value::Array(array) => {
+                for item in array.iter_mut() {
+                    Self::zero_out_timestamps(item);
+                }
+            }
+            _ => {}
         }
     }
 }
-
-// read a file contains ResourceMetrics in json format
-pub fn read_metrics_from_json(file: File) -> Vec<ResourceMetrics> {
-    let reader = std::io::BufReader::new(file);
-
-    let metrics_data: MetricsData =
-        serde_json::from_reader(reader).expect("Failed to read json file");
-    metrics_data.resource_metrics
-}
diff --git a/opentelemetry-otlp/tests/integration_test/src/test_utils.rs b/opentelemetry-otlp/tests/integration_test/src/test_utils.rs
new file mode 100644
index 0000000000..bd62674868
--- /dev/null
+++ b/opentelemetry-otlp/tests/integration_test/src/test_utils.rs
@@ -0,0 +1,151 @@
+//! Supporting infrastructure for OTLP integration tests.
+//!
+//! This module provides the pieces needed to work with an actual opentelemetry-collector
+//! instance, which is started in Docker and has its output plumbed back into the host filesystem.
+//! This lets us write tests that push data over OTLP (HTTP or gRPC) to the collector, and then read
+//! that data back from the filesystem to ensure everything worked out as expected.
+//!
+//! To use this module, all you need to do is call `start_collector_container()` from each
+//! of your tests, and use a single `#[dtor]` at the end of your test file to call
+//! `stop_collector_container`. Note that as cargo integration tests run a process-per-test-file,
+//! each test will get its own fresh instance of the container.
+//!
+//! Only a single test suite can run at once, as each container has statically mapped ports, but
+//! this works nicely with the way cargo executes the suite.
+//!
+//! To skip integration tests with cargo, you can run `cargo test --mod`, which will run unit tests
+//! only.
+//!
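+//! As a quick sketch of the pattern described above (the test name and body
+//! here are illustrative, not part of this module; see `tests/logs.rs` and
+//! `tests/metrics.rs` for the real thing):
+//!
+//! ```ignore
+//! use ctor::dtor;
+//! use integration_test_runner::test_utils;
+//!
+//! #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+//! async fn my_test() -> anyhow::Result<()> {
+//!     // Start (or reuse) the collector container for this test binary.
+//!     test_utils::start_collector_container().await?;
+//!     // ... emit telemetry over OTLP, then assert on the files the
+//!     // collector writes back into ./actual ...
+//!     Ok(())
+//! }
+//!
+//! // Runs once, as the test process exits, so the container does not keep
+//! // hold of the statically mapped ports.
+//! #[dtor]
+//! fn shutdown() {
+//!     test_utils::stop_collector_container();
+//! }
+//! ```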
+#![cfg(unix)]
+
+use anyhow::Result;
+use opentelemetry::{otel_debug, otel_info};
+use std::fs;
+use std::fs::File;
+use std::os::unix::fs::PermissionsExt;
+use std::sync::{Arc, Mutex, Once, OnceLock};
+use testcontainers::core::wait::HttpWaitStrategy;
+use testcontainers::core::{ContainerPort, Mount};
+use testcontainers::{core::WaitFor, runners::AsyncRunner, ContainerAsync, GenericImage, ImageExt};
+use tracing_subscriber::layer::SubscriberExt;
+use tracing_subscriber::util::SubscriberInitExt;
+use tracing_subscriber::{EnvFilter, Layer};
+
+// Static references for container management
+static COLLECTOR_ARC: OnceLock<Mutex<Option<Arc<ContainerAsync<GenericImage>>>>> = OnceLock::new();
+
+pub static METRICS_FILE: &str = "./actual/metrics.json";
+pub static LOGS_FILE: &str = "./actual/logs.json";
+pub static TRACES_FILE: &str = "./actual/traces.json";
+
+static INIT_TRACING: Once = Once::new();
+
+fn init_tracing() {
+    INIT_TRACING.call_once(|| {
+        // Info and above for all, debug for opentelemetry
+        let filter_fmt =
+            EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
+        let fmt_layer = tracing_subscriber::fmt::layer()
+            .with_thread_names(true)
+            .with_filter(filter_fmt);
+
+        // Initialize the tracing subscriber with the OpenTelemetry layer and the
+        // Fmt layer.
+        tracing_subscriber::registry().with(fmt_layer).init();
+        otel_info!(name: "tracing initializing completed!");
+    });
+}
+
+pub async fn start_collector_container() -> Result<()> {
+    init_tracing();
+
+    let mut arc_guard = COLLECTOR_ARC
+        .get_or_init(|| Mutex::new(None))
+        .lock()
+        .unwrap();
+
+    // If the container isn't running, start it.
+    if arc_guard.is_none() {
+        // Make sure all our test data is mounted
+        upsert_empty_file(METRICS_FILE);
+        upsert_empty_file(TRACES_FILE);
+        upsert_empty_file(LOGS_FILE);
+
+        // Start a new container
+        let container_instance = GenericImage::new("otel/opentelemetry-collector", "latest")
+            .with_wait_for(WaitFor::http(
+                HttpWaitStrategy::new("/")
+                    .with_expected_status_code(404u16)
+                    .with_port(ContainerPort::Tcp(4318)),
+            ))
+            .with_mapped_port(4317, ContainerPort::Tcp(4317))
+            .with_mapped_port(4318, ContainerPort::Tcp(4318))
+            .with_mount(Mount::bind_mount(
+                fs::canonicalize("./otel-collector-config.yaml")?.to_string_lossy(),
+                "/etc/otelcol/config.yaml",
+            ))
+            .with_mount(Mount::bind_mount(
+                fs::canonicalize("./actual/logs.json")?.to_string_lossy(),
+                "/testresults/logs.json",
+            ))
+            .with_mount(Mount::bind_mount(
+                fs::canonicalize("./actual/metrics.json")?.to_string_lossy(),
+                "/testresults/metrics.json",
+            ))
+            .with_mount(Mount::bind_mount(
+                fs::canonicalize("./actual/traces.json")?.to_string_lossy(),
+                "/testresults/traces.json",
+            ))
+            .start()
+            .await?;
+
+        let container = Arc::new(container_instance);
+        otel_debug!(
+            name: "Container started",
+            ports = format!("{:?}", container.ports().await));
+
+        // Give the container a second to stabilize
+        //tokio::time::sleep(std::time::Duration::from_secs(5)).await;
+
+        // Store the container in COLLECTOR_ARC
+        *arc_guard = Some(Arc::clone(&container));
+    }
+
+    Ok(())
+}
+
+///
+/// Creates an empty file with permissions that make it usable both within docker
+/// and on the host.
+///
+fn upsert_empty_file(path: &str) -> File {
+    let file = File::create(path).unwrap();
+    file.set_permissions(std::fs::Permissions::from_mode(0o666))
+        .unwrap();
+    file
+}
+
+///
+/// Shuts down our collector container. This should be run as part of each test
+/// suite shutting down!
+///
+pub fn stop_collector_container() {
+    // This is a bit heinous. We don't have an async runtime left when
+    // we hit this call, so we can't use the async methods on the testcontainers
+    // interface to shutdown.
+    // We _need_ to do this here, because otherwise we have no "all the tests in the module
+    // were complete" hook.
+    //
+    // https://github.com/testcontainers/testcontainers-rs/issues/707
+    otel_debug!(name: "stop_collector_container");
+
+    if let Some(mutex_option_arc) = COLLECTOR_ARC.get() {
+        let guard = mutex_option_arc.lock().unwrap();
+        if let Some(container_arc) = &*guard {
+            std::process::Command::new("docker")
+                .args(["container", "rm", "-f", container_arc.id()])
+                .output()
+                .expect("failed to stop testcontainer");
+        }
+    }
+}
diff --git a/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs b/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs
index 00c7c2300d..ce7eec928a 100644
--- a/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs
+++ b/opentelemetry-otlp/tests/integration_test/src/trace_asserter.rs
@@ -1,3 +1,4 @@
+use anyhow::Result;
 use opentelemetry_proto::tonic::trace::v1::{ResourceSpans, Span, TracesData};
 use std::collections::{HashMap, HashSet};
 use std::fmt::{Debug, Formatter};
@@ -213,9 +214,9 @@ fn span_eq(left: &Span, right: &Span) -> bool {
 }
 
 // read a file contains ResourceSpans in json format
-pub fn read_spans_from_json(file: File) -> Vec<ResourceSpans> {
+pub fn read_spans_from_json(file: File) -> Result<Vec<ResourceSpans>> {
     let reader = std::io::BufReader::new(file);
 
-    let trace_data: TracesData = serde_json::from_reader(reader).expect("Failed to read json file");
-    trace_data.resource_spans
+    let trace_data: TracesData = serde_json::from_reader(reader)?;
+    Ok(trace_data.resource_spans)
 }
diff --git a/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs b/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs
deleted file mode 100644
index 5f5468d0dc..0000000000
--- a/opentelemetry-otlp/tests/integration_test/tests/integration_tests.rs
+++ /dev/null
@@ -1,142 +0,0 @@
-#![cfg(unix)]
-
-use integration_test_runner::images::Collector;
-use std::fs::File;
-use std::os::unix::fs::PermissionsExt;
-use std::time::Duration;
-use testcontainers::clients::Cli;
-use testcontainers::core::Port;
-use testcontainers::RunnableImage;
-
-mod logs;
-mod metrics;
-mod traces;
-
-const COLLECTOR_CONTAINER_NAME: &str = "otel-collector";
-const TEST_RESULT_DIR_IN_CONTAINER: &str = "testresults";
-const EXPECTED_DIR: &str = "./expected";
-const RESULT_FILE_PATH: &str = "./result.json";
-
-struct TestSuite {
-    expected_file_path: &'static str,
-}
-
-impl TestSuite {
-    fn new(expected_file_path: &'static str) -> Self {
-        Self { expected_file_path }
-    }
-
-    pub fn expected_file_path(&self) -> String {
-        format!("{}/{}", EXPECTED_DIR, self.expected_file_path)
-    }
-
-    pub fn result_file_path_in_container(&self) -> String {
-        format!("/{}/{}", TEST_RESULT_DIR_IN_CONTAINER, RESULT_FILE_PATH)
-    }
-
-    pub fn result_file_path(&self) -> String {
-        format!("./{}", RESULT_FILE_PATH)
-    }
-
-    /// Create a empty file on localhost and copy it to container with proper permissions
-    /// we have to create the file for the container otherwise we will encounter a permission denied error.
- /// see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/3159 - pub fn create_temporary_result_file(&self) -> File { - let file = File::create(self.result_file_path()).unwrap(); - file.set_permissions(std::fs::Permissions::from_mode(0o666)) - .unwrap(); - file - } -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -#[ignore] // skip when running unit test -async fn integration_tests() { - trace_integration_tests().await; - logs_integration_tests().await; -} - -async fn trace_integration_tests() { - let test_suites = [TestSuite::new("traces.json")]; - let mut collector_image = Collector::default(); - for test in test_suites.as_ref() { - let _ = test.create_temporary_result_file(); - collector_image = collector_image.with_volume( - test.result_file_path().as_str(), - test.result_file_path_in_container().as_str(), - ); - } - - let docker = Cli::default(); - let mut image = - RunnableImage::from(collector_image).with_container_name(COLLECTOR_CONTAINER_NAME); - - for port in [ - 4317, // gRPC port - 4318, // HTTP port - ] { - image = image.with_mapped_port(Port { - local: port, - internal: port, - }) - } - - let collector_container = docker.run(image); - - tokio::time::sleep(Duration::from_secs(5)).await; - traces::traces().await.unwrap(); - - // wait for file to flush to disks - // ideally we should use volume mount but otel collector file exporter doesn't handle permission too well - // bind mount mitigate the issue by set up the permission correctly on host system - tokio::time::sleep(Duration::from_secs(5)).await; - traces::assert_traces_results( - test_suites[0].result_file_path().as_str(), - test_suites[0].expected_file_path().as_str(), - ); - - collector_container.stop(); -} - -async fn logs_integration_tests() { - let test_suites = [TestSuite::new("logs.json")]; - - let mut collector_image = Collector::default(); - for test in test_suites.as_ref() { - let _ = test.create_temporary_result_file(); - collector_image = collector_image.with_volume( - test.result_file_path().as_str(), - test.result_file_path_in_container().as_str(), - ); - } - - let docker = Cli::default(); - let mut image = - RunnableImage::from(collector_image).with_container_name(COLLECTOR_CONTAINER_NAME); - - for port in [ - 4317, // gRPC port - 4318, // HTTP port - ] { - image = image.with_mapped_port(Port { - local: port, - internal: port, - }) - } - - let collector_container = docker.run(image); - - tokio::time::sleep(Duration::from_secs(5)).await; - logs::logs().await.unwrap(); - - // wait for file to flush to disks - // ideally we should use volume mount but otel collector file exporter doesn't handle permission too well - // bind mount mitigate the issue by set up the permission correctly on host system - tokio::time::sleep(Duration::from_secs(5)).await; - logs::assert_logs_results( - test_suites[0].result_file_path().as_str(), - test_suites[0].expected_file_path().as_str(), - ); - - collector_container.stop(); -} diff --git a/opentelemetry-otlp/tests/integration_test/tests/logs.rs b/opentelemetry-otlp/tests/integration_test/tests/logs.rs index 0c4fb773e9..a20f84475f 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/logs.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/logs.rs @@ -1,35 +1,89 @@ #![cfg(unix)] +use anyhow::Result; +use ctor::dtor; use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter}; -use log::{info, Level}; -use opentelemetry::logs::LogError; -use opentelemetry::KeyValue; -use 
opentelemetry_appender_log::OpenTelemetryLogBridge;
-use opentelemetry_sdk::{logs as sdklogs, runtime, Resource};
-use std::error::Error;
+use integration_test_runner::test_utils;
+use opentelemetry_otlp::LogExporter;
+use opentelemetry_sdk::logs::LoggerProvider;
+use opentelemetry_sdk::{logs as sdklogs, Resource};
 use std::fs::File;
 use std::os::unix::fs::MetadataExt;
 
-fn init_logs() -> Result<sdklogs::LoggerProvider, LogError> {
-    opentelemetry_otlp::new_pipeline()
-        .logging()
-        .with_exporter(opentelemetry_otlp::new_exporter().tonic())
-        .with_resource(Resource::new(vec![KeyValue::new(
-            opentelemetry_semantic_conventions::resource::SERVICE_NAME,
-            "logs-integration-test",
-        )]))
-        .install_batch(runtime::Tokio)
+fn init_logs() -> Result<LoggerProvider> {
+    let exporter_builder = LogExporter::builder();
+    #[cfg(feature = "tonic-client")]
+    let exporter_builder = exporter_builder.with_tonic();
+    #[cfg(not(feature = "tonic-client"))]
+    #[cfg(any(
+        feature = "hyper-client",
+        feature = "reqwest-client",
+        feature = "reqwest-blocking-client"
+    ))]
+    let exporter_builder = exporter_builder.with_http();
+
+    let exporter = exporter_builder.build()?;
+
+    Ok(LoggerProvider::builder()
+        .with_batch_exporter(exporter)
+        .with_resource(
+            Resource::builder_empty()
+                .with_service_name("logs-integration-test")
+                .build(),
+        )
+        .build())
 }
 
-pub async fn logs() -> Result<(), Box<dyn Error>> {
-    let logger_provider = init_logs().unwrap();
-    let otel_log_appender = OpenTelemetryLogBridge::new(&logger_provider);
-    log::set_boxed_logger(Box::new(otel_log_appender))?;
-    log::set_max_level(Level::Info.to_level_filter());
+#[cfg(test)]
+mod logtests {
+    use super::*;
+    use integration_test_runner::logs_asserter::{read_logs_from_json, LogsAsserter};
+    use std::{fs::File, time::Duration};
 
+    #[test]
+    #[should_panic(expected = "assertion `left == right` failed: body does not match")]
+    pub fn test_assert_logs_eq_failure() {
+        let left = read_logs_from_json(File::open("./expected/logs.json").unwrap());
+        let right = read_logs_from_json(File::open("./expected/failed_logs.json").unwrap());
+        LogsAsserter::new(right, left).assert();
+    }
+
+    #[test]
+    pub fn test_assert_logs_eq() {
+        let logs = read_logs_from_json(File::open("./expected/logs.json").unwrap());
+        LogsAsserter::new(logs.clone(), logs).assert();
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+    #[cfg(not(feature = "hyper-client"))]
+    #[cfg(not(feature = "reqwest-client"))]
+    pub async fn test_logs() -> Result<()> {
+        // Make sure the container is running
+
+        use integration_test_runner::test_utils;
+        use opentelemetry_appender_tracing::layer;
+        use tracing::info;
+        use tracing_subscriber::layer::SubscriberExt;
+
+        use crate::{assert_logs_results, init_logs};
+        test_utils::start_collector_container().await?;
 
-    info!(target: "my-target", "hello from {}. My price is {}.", "banana", 2.99);
-    let _ = logger_provider.shutdown();
-    Ok(())
+        let logger_provider = init_logs().unwrap();
+        let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
+        let subscriber = tracing_subscriber::registry().with(layer);
+        {
+            let _guard = tracing::subscriber::set_default(subscriber);
+            info!(target: "my-target", "hello from {}. My price is {}.", "banana", 2.99);
+        }
+        // TODO: remove below wait before calling logger_provider.shutdown()
+        // tokio::time::sleep(Duration::from_secs(10)).await;
+        let _ = logger_provider.shutdown();
+
+        tokio::time::sleep(Duration::from_secs(10)).await;
+
+        assert_logs_results(test_utils::LOGS_FILE, "expected/logs.json");
+
+        Ok(())
+    }
 }
 
 pub fn assert_logs_results(result: &str, expected: &str) {
@@ -41,16 +95,11 @@
     assert!(File::open(result).unwrap().metadata().unwrap().size() > 0)
 }
 
-#[test]
-#[should_panic(expected = "assertion `left == right` failed: body does not match")]
-pub fn test_assert_logs_eq_failure() {
-    let left = read_logs_from_json(File::open("./expected/logs.json").unwrap());
-    let right = read_logs_from_json(File::open("./expected/failed_logs.json").unwrap());
-    LogsAsserter::new(right, left).assert();
-}
-
-#[test]
-pub fn test_assert_logs_eq() {
-    let logs = read_logs_from_json(File::open("./expected/logs.json").unwrap());
-    LogsAsserter::new(logs.clone(), logs).assert();
+///
+/// Make sure we stop the collector container, otherwise it will sit around hogging our
+/// ports and subsequent test runs will fail.
+///
+#[dtor]
+fn shutdown() {
+    test_utils::stop_collector_container();
 }
diff --git a/opentelemetry-otlp/tests/integration_test/tests/metrics.rs b/opentelemetry-otlp/tests/integration_test/tests/metrics.rs
index 5395c67d58..125c501e14 100644
--- a/opentelemetry-otlp/tests/integration_test/tests/metrics.rs
+++ b/opentelemetry-otlp/tests/integration_test/tests/metrics.rs
@@ -1,23 +1,332 @@
-use std::{fs::File, io::Write};
+//! OTLP integration tests for metrics
+//! Note: these are all expressed using Serde types for the deserialized metrics records.
+//! We might consider changing this once we have fixed the issue identified in the #[ignore]d test
+//! `test_roundtrip_example_data` - as the roundtripping is currently broken for metrics.
+//!
+#![cfg(unix)]
+use anyhow::{Context, Result};
+use ctor::dtor;
 use integration_test_runner::metrics_asserter::{read_metrics_from_json, MetricsAsserter};
+use integration_test_runner::test_utils;
+use integration_test_runner::test_utils::start_collector_container;
+use opentelemetry::KeyValue;
+use opentelemetry_otlp::MetricExporter;
 use opentelemetry_proto::tonic::metrics::v1::MetricsData;
+use opentelemetry_sdk::metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider};
+use opentelemetry_sdk::Resource;
+use serde_json::Value;
+use std::fs;
+use std::fs::File;
+use std::sync::Mutex;
+use std::time::Duration;
 
-#[test]
-fn test_serde() {
-    let metrics = read_metrics_from_json(File::open("./expected/metrics.json").unwrap());
+static SETUP_DONE: Mutex<bool> = Mutex::new(false);
 
-    let json = serde_json::to_string_pretty(&MetricsData {
-        resource_metrics: metrics,
-    })
-    .expect("Failed to serialize metrics");
+static RESULT_PATH: &str = "actual/metrics.json";
 
-    // Write to file.
-    let mut file = File::create("./expected/serialized_metrics.json").unwrap();
-    file.write_all(json.as_bytes()).unwrap();
+/// Initializes the OpenTelemetry metrics pipeline
+async fn init_metrics() -> SdkMeterProvider {
+    let exporter = create_exporter();
 
-    let left = read_metrics_from_json(File::open("./expected/metrics.json").unwrap());
-    let right = read_metrics_from_json(File::open("./expected/serialized_metrics.json").unwrap());
+    let reader = PeriodicReader::builder(exporter)
+        .with_interval(Duration::from_millis(500))
+        .with_timeout(Duration::from_secs(1))
+        .build();
 
-    MetricsAsserter::new(left, right).assert();
+    let resource = Resource::builder_empty()
+        .with_service_name("metrics-integration-test")
+        .build();
+
+    let meter_provider = MeterProviderBuilder::default()
+        .with_resource(resource)
+        .with_reader(reader)
+        .build();
+
+    opentelemetry::global::set_meter_provider(meter_provider.clone());
+
+    meter_provider
+}
+
+///
+/// Creates an exporter using the appropriate HTTP or gRPC client based on
+/// the configured features.
+///
+fn create_exporter() -> MetricExporter {
+    let exporter_builder = MetricExporter::builder();
+
+    #[cfg(feature = "tonic-client")]
+    let exporter_builder = exporter_builder.with_tonic();
+    #[cfg(not(feature = "tonic-client"))]
+    #[cfg(any(
+        feature = "hyper-client",
+        feature = "reqwest-client",
+        feature = "reqwest-blocking-client"
+    ))]
+    let exporter_builder = exporter_builder.with_http();
+
+    exporter_builder
+        .build()
+        .expect("Failed to build MetricExporter")
+}
+
+///
+/// Retrieves the latest metrics for the given scope. Each test should use
+/// its own scope, so that we can easily pull the data for it out from the rest
+/// of the data.
+///
+/// This will also retrieve the resource attached to the scope.
+///
+pub fn fetch_latest_metrics_for_scope(scope_name: &str) -> Result<Value> {
+    // Open the file and fetch the contents
+    let contents = fs::read_to_string(test_utils::METRICS_FILE)?;
+
+    // Find the last parseable metrics line that contains the desired scope
+    let json_line = contents
+        .lines()
+        .rev()
+        .find_map(|line| {
+            // Attempt to parse the line as JSON
+            serde_json::from_str::<Value>(line)
+                .ok()
+                .and_then(|mut json_line| {
+                    // Check if it contains the specified scope
+                    if let Some(resource_metrics) = json_line
+                        .get_mut("resourceMetrics")
+                        .and_then(|v| v.as_array_mut())
+                    {
+                        resource_metrics.retain_mut(|resource| {
+                            if let Some(scope_metrics) = resource
+                                .get_mut("scopeMetrics")
+                                .and_then(|v| v.as_array_mut())
+                            {
+                                scope_metrics.retain(|scope| {
+                                    scope
+                                        .get("scope")
+                                        .and_then(|s| s.get("name"))
+                                        .and_then(|name| name.as_str())
+                                        .map_or(false, |n| n == scope_name)
+                                });
+
+                                // Keep the resource only if it has any matching `ScopeMetrics`
+                                !scope_metrics.is_empty()
+                            } else {
+                                false
+                            }
+                        });
+
+                        // If any resource metrics remain, return this line
+                        if !resource_metrics.is_empty() {
+                            return Some(json_line);
+                        }
+                    }
+
+                    None
+                })
+        })
+        .with_context(|| {
+            format!(
+                "No valid JSON line containing scope `{}` found.",
+                scope_name
+            )
+        })?;
+
+    Ok(json_line)
+}
+
+///
+/// Performs setup for metrics tests
+///
+async fn setup_metrics_test() -> Result<()> {
+    // Make sure the collector container is running
+    start_collector_container().await?;
+
+    let mut done = SETUP_DONE.lock().unwrap();
+    if !*done {
+        println!("Running setup before any tests...");
+        *done = true; // Mark setup as done
+
+        // Initialize the metrics subsystem
+        _ = init_metrics().await;
+    }
+
+    // Truncate results
+    _ = File::create(RESULT_PATH).expect("it's good");
+
+    Ok(())
+}
+
+///
+/// Check that the metrics for the given scope match what we expect. This
+/// includes zeroing out timestamps, which we reasonably expect not to match.
+///
+pub fn validate_metrics_against_results(scope_name: &str) -> Result<()> {
+    // Define the results file path
+    let results_file_path = format!("./expected/metrics/{}.json", scope_name);
+
+    // Fetch the actual metrics for the given scope
+    let actual_metrics = fetch_latest_metrics_for_scope(scope_name)
+        .context(format!("Failed to fetch metrics for scope: {}", scope_name))?;
+
+    // Read the expected metrics from the results file
+    let expected_metrics = {
+        let file = File::open(&results_file_path).context(format!(
+            "Failed to open results file: {}",
+            results_file_path
+        ))?;
+        read_metrics_from_json(file)
+    }?;
+
+    // Compare the actual metrics with the expected metrics
+    MetricsAsserter::new(actual_metrics, expected_metrics).assert();
+
+    Ok(())
+}
+
+///
+/// TODO - the HTTP metrics exporters except reqwest-blocking-client do not seem
+/// to work at the moment.
+/// TODO - fix this asynchronously.
+///
+#[cfg(test)]
+#[cfg(not(feature = "hyper-client"))]
+#[cfg(not(feature = "reqwest-client"))]
+mod tests {
+
+    use super::*;
+    use opentelemetry::metrics::MeterProvider;
+
+    ///
+    /// Validate JSON/Protobuf models roundtrip correctly.
+    ///
+    /// TODO - this test fails currently. Fields disappear, such as the actual value of a given metric.
+    /// This appears to be on the _deserialization_ side.
+ /// Issue: https://github.com/open-telemetry/opentelemetry-rust/issues/2434 + /// + #[tokio::test] + #[ignore] + async fn test_roundtrip_example_data() -> Result<()> { + let metrics_in = include_str!("../expected/metrics/test_u64_counter_meter.json"); + let metrics: MetricsData = serde_json::from_str(metrics_in)?; + let metrics_out = serde_json::to_string(&metrics)?; + + println!("{:}", metrics_out); + + let metrics_in_json: Value = serde_json::from_str(metrics_in)?; + let metrics_out_json: Value = serde_json::from_str(&metrics_out)?; + + assert_eq!(metrics_in_json, metrics_out_json); + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_u64_counter() -> Result<()> { + let _result_path = setup_metrics_test().await; + const METER_NAME: &str = "test_u64_counter_meter"; + + // Add data to u64_counter + let meter = opentelemetry::global::meter_provider().meter(METER_NAME); + + let counter = meter.u64_counter("counter_u64").build(); + counter.add( + 10, + &[ + KeyValue::new("mykey1", "myvalue1"), + KeyValue::new("mykey2", "myvalue2"), + ], + ); + + tokio::time::sleep(Duration::from_secs(2)).await; + + // Validate metrics against results file + validate_metrics_against_results(METER_NAME)?; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + // #[ignore] // skip when running unit test + async fn test_histogram() -> Result<()> { + _ = setup_metrics_test().await; + const METER_NAME: &str = "test_histogram_meter"; + + // Add data to histogram + let meter = opentelemetry::global::meter_provider().meter(METER_NAME); + let histogram = meter.u64_histogram("example_histogram").build(); + histogram.record(42, &[KeyValue::new("mykey3", "myvalue4")]); + tokio::time::sleep(Duration::from_secs(5)).await; + + validate_metrics_against_results(METER_NAME)?; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + // #[ignore] // skip when running unit test + async fn test_up_down_counter() -> Result<()> { + _ = setup_metrics_test().await; + const METER_NAME: &str = "test_up_down_meter"; + + // Add data to up_down_counter + let meter = opentelemetry::global::meter_provider().meter(METER_NAME); + let up_down_counter = meter.i64_up_down_counter("example_up_down_counter").build(); + up_down_counter.add(-1, &[KeyValue::new("mykey5", "myvalue5")]); + tokio::time::sleep(Duration::from_secs(5)).await; + + validate_metrics_against_results(METER_NAME)?; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + #[ignore] + async fn test_flush_on_shutdown() -> Result<()> { + const METER_NAME: &str = "test_flush_on_shutdown"; + + // Set everything up by hand, so that we can shutdown() the exporter + // and make sure our data is flushed through. 
+
+        // Make sure the collector is running
+        start_collector_container().await?;
+
+        // Set up the exporter
+        let exporter = create_exporter();
+        let reader = PeriodicReader::builder(exporter)
+            .with_interval(Duration::from_secs(30))
+            .with_timeout(Duration::from_secs(1))
+            .build();
+        let resource = Resource::builder_empty()
+            .with_service_name("metrics-integration-test")
+            .build();
+        let meter_provider = MeterProviderBuilder::default()
+            .with_resource(resource)
+            .with_reader(reader)
+            .build();
+
+        // Send something
+        let meter = meter_provider.meter(METER_NAME);
+        let counter = meter.u64_counter("counter_").build();
+        counter.add(123, &[]);
+
+        // Shutdown
+        meter_provider.shutdown()?;
+
+        // We still need to sleep, to give otel-collector a chance to flush to disk
+        tokio::time::sleep(Duration::from_secs(2)).await;
+
+        validate_metrics_against_results(METER_NAME)?;
+
+        Ok(())
+    }
+}
+
+///
+/// Make sure we stop the collector container, otherwise it will sit around hogging our
+/// ports and subsequent test runs will fail.
+///
+#[dtor]
+fn shutdown() {
+    println!("metrics::shutdown");
+    test_utils::stop_collector_container();
 }
diff --git a/opentelemetry-otlp/tests/integration_test/tests/traces.rs b/opentelemetry-otlp/tests/integration_test/tests/traces.rs
index f7e3b3a510..1601e04132 100644
--- a/opentelemetry-otlp/tests/integration_test/tests/traces.rs
+++ b/opentelemetry-otlp/tests/integration_test/tests/traces.rs
@@ -2,36 +2,55 @@
 
 use integration_test_runner::trace_asserter::{read_spans_from_json, TraceAsserter};
 use opentelemetry::global;
-use opentelemetry::global::shutdown_tracer_provider;
 use opentelemetry::trace::TraceError;
 use opentelemetry::{
     trace::{TraceContextExt, Tracer},
     Key, KeyValue,
 };
+use opentelemetry_otlp::SpanExporter;
+
+use anyhow::Result;
+use ctor::dtor;
+use integration_test_runner::test_utils;
 use opentelemetry_proto::tonic::trace::v1::TracesData;
 use opentelemetry_sdk::{runtime, trace as sdktrace, Resource};
-use std::error::Error;
 use std::fs::File;
 use std::io::Write;
 use std::os::unix::fs::MetadataExt;
+use std::time::Duration;
+use tokio::time::sleep;
 
 fn init_tracer_provider() -> Result<sdktrace::TracerProvider, TraceError> {
-    opentelemetry_otlp::new_pipeline()
-        .tracing()
-        .with_exporter(opentelemetry_otlp::new_exporter().tonic())
-        .with_trace_config(
-            sdktrace::Config::default().with_resource(Resource::new(vec![KeyValue::new(
-                opentelemetry_semantic_conventions::resource::SERVICE_NAME,
-                "basic-otlp-tracing-example",
-            )])),
+    let exporter_builder = SpanExporter::builder();
+    #[cfg(feature = "tonic-client")]
+    let exporter_builder = exporter_builder.with_tonic();
+    #[cfg(not(feature = "tonic-client"))]
+    #[cfg(any(
+        feature = "hyper-client",
+        feature = "reqwest-client",
+        feature = "reqwest-blocking-client"
+    ))]
+    let exporter_builder = exporter_builder.with_http();
+
+    let exporter = exporter_builder.build()?;
+
+    Ok(opentelemetry_sdk::trace::TracerProvider::builder()
+        .with_batch_exporter(exporter, runtime::Tokio)
+        .with_resource(
+            Resource::builder_empty()
+                .with_service_name("basic-otlp-tracing-example")
+                .build(),
        )
-        .install_batch(runtime::Tokio)
+        .build())
 }
 
 const LEMONS_KEY: Key = Key::from_static_str("lemons");
 const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another");
 
-pub async fn traces() -> Result<(), Box<dyn Error>> {
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+pub async fn traces() -> Result<()> {
+    test_utils::start_collector_container().await?;
+
     let tracer_provider = init_tracer_provider().expect("Failed to initialize tracer provider.");
 
     global::set_tracer_provider(tracer_provider.clone());
 
@@ -53,44 +72,53 @@ pub async fn traces() -> Result<(), Box<dyn Error>> {
         });
     });
 
-    shutdown_tracer_provider();
+    tracer_provider.shutdown()?;
+
+    // Give it a second to flush
+    sleep(Duration::from_secs(2)).await;
+
+    // Validate results
+    assert_traces_results(test_utils::TRACES_FILE, "./expected/traces.json")?;
 
     Ok(())
 }
 
-pub fn assert_traces_results(result: &str, expected: &str) {
-    let left = read_spans_from_json(File::open(expected).unwrap());
-    let right = read_spans_from_json(File::open(result).unwrap());
+pub fn assert_traces_results(result: &str, expected: &str) -> Result<()> {
+    let left = read_spans_from_json(File::open(expected)?)?;
+    let right = read_spans_from_json(File::open(result)?)?;
 
     TraceAsserter::new(left, right).assert();
 
     // we cannot read result json file because the timestamp was represents as string instead of u64.
     // need to fix it on json file exporter
+    assert!(File::open(result)?.metadata()?.size() > 0);
 
-    assert!(File::open(result).unwrap().metadata().unwrap().size() > 0)
+    Ok(())
 }
 
 #[test]
 #[should_panic(expected = "left: \"Sub operation...\"")] // we swap the parent spans with child spans in failed_traces.json
 pub fn test_assert_span_eq_failure() {
-    let left = read_spans_from_json(File::open("./expected/traces.json").unwrap());
-    let right = read_spans_from_json(File::open("./expected/failed_traces.json").unwrap());
+    let left = read_spans_from_json(File::open("./expected/traces.json").unwrap()).unwrap();
+    let right = read_spans_from_json(File::open("./expected/failed_traces.json").unwrap()).unwrap();
 
     TraceAsserter::new(right, left).assert();
 }
 
 #[test]
-pub fn test_assert_span_eq() {
-    let spans = read_spans_from_json(File::open("./expected/traces.json").unwrap());
+pub fn test_assert_span_eq() -> Result<()> {
+    let spans = read_spans_from_json(File::open("./expected/traces.json")?)?;
 
     TraceAsserter::new(spans.clone(), spans).assert();
+
+    Ok(())
 }
 
 #[test]
-pub fn test_serde() {
+pub fn test_serde() -> Result<()> {
     let spans = read_spans_from_json(
         File::open("./expected/traces.json").expect("Failed to read traces.json"),
-    );
+    )?;
 
     let json = serde_json::to_string_pretty(&TracesData {
         resource_spans: spans,
     })
@@ -102,11 +130,22 @@ pub fn test_serde() {
 
     let left = read_spans_from_json(
         File::open("./expected/traces.json").expect("Failed to read traces.json"),
-    );
+    )?;
     let right = read_spans_from_json(
         File::open("./expected/serialized_traces.json")
             .expect("Failed to read serialized_traces.json"),
-    );
+    )?;
 
     TraceAsserter::new(left, right).assert();
+
+    Ok(())
+}
+
+///
+/// Make sure we stop the collector container, otherwise it will sit around hogging our
+/// ports and subsequent test runs will fail.
+/// +#[dtor] +fn shutdown() { + test_utils::stop_collector_container(); } diff --git a/opentelemetry-otlp/tests/smoke.rs b/opentelemetry-otlp/tests/smoke.rs index c217f8f9d6..ba09407e1e 100644 --- a/opentelemetry-otlp/tests/smoke.rs +++ b/opentelemetry-otlp/tests/smoke.rs @@ -1,8 +1,7 @@ use futures_util::StreamExt; use opentelemetry::global; -use opentelemetry::global::shutdown_tracer_provider; use opentelemetry::trace::{Span, SpanKind, Tracer}; -use opentelemetry_otlp::WithExportConfig; +use opentelemetry_otlp::{WithExportConfig, WithTonicConfig}; use opentelemetry_proto::tonic::collector::trace::v1::{ trace_service_server::{TraceService, TraceServiceServer}, ExportTraceServiceRequest, ExportTraceServiceResponse, @@ -84,25 +83,28 @@ async fn smoke_tracer() { println!("Installing tracer provider..."); let mut metadata = tonic::metadata::MetadataMap::new(); metadata.insert("x-header-key", "header-value".parse().unwrap()); - let tracer_provider = opentelemetry_otlp::new_pipeline() - .tracing() - .with_exporter( + let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder() + .with_batch_exporter( #[cfg(feature = "gzip-tonic")] - opentelemetry_otlp::new_exporter() - .tonic() + opentelemetry_otlp::SpanExporter::builder() + .with_tonic() .with_compression(opentelemetry_otlp::Compression::Gzip) .with_endpoint(format!("http://{}", addr)) - .with_metadata(metadata), + .with_metadata(metadata) + .build() + .expect("gzip-tonic SpanExporter failed to build"), #[cfg(not(feature = "gzip-tonic"))] - opentelemetry_otlp::new_exporter() - .tonic() + opentelemetry_otlp::SpanExporter::builder() + .with_tonic() .with_endpoint(format!("http://{}", addr)) - .with_metadata(metadata), + .with_metadata(metadata) + .build() + .expect("NON gzip-tonic SpanExporter failed to build"), + opentelemetry_sdk::runtime::Tokio, ) - .install_batch(opentelemetry_sdk::runtime::Tokio) - .expect("failed to install"); + .build(); - global::set_tracer_provider(tracer_provider); + global::set_tracer_provider(tracer_provider.clone()); let tracer = global::tracer("smoke"); @@ -114,7 +116,9 @@ async fn smoke_tracer() { span.add_event("my-test-event", vec![]); span.end(); - shutdown_tracer_provider(); + tracer_provider + .shutdown() + .expect("tracer_provider should shutdown successfully"); } println!("Waiting for request..."); diff --git a/opentelemetry-prometheus/CHANGELOG.md b/opentelemetry-prometheus/CHANGELOG.md index 6486650baa..b45f2b51dc 100644 --- a/opentelemetry-prometheus/CHANGELOG.md +++ b/opentelemetry-prometheus/CHANGELOG.md @@ -2,6 +2,16 @@ ## vNext +- Bump msrv to 1.75.0. 
+ + +## v0.27.0 + +- Update `opentelemetry` dependency version to 0.27 +- Update `opentelemetry_sdk` dependency version to 0.27 +- Update `opentelemetry-semantic-conventions` dependency version to 0.27 + + ## v0.17.0 ### Changed diff --git a/opentelemetry-prometheus/Cargo.toml b/opentelemetry-prometheus/Cargo.toml index 3bbe65e35a..fc6c2221d6 100644 --- a/opentelemetry-prometheus/Cargo.toml +++ b/opentelemetry-prometheus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-prometheus" -version = "0.17.0" +version = "0.27.0" description = "Prometheus exporter for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -13,7 +13,7 @@ categories = [ keywords = ["opentelemetry", "prometheus", "metrics", "async"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [package.metadata.docs.rs] all-features = true @@ -21,17 +21,20 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] once_cell = { workspace = true } -opentelemetry = { version = "0.24", default-features = false, features = ["metrics"] } -opentelemetry_sdk = { version = "0.24", default-features = false, features = ["metrics"] } +opentelemetry = { version = "0.27", default-features = false, features = ["metrics"] } +opentelemetry_sdk = { version = "0.27", default-features = false, features = ["metrics"] } prometheus = "0.13" protobuf = "2.14" +tracing = {workspace = true, optional = true} # optional for opentelemetry internal logging [dev-dependencies] -opentelemetry-semantic-conventions = { version = "0.16" } +opentelemetry-semantic-conventions = { version = "0.27" } http-body-util = { workspace = true } hyper = { workspace = true, features = ["full"] } hyper-util = { workspace = true, features = ["full"] } tokio = { workspace = true, features = ["full"] } [features] +default = ["internal-logs"] prometheus-encoding = [] +internal-logs = ["tracing"] diff --git a/opentelemetry-prometheus/README.md b/opentelemetry-prometheus/README.md index 360414b5aa..35142e9193 100644 --- a/opentelemetry-prometheus/README.md +++ b/opentelemetry-prometheus/README.md @@ -7,7 +7,7 @@ [`Prometheus`] integration for applications instrumented with [`OpenTelemetry`]. **The development of prometheus exporter has halt until the Opentelemetry metrics API and SDK reaches 1.0. Current -implementation is based on Opentelemetry API and SDK 0.24**. +implementation is based on Opentelemetry API and SDK 0.27**. 
[![Crates.io: opentelemetry-prometheus](https://img.shields.io/crates/v/opentelemetry-prometheus.svg)](https://crates.io/crates/opentelemetry-prometheus) [![Documentation](https://docs.rs/opentelemetry-prometheus/badge.svg)](https://docs.rs/opentelemetry-prometheus) diff --git a/opentelemetry-prometheus/examples/hyper.rs b/opentelemetry-prometheus/examples/hyper.rs index 692d21a719..a2bd593ba9 100644 --- a/opentelemetry-prometheus/examples/hyper.rs +++ b/opentelemetry-prometheus/examples/hyper.rs @@ -85,17 +85,17 @@ pub async fn main() -> Result<(), Box> { http_counter: meter .u64_counter("http_requests_total") .with_description("Total number of HTTP requests made.") - .init(), + .build(), http_body_gauge: meter .u64_histogram("example.http_response_size") .with_unit("By") .with_description("The metrics HTTP response sizes in bytes.") - .init(), + .build(), http_req_histogram: meter .f64_histogram("example.http_request_duration") .with_unit("ms") .with_description("The HTTP request latencies in milliseconds.") - .init(), + .build(), }); let addr: SocketAddr = ([127, 0, 0, 1], 3000).into(); diff --git a/opentelemetry-prometheus/src/config.rs b/opentelemetry-prometheus/src/config.rs index 0143e59692..cec975e485 100644 --- a/opentelemetry-prometheus/src/config.rs +++ b/opentelemetry-prometheus/src/config.rs @@ -1,10 +1,6 @@ use core::fmt; use once_cell::sync::OnceCell; -use opentelemetry::metrics::{MetricsError, Result}; -use opentelemetry_sdk::metrics::{ - reader::{AggregationSelector, MetricProducer}, - ManualReaderBuilder, -}; +use opentelemetry_sdk::metrics::{ManualReaderBuilder, MetricError, MetricResult}; use std::sync::{Arc, Mutex}; use crate::{Collector, PrometheusExporter, ResourceSelector}; @@ -105,16 +101,6 @@ impl ExporterBuilder { self } - /// Configure the [AggregationSelector] the exporter will use. - /// - /// If no selector is provided, the [DefaultAggregationSelector] is used. - /// - /// [DefaultAggregationSelector]: opentelemetry_sdk::metrics::reader::DefaultAggregationSelector - pub fn with_aggregation_selector(mut self, agg: impl AggregationSelector + 'static) -> Self { - self.reader = self.reader.with_aggregation_selector(agg); - self - } - /// Configures whether to export resource as attributes with every metric. /// /// Note that this is orthogonal to the `target_info` metric, which can be disabled using `without_target_info`. @@ -128,17 +114,8 @@ impl ExporterBuilder { self } - /// Registers an external [MetricProducer] with this reader. - /// - /// The producer is used as a source of aggregated metric data which is - /// incorporated into metrics collected from the SDK. - pub fn with_producer(mut self, producer: impl MetricProducer + 'static) -> Self { - self.reader = self.reader.with_producer(producer); - self - } - /// Creates a new [PrometheusExporter] from this configuration. - pub fn build(self) -> Result { + pub fn build(self) -> MetricResult { let reader = Arc::new(self.reader.build()); let collector = Collector { @@ -157,7 +134,7 @@ impl ExporterBuilder { let registry = self.registry.unwrap_or_default(); registry .register(Box::new(collector)) - .map_err(|e| MetricsError::Other(e.to_string()))?; + .map_err(|e| MetricError::Other(e.to_string()))?; Ok(PrometheusExporter { reader }) } diff --git a/opentelemetry-prometheus/src/lib.rs b/opentelemetry-prometheus/src/lib.rs index 64cf6e5266..968635a715 100644 --- a/opentelemetry-prometheus/src/lib.rs +++ b/opentelemetry-prometheus/src/lib.rs @@ -28,11 +28,11 @@ //! let counter = meter //! 
.u64_counter("a.counter")
 //!     .with_description("Counts things")
-//!     .init();
+//!     .build();
 //! let histogram = meter
 //!     .u64_histogram("a.histogram")
 //!     .with_description("Records values")
-//!     .init();
+//!     .build();
 //!
 //! counter.add(100, &[KeyValue::new("key", "value")]);
 //! histogram.record(100, &[KeyValue::new("key", "value")]);
@@ -97,18 +97,14 @@
 #![cfg_attr(test, deny(warnings))]
 
 use once_cell::sync::{Lazy, OnceCell};
-use opentelemetry::{
-    global,
-    metrics::{MetricsError, Result},
-    Key, Value,
-};
+use opentelemetry::{otel_error, otel_warn, InstrumentationScope, Key, Value};
 use opentelemetry_sdk::{
     metrics::{
-        data::{self, ResourceMetrics, Temporality},
-        reader::{AggregationSelector, MetricReader, TemporalitySelector},
-        Aggregation, InstrumentKind, ManualReader, Pipeline,
+        data::{self, ResourceMetrics},
+        reader::MetricReader,
+        InstrumentKind, ManualReader, MetricResult, Pipeline, Temporality,
     },
-    Resource, Scope,
+    Resource,
 };
 use prometheus::{
     core::Desc,
@@ -152,36 +148,28 @@ pub struct PrometheusExporter {
     reader: Arc<ManualReader>,
 }
 
-impl TemporalitySelector for PrometheusExporter {
-    /// Note: Prometheus only supports cumulative temporality so this will always be
-    /// [Temporality::Cumulative].
-    fn temporality(&self, kind: InstrumentKind) -> Temporality {
-        self.reader.temporality(kind)
-    }
-}
-
-impl AggregationSelector for PrometheusExporter {
-    fn aggregation(&self, kind: InstrumentKind) -> Aggregation {
-        self.reader.aggregation(kind)
-    }
-}
-
 impl MetricReader for PrometheusExporter {
     fn register_pipeline(&self, pipeline: Weak<Pipeline>) {
         self.reader.register_pipeline(pipeline)
     }
 
-    fn collect(&self, rm: &mut ResourceMetrics) -> Result<()> {
+    fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()> {
         self.reader.collect(rm)
     }
 
-    fn force_flush(&self) -> Result<()> {
+    fn force_flush(&self) -> MetricResult<()> {
         self.reader.force_flush()
     }
 
-    fn shutdown(&self) -> Result<()> {
+    fn shutdown(&self) -> MetricResult<()> {
         self.reader.shutdown()
     }
+
+    /// Note: Prometheus only supports cumulative temporality, so this will always be
+    /// [Temporality::Cumulative].
+    fn temporality(&self, _kind: InstrumentKind) -> Temporality {
+        Temporality::Cumulative
+    }
 }
 
 struct Collector {
@@ -199,7 +187,7 @@ struct Collector {
 
 #[derive(Default)]
 struct CollectorInner {
-    scope_infos: HashMap<Scope, MetricFamily>,
+    scope_infos: HashMap<InstrumentationScope, MetricFamily>,
     metric_families: HashMap<String, MetricFamily>,
 }
 
@@ -287,7 +275,10 @@ impl prometheus::core::Collector for Collector {
         let mut inner = match self.inner.lock() {
             Ok(guard) => guard,
             Err(err) => {
-                global::handle_error(err);
+                otel_error!(
+                    name: "MetricScrapeFailed",
+                    message = err.to_string(),
+                );
                 return Vec::new();
             }
         };
@@ -297,7 +288,10 @@
             scope_metrics: vec![],
         };
         if let Err(err) = self.reader.collect(&mut metrics) {
-            global::handle_error(err);
+            otel_error!(
+                name: "MetricScrapeFailed",
+                message = err.to_string(),
+            );
             return vec![];
         }
         let mut res = Vec::with_capacity(metrics.scope_metrics.len() + 1);
@@ -317,7 +311,7 @@
 
         for scope_metrics in metrics.scope_metrics {
             let scope_labels = if !self.disable_scope_info {
-                if !scope_metrics.scope.attributes.is_empty() {
+                if scope_metrics.scope.attributes().count() > 0 {
                     let scope_info = inner
                         .scope_infos
                         .entry(scope_metrics.scope.clone())
@@ -326,12 +320,12 @@
                 }
 
                 let mut labels =
-                    Vec::with_capacity(1 + scope_metrics.scope.version.is_some() as usize);
+                    Vec::with_capacity(1 + scope_metrics.scope.version().is_some() as usize);
                 let mut name = LabelPair::new();
                 name.set_name(SCOPE_INFO_KEYS[0].into());
-                name.set_value(scope_metrics.scope.name.to_string());
+                name.set_value(scope_metrics.scope.name().to_string());
                 labels.push(name);
-                if let Some(version) = &scope_metrics.scope.version {
+                if let Some(version) = &scope_metrics.scope.version() {
                     let mut l_version = LabelPair::new();
                     l_version.set_name(SCOPE_INFO_KEYS[1].into());
                     l_version.set_value(version.to_string());
@@ -427,11 +421,19 @@ fn validate_metrics(
 ) -> (bool, Option<String>) {
     if let Some(existing) = mfs.get(name) {
         if existing.get_field_type() != metric_type {
-            global::handle_error(MetricsError::Other(format!("Instrument type conflict, using existing type definition. Instrument {name}, Existing: {:?}, dropped: {:?}", existing.get_field_type(), metric_type)));
+            otel_warn!(
+                name: "MetricValidationFailed",
+                message = "Instrument type conflict, using existing type definition",
+                metric_type = format!("Instrument {name}, Existing: {:?}, dropped: {:?}", existing.get_field_type(), metric_type).as_str(),
+            );
             return (true, None);
         }
         if existing.get_help() != description {
-            global::handle_error(MetricsError::Other(format!("Instrument description conflict, using existing.
Instrument {name}, Existing: {:?}, dropped: {:?}", existing.get_help(), description))); + otel_warn!( + name: "MetricValidationFailed", + message = "Instrument description conflict, using existing", + metric_description = format!("Instrument {name}, Existing: {:?}, dropped: {:?}", existing.get_help().to_string(), description.to_string()).as_str(), + ); return (false, Some(existing.get_help().to_string())); } (false, None) @@ -584,16 +586,16 @@ fn create_info_metric( mf } -fn create_scope_info_metric(scope: &Scope) -> MetricFamily { +fn create_scope_info_metric(scope: &InstrumentationScope) -> MetricFamily { let mut g = prometheus::proto::Gauge::default(); g.set_value(1.0); - let mut labels = Vec::with_capacity(1 + scope.version.is_some() as usize); + let mut labels = Vec::with_capacity(1 + scope.version().is_some() as usize); let mut name = LabelPair::new(); name.set_name(SCOPE_INFO_KEYS[0].into()); - name.set_value(scope.name.to_string()); + name.set_value(scope.name().to_string()); labels.push(name); - if let Some(version) = &scope.version { + if let Some(version) = &scope.version() { let mut v_label = LabelPair::new(); v_label.set_name(SCOPE_INFO_KEYS[1].into()); v_label.set_value(version.to_string()); diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index 906290a092..286f8e0392 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -4,10 +4,10 @@ use std::path::Path; use std::time::Duration; use opentelemetry::metrics::{Meter, MeterProvider as _}; -use opentelemetry::Key; use opentelemetry::KeyValue; +use opentelemetry::{InstrumentationScope, Key}; use opentelemetry_prometheus::{ExporterBuilder, ResourceSelector}; -use opentelemetry_sdk::metrics::{new_view, Aggregation, Instrument, SdkMeterProvider, Stream}; +use opentelemetry_sdk::metrics::SdkMeterProvider; use opentelemetry_sdk::resource::{ EnvResourceDetector, SdkProvidedResourceDetector, TelemetryResourceDetector, }; @@ -15,6 +15,16 @@ use opentelemetry_sdk::Resource; use opentelemetry_semantic_conventions::resource::{SERVICE_NAME, TELEMETRY_SDK_VERSION}; use prometheus::{Encoder, TextEncoder}; +const BOUNDARIES: &[f64] = &[ + 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 1000.0, +]; + +const BYTES_BOUNDARIES: &[f64] = &[ + 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, 7500.0, + 10000.0, +]; + +#[ignore = "https://github.com/open-telemetry/opentelemetry-rust/pull/2224"] #[test] fn prometheus_exporter_integration() { struct TestCase { @@ -46,24 +56,24 @@ fn prometheus_exporter_integration() { expected_file: "counter.txt", record_metrics: Box::new(|meter| { let attrs = vec![ - Key::new("A").string("B"), - Key::new("C").string("D"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "B"), + KeyValue::new("C", "D"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; let counter = meter .f64_counter("foo") .with_description("a simple counter") .with_unit("ms") - .init(); + .build(); counter.add(5.0, &attrs); counter.add(10.3, &attrs); counter.add(9.0, &attrs); let attrs2 = vec![ - Key::new("A").string("D"), - Key::new("C").string("B"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "D"), + KeyValue::new("C", "B"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; counter.add(5.0, &attrs2); }), @@ -75,24 +85,24 @@ fn prometheus_exporter_integration() { builder: 
ExporterBuilder::default().without_counter_suffixes(), record_metrics: Box::new(|meter| { let attrs = vec![ - Key::new("A").string("B"), - Key::new("C").string("D"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "B"), + KeyValue::new("C", "D"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; let counter = meter .f64_counter("foo") .with_description("a simple counter without a total suffix") .with_unit("ms") - .init(); + .build(); counter.add(5.0, &attrs); counter.add(10.3, &attrs); counter.add(9.0, &attrs); let attrs2 = vec![ - Key::new("A").string("D"), - Key::new("C").string("B"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "D"), + KeyValue::new("C", "B"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; counter.add(5.0, &attrs2); }), @@ -102,12 +112,12 @@ fn prometheus_exporter_integration() { name: "gauge", expected_file: "gauge.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; let gauge = meter .f64_up_down_counter("bar") .with_description("a fun little gauge") .with_unit("1") - .init(); + .build(); gauge.add(1.0, &attrs); gauge.add(-0.25, &attrs); }), @@ -117,12 +127,13 @@ fn prometheus_exporter_integration() { name: "histogram", expected_file: "histogram.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; let histogram = meter .f64_histogram("histogram_baz") .with_description("a very nice histogram") .with_unit("By") - .init(); + .with_boundaries(BOUNDARIES.to_vec()) + .build(); histogram.record(23.0, &attrs); histogram.record(7.0, &attrs); histogram.record(101.0, &attrs); @@ -137,18 +148,18 @@ fn prometheus_exporter_integration() { record_metrics: Box::new(|meter| { let attrs = vec![ // exact match, value should be overwritten - Key::new("A.B").string("X"), - Key::new("A.B").string("Q"), + KeyValue::new("A.B", "X"), + KeyValue::new("A.B", "Q"), // unintended match due to sanitization, values should be concatenated - Key::new("C.D").string("Y"), - Key::new("C/D").string("Z"), + KeyValue::new("C.D", "Y"), + KeyValue::new("C/D", "Z"), ]; let counter = meter .f64_counter("foo") .with_description("a sanitary counter") // This unit is not added to .with_unit("By") - .init(); + .build(); counter.add(5.0, &attrs); counter.add(10.3, &attrs); counter.add(9.0, &attrs); @@ -159,12 +170,12 @@ fn prometheus_exporter_integration() { name: "invalid instruments are renamed", expected_file: "sanitized_names.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; // Valid. 
let mut gauge = meter .f64_up_down_counter("bar") .with_description("a fun little gauge") - .init(); + .build(); gauge.add(100., &attrs); gauge.add(-25.0, &attrs); @@ -172,19 +183,20 @@ fn prometheus_exporter_integration() { gauge = meter .f64_up_down_counter("invalid.gauge.name") .with_description("a gauge with an invalid name") - .init(); + .build(); gauge.add(100.0, &attrs); let counter = meter .f64_counter("0invalid.counter.name") .with_description("a counter with an invalid name") - .init(); + .build(); counter.add(100.0, &attrs); let histogram = meter .f64_histogram("invalid.hist.name") .with_description("a histogram with an invalid name") - .init(); + .with_boundaries(BOUNDARIES.to_vec()) + .build(); histogram.record(23.0, &attrs); }), ..Default::default() @@ -195,15 +207,15 @@ fn prometheus_exporter_integration() { expected_file: "empty_resource.txt", record_metrics: Box::new(|meter| { let attrs = vec![ - Key::new("A").string("B"), - Key::new("C").string("D"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "B"), + KeyValue::new("C", "D"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; let counter = meter .f64_counter("foo") .with_description("a simple counter") - .init(); + .build(); counter.add(5.0, &attrs); counter.add(10.3, &attrs); counter.add(9.0, &attrs); @@ -212,19 +224,19 @@ fn prometheus_exporter_integration() { }, TestCase { name: "custom resource", - custom_resource_attrs: vec![Key::new("A").string("B"), Key::new("C").string("D")], + custom_resource_attrs: vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")], expected_file: "custom_resource.txt", record_metrics: Box::new(|meter| { let attrs = vec![ - Key::new("A").string("B"), - Key::new("C").string("D"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "B"), + KeyValue::new("C", "D"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; let counter = meter .f64_counter("foo") .with_description("a simple counter") - .init(); + .build(); counter.add(5., &attrs); counter.add(10.3, &attrs); counter.add(9.0, &attrs); @@ -237,15 +249,15 @@ fn prometheus_exporter_integration() { expected_file: "without_target_info.txt", record_metrics: Box::new(|meter| { let attrs = vec![ - Key::new("A").string("B"), - Key::new("C").string("D"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "B"), + KeyValue::new("C", "D"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; let counter = meter .f64_counter("foo") .with_description("a simple counter") - .init(); + .build(); counter.add(5.0, &attrs); counter.add(10.3, &attrs); counter.add(9.0, &attrs); @@ -257,12 +269,12 @@ fn prometheus_exporter_integration() { builder: ExporterBuilder::default().without_scope_info(), expected_file: "without_scope_info.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; let gauge = meter .i64_up_down_counter("bar") .with_description("a fun little gauge") .with_unit("1") - .init(); + .build(); gauge.add(2, &attrs); gauge.add(-1, &attrs); }), @@ -275,12 +287,12 @@ fn prometheus_exporter_integration() { .without_target_info(), expected_file: "without_scope_and_target_info.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; let counter = meter .u64_counter("bar") .with_description("a fun little counter") 
.with_unit("By") - .init(); + .build(); counter.add(2, &attrs); counter.add(1, &attrs); }), @@ -292,15 +304,15 @@ fn prometheus_exporter_integration() { expected_file: "with_namespace.txt", record_metrics: Box::new(|meter| { let attrs = vec![ - Key::new("A").string("B"), - Key::new("C").string("D"), - Key::new("E").bool(true), - Key::new("F").i64(42), + KeyValue::new("A", "B"), + KeyValue::new("C", "D"), + KeyValue::new("E", true), + KeyValue::new("F", 42), ]; let counter = meter .f64_counter("foo") .with_description("a simple counter") - .init(); + .build(); counter.add(5.0, &attrs); counter.add(10.3, &attrs); @@ -313,12 +325,12 @@ fn prometheus_exporter_integration() { builder: ExporterBuilder::default().with_resource_selector(ResourceSelector::All), expected_file: "resource_in_every_metrics.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; let gauge = meter .i64_up_down_counter("bar") .with_description("a fun little gauge") .with_unit("1") - .init(); + .build(); gauge.add(2, &attrs); gauge.add(-1, &attrs); }), @@ -330,12 +342,12 @@ fn prometheus_exporter_integration() { .with_resource_selector(HashSet::from([Key::new("service.name")])), expected_file: "select_resource_in_every_metrics.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; let gauge = meter .i64_up_down_counter("bar") .with_description("a fun little gauge") .with_unit("1") - .init(); + .build(); gauge.add(2, &attrs); gauge.add(-1, &attrs); }), @@ -373,25 +385,16 @@ fn prometheus_exporter_integration() { let provider = SdkMeterProvider::builder() .with_resource(res) .with_reader(exporter) - .with_view( - new_view( - Instrument::new().name("histogram_*"), - Stream::new().aggregation(Aggregation::ExplicitBucketHistogram { - boundaries: vec![ - 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 1000.0, - ], - record_min_max: true, - }), - ) - .unwrap(), - ) .build(); - let meter = provider.versioned_meter( - "testmeter", - Some("v0.1.0"), - None::<&'static str>, - Some(vec![KeyValue::new("k", "v")]), - ); + + let scope = InstrumentationScope::builder("testmeter") + .with_version("v0.1.0") + .with_schema_url("https://opentelemetry.io/schema/1.0.0") + .with_attributes(vec![KeyValue::new("k", "v")]) + .build(); + + let meter = provider.meter_with_scope(scope); + (tc.record_metrics)(meter); let content = fs::read_to_string(Path::new("./tests/data").join(tc.expected_file)) @@ -448,30 +451,32 @@ fn multiple_scopes() { .with_resource(resource) .build(); + let scope_foo = InstrumentationScope::builder("meterfoo") + .with_version("v0.1.0") + .with_schema_url("https://opentelemetry.io/schema/1.0.0") + .with_attributes(vec![KeyValue::new("k", "v")]) + .build(); + let foo_counter = provider - .versioned_meter( - "meterfoo", - Some("v0.1.0"), - None::<&'static str>, - Some(vec![KeyValue::new("k", "v")]), - ) + .meter_with_scope(scope_foo) .u64_counter("foo") .with_unit("ms") .with_description("meter foo counter") - .init(); + .build(); foo_counter.add(100, &[KeyValue::new("type", "foo")]); + let scope_bar = InstrumentationScope::builder("meterbar") + .with_version("v0.1.0") + .with_schema_url("https://opentelemetry.io/schema/1.0.0") + .with_attributes(vec![KeyValue::new("k", "v")]) + .build(); + let bar_counter = provider - .versioned_meter( - "meterbar", - Some("v0.1.0"), 
- None::<&'static str>, - Some(vec![KeyValue::new("k", "v")]), - ) + .meter_with_scope(scope_bar) .u64_counter("bar") .with_unit("ms") .with_description("meter bar counter") - .init(); + .build(); bar_counter.add(200, &[KeyValue::new("type", "bar")]); let content = fs::read_to_string("./tests/data/multi_scopes.txt").unwrap(); @@ -509,7 +514,7 @@ fn duplicate_metrics() { .u64_counter("foo") .with_unit("By") .with_description("meter counter foo") - .init(); + .build(); foo_a.add(100, &[KeyValue::new("A", "B")]); @@ -517,7 +522,7 @@ fn duplicate_metrics() { .u64_counter("foo") .with_unit("By") .with_description("meter counter foo") - .init(); + .build(); foo_b.add(100, &[KeyValue::new("A", "B")]); }), @@ -531,7 +536,7 @@ fn duplicate_metrics() { .i64_up_down_counter("foo") .with_unit("By") .with_description("meter gauge foo") - .init(); + .build(); foo_a.add(100, &[KeyValue::new("A", "B")]); @@ -539,7 +544,7 @@ fn duplicate_metrics() { .i64_up_down_counter("foo") .with_unit("By") .with_description("meter gauge foo") - .init(); + .build(); foo_b.add(100, &[KeyValue::new("A", "B")]); }), @@ -553,7 +558,8 @@ fn duplicate_metrics() { .u64_histogram("foo") .with_unit("By") .with_description("meter histogram foo") - .init(); + .with_boundaries(BYTES_BOUNDARIES.to_vec()) + .build(); foo_a.record(100, &[KeyValue::new("A", "B")]); @@ -561,7 +567,8 @@ fn duplicate_metrics() { .u64_histogram("foo") .with_unit("By") .with_description("meter histogram foo") - .init(); + .with_boundaries(BYTES_BOUNDARIES.to_vec()) + .build(); foo_b.record(100, &[KeyValue::new("A", "B")]); }), @@ -575,7 +582,7 @@ fn duplicate_metrics() { .u64_counter("bar") .with_unit("By") .with_description("meter a bar") - .init(); + .build(); bar_a.add(100, &[KeyValue::new("type", "bar")]); @@ -583,7 +590,7 @@ fn duplicate_metrics() { .u64_counter("bar") .with_unit("By") .with_description("meter b bar") - .init(); + .build(); bar_b.add(100, &[KeyValue::new("type", "bar")]); }), @@ -600,7 +607,7 @@ fn duplicate_metrics() { .i64_up_down_counter("bar") .with_unit("By") .with_description("meter a bar") - .init(); + .build(); bar_a.add(100, &[KeyValue::new("type", "bar")]); @@ -608,7 +615,7 @@ fn duplicate_metrics() { .i64_up_down_counter("bar") .with_unit("By") .with_description("meter b bar") - .init(); + .build(); bar_b.add(100, &[KeyValue::new("type", "bar")]); }), @@ -625,7 +632,8 @@ fn duplicate_metrics() { .u64_histogram("bar") .with_unit("By") .with_description("meter a bar") - .init(); + .with_boundaries(BYTES_BOUNDARIES.to_vec()) + .build(); bar_a.record(100, &[KeyValue::new("A", "B")]); @@ -633,7 +641,8 @@ fn duplicate_metrics() { .u64_histogram("bar") .with_unit("By") .with_description("meter b bar") - .init(); + .with_boundaries(BYTES_BOUNDARIES.to_vec()) + .build(); bar_b.record(100, &[KeyValue::new("A", "B")]); }), @@ -650,7 +659,7 @@ fn duplicate_metrics() { .u64_counter("bar") .with_unit("By") .with_description("meter bar") - .init(); + .build(); baz_a.add(100, &[KeyValue::new("type", "bar")]); @@ -658,7 +667,7 @@ fn duplicate_metrics() { .u64_counter("bar") .with_unit("ms") .with_description("meter bar") - .init(); + .build(); baz_b.add(100, &[KeyValue::new("type", "bar")]); }), @@ -673,7 +682,7 @@ fn duplicate_metrics() { .i64_up_down_counter("bar") .with_unit("By") .with_description("meter gauge bar") - .init(); + .build(); bar_a.add(100, &[KeyValue::new("type", "bar")]); @@ -681,7 +690,7 @@ fn duplicate_metrics() { .i64_up_down_counter("bar") .with_unit("ms") .with_description("meter gauge bar") - .init(); + .build(); 
bar_b.add(100, &[KeyValue::new("type", "bar")]); }), @@ -696,7 +705,8 @@ fn duplicate_metrics() { .u64_histogram("bar") .with_unit("By") .with_description("meter histogram bar") - .init(); + .with_boundaries(BYTES_BOUNDARIES.to_vec()) + .build(); bar_a.record(100, &[KeyValue::new("A", "B")]); @@ -704,7 +714,8 @@ fn duplicate_metrics() { .u64_histogram("bar") .with_unit("ms") .with_description("meter histogram bar") - .init(); + .with_boundaries(BYTES_BOUNDARIES.to_vec()) + .build(); bar_b.record(100, &[KeyValue::new("A", "B")]); }), @@ -719,7 +730,7 @@ fn duplicate_metrics() { .u64_counter("foo") .with_unit("By") .with_description("meter foo") - .init(); + .build(); counter.add(100, &[KeyValue::new("type", "foo")]); @@ -727,7 +738,7 @@ fn duplicate_metrics() { .i64_up_down_counter("foo_total") .with_unit("By") .with_description("meter foo") - .init(); + .build(); gauge.add(200, &[KeyValue::new("type", "foo")]); }), @@ -745,7 +756,7 @@ fn duplicate_metrics() { .i64_up_down_counter("foo") .with_unit("By") .with_description("meter gauge foo") - .init(); + .build(); foo_a.add(100, &[KeyValue::new("A", "B")]); @@ -753,7 +764,8 @@ fn duplicate_metrics() { .u64_histogram("foo") .with_unit("By") .with_description("meter histogram foo") - .init(); + .with_boundaries(BOUNDARIES.to_vec()) + .build(); foo_histogram_a.record(100, &[KeyValue::new("A", "B")]); }), @@ -793,18 +805,20 @@ fn duplicate_metrics() { .with_reader(exporter) .build(); - let meter_a = provider.versioned_meter( - "ma", - Some("v0.1.0"), - None::<&'static str>, - Some(vec![KeyValue::new("k", "v")]), - ); - let meter_b = provider.versioned_meter( - "mb", - Some("v0.1.0"), - None::<&'static str>, - Some(vec![KeyValue::new("k", "v")]), - ); + let scope_ma = InstrumentationScope::builder("ma") + .with_version("v0.1.0") + .with_schema_url("https://opentelemetry.io/schema/1.0.0") + .with_attributes(vec![KeyValue::new("k", "v")]) + .build(); + + let scope_mb = InstrumentationScope::builder("mb") + .with_version("v0.1.0") + .with_schema_url("https://opentelemetry.io/schema/1.0.0") + .with_attributes(vec![KeyValue::new("k", "v")]) + .build(); + + let meter_a = provider.meter_with_scope(scope_ma); + let meter_b = provider.meter_with_scope(scope_mb); (tc.record_metrics)(meter_a, meter_b); diff --git a/opentelemetry-proto/CHANGELOG.md b/opentelemetry-proto/CHANGELOG.md index cbc72a39cd..3fc9721c5c 100644 --- a/opentelemetry-proto/CHANGELOG.md +++ b/opentelemetry-proto/CHANGELOG.md @@ -2,7 +2,29 @@ ## vNext +- Update proto definitions to v1.4.0 [#2315](https://github.com/open-telemetry/opentelemetry-rust/pull/2315) +- Bump msrv to 1.75.0. 
+- Update proto definitions to v1.5.0 [#2439](https://github.com/open-telemetry/opentelemetry-rust/pull/2439) + + +## 0.27.0 + +Released 2024-Nov-11 + +- Update `opentelemetry` dependency version to 0.27 +- Update `opentelemetry_sdk` dependency version to 0.27 + +## v0.26.1 + +- Require tonic 0.12.3 to match generated gRPC code [#2168](https://github.com/open-telemetry/opentelemetry-rust/pull/2168) + +## v0.26.0 +Released 2024-Sep-30 + +- Update `opentelemetry` dependency version to 0.26 +- Update `opentelemetry_sdk` dependency version to 0.26 - Fix JSON serialization of `metrics::Exemplar` and `trace::span::Link` [#2069](https://github.com/open-telemetry/opentelemetry-rust/pull/2069) +- Bump MSRV to 1.71.1 [#2140](https://github.com/open-telemetry/opentelemetry-rust/pull/2140) ## v0.25.0 - Update `opentelemetry` dependency version to 0.25 diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index 7c4a5f7a19..5ee5fbaf96 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-proto" -version = "0.25.0" +version = "0.27.0" description = "Protobuf generated files and transformations." homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-proto" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-proto" @@ -13,7 +13,7 @@ categories = [ keywords = ["opentelemetry", "otlp", "logging", "tracing", "metrics"] license = "Apache-2.0" edition = "2021" -rust-version = "1.70" +rust-version = "1.75.0" autotests = false [lib] @@ -51,8 +51,8 @@ populate-logs-event-name = [] [dependencies] tonic = { workspace = true, optional = true, features = ["codegen", "prost"] } prost = { workspace = true, optional = true } -opentelemetry = { version = "0.25", default-features = false, path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.25", default-features = false, path = "../opentelemetry-sdk" } +opentelemetry = { version = "0.27", default-features = false, path = "../opentelemetry" } +opentelemetry_sdk = { version = "0.27", default-features = false, path = "../opentelemetry-sdk" } schemars = { version = "0.8", optional = true } serde = { workspace = true, optional = true, features = ["serde_derive"] } hex = { version = "0.4.3", optional = true } diff --git a/opentelemetry-proto/README.md b/opentelemetry-proto/README.md index e655fbeacd..8c82384779 100644 --- a/opentelemetry-proto/README.md +++ b/opentelemetry-proto/README.md @@ -6,5 +6,17 @@ This crate contains generated files from [opentelemetry-proto](https://github.com/open-telemetry/opentelemetry-proto) repository and transformation between types from generated files and types defined in [opentelemetry](https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry). +*[Supported Rust Versions](#supported-rust-versions)* -*Compiler support: [requires `rustc` 1.70+] \ No newline at end of file +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version. + +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.49, the minimum supported version will not be increased past 1.46, +three minor versions prior.
Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy. \ No newline at end of file diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs index ba4038072f..792e1fc945 100644 --- a/opentelemetry-proto/src/proto.rs +++ b/opentelemetry-proto/src/proto.rs @@ -6,7 +6,7 @@ pub(crate) mod serializers { use crate::tonic::common::v1::any_value::{self, Value}; use crate::tonic::common::v1::AnyValue; use serde::de::{self, MapAccess, Visitor}; - use serde::ser::SerializeStruct; + use serde::ser::{SerializeMap, SerializeStruct}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; @@ -45,35 +45,23 @@ pub(crate) mod serializers { } // AnyValue <-> KeyValue conversion - pub fn serialize_to_value<S>(value: &Option<AnyValue>, serializer: S) -> Result<S::Ok, S::Error> + pub fn serialize_to_value<S>(value: &Option<Value>, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { - match value { - Some(any_value) => match &any_value.value { - Some(Value::IntValue(i)) => { - // Attempt to create a struct to wrap the intValue - let mut state = match serializer.serialize_struct("Value", 1) { - Ok(s) => s, - Err(e) => return Err(e), // Handle the error or return it - }; - - // Attempt to serialize the intValue field - if let Err(e) = state.serialize_field("intValue", &i.to_string()) { - return Err(e); // Handle the error or return it - } - - // Finalize the struct serialization - state.end() - } - Some(value) => value.serialize(serializer), - None => serializer.serialize_none(), - }, + match &value { + Some(Value::IntValue(i)) => { + // Serialize the intValue field as a string, propagating any error + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("intValue", &i.to_string())?; + map.end() + } + Some(value) => value.serialize(serializer), None => serializer.serialize_none(), } } - pub fn deserialize_from_value<'de, D>(deserializer: D) -> Result<Option<AnyValue>, D::Error> + pub fn deserialize_from_value<'de, D>(deserializer: D) -> Result<Option<Value>, D::Error> where D: Deserializer<'de>, { @@ -99,13 +87,13 @@ pub(crate) mod serializers { } impl<'de> de::Visitor<'de> for ValueVisitor { - type Value = AnyValue; + type Value = Option<Value>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a JSON object for AnyValue") } - fn visit_map<V>(self, mut map: V) -> Result<AnyValue, V::Error> + fn visit_map<V>(self, mut map: V) -> Result<Option<Value>, V::Error> where V: de::MapAccess<'de>, { @@ -150,17 +138,17 @@ pub(crate) mod serializers { } if let Some(v) = value { - Ok(AnyValue { value: Some(v) }) + Ok(Some(v)) } else { Err(de::Error::custom( - "Invalid data for AnyValue, no known keys found", + "Invalid data for Value, no known keys found", )) } } } let value = deserializer.deserialize_map(ValueVisitor)?; - Ok(Some(value)) + Ok(value) } pub fn serialize_u64_to_string<S>(value: &u64, serializer: S) -> Result<S::Ok, S::Error> diff --git a/opentelemetry-proto/src/proto/opentelemetry-proto b/opentelemetry-proto/src/proto/opentelemetry-proto index 40b3c1b746..2bd940b2b7 160000 --- a/opentelemetry-proto/src/proto/opentelemetry-proto +++ b/opentelemetry-proto/src/proto/opentelemetry-proto @@ -1 +1 @@ -Subproject commit 40b3c1b746767cbc13c2e39da3eaf1a23e54ffdd +Subproject commit 2bd940b2b77c1ab57c27166af21384906da7bb2b diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs index f8ecf74237..84c9a5ca93 100644 ---
a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs @@ -61,7 +61,13 @@ pub struct ExportLogsPartialSuccess { /// Generated client implementations. #[cfg(feature = "gen-tonic")] pub mod logs_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// Service that can be used to push logs between one Application instrumented with @@ -160,8 +166,7 @@ pub mod logs_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -184,7 +189,13 @@ pub mod logs_service_client { /// Generated server implementations. #[cfg(feature = "gen-tonic")] pub mod logs_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with LogsServiceServer. #[async_trait] @@ -325,17 +336,19 @@ pub mod logs_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs index b5eb36e882..1f6951a60f 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs @@ -61,7 +61,13 @@ pub struct ExportMetricsPartialSuccess { /// Generated client implementations. #[cfg(feature = "gen-tonic")] pub mod metrics_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// Service that can be used to push metrics between one Application @@ -160,8 +166,7 @@ pub mod metrics_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -184,7 +189,13 @@ pub mod metrics_service_client { /// Generated server implementations. #[cfg(feature = "gen-tonic")] pub mod metrics_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with MetricsServiceServer. 
#[async_trait] @@ -325,17 +336,19 @@ pub mod metrics_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs index ef73c77877..bf1238f82e 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs @@ -61,7 +61,13 @@ pub struct ExportTracePartialSuccess { /// Generated client implementations. #[cfg(feature = "gen-tonic")] pub mod trace_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// Service that can be used to push spans between one Application instrumented with @@ -160,8 +166,7 @@ pub mod trace_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -184,7 +189,13 @@ pub mod trace_service_client { /// Generated server implementations. #[cfg(feature = "gen-tonic")] pub mod trace_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with TraceServiceServer. #[async_trait] @@ -325,17 +336,19 @@ pub mod trace_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", tonic::Code::Unimplemented as i32) - .header( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ) - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs index 4b08daffa2..b5bde05c27 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs @@ -10,6 +10,14 @@ pub struct AnyValue { /// The value is one of the listed fields. It is valid for all values to be unspecified /// in which case this AnyValue is considered to be "empty". 
#[prost(oneof = "any_value::Value", tags = "1, 2, 3, 4, 5, 6, 7")] + #[cfg_attr( + feature = "with-serde", + serde( + flatten, + serialize_with = "crate::proto::serializers::serialize_to_value", + deserialize_with = "crate::proto::serializers::deserialize_from_value" + ) + )] pub value: ::core::option::Option, } /// Nested message and enum types in `AnyValue`. @@ -75,13 +83,6 @@ pub struct KeyValue { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, #[prost(message, optional, tag = "2")] - #[cfg_attr( - feature = "with-serde", - serde( - serialize_with = "crate::proto::serializers::serialize_to_value", - deserialize_with = "crate::proto::serializers::deserialize_from_value" - ) - )] pub value: ::core::option::Option, } /// InstrumentationScope is a message representing the instrumentation scope information diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs index e05763747d..04f1c6dad4 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs @@ -37,7 +37,8 @@ pub struct ResourceLogs { #[prost(message, repeated, tag = "2")] pub scope_logs: ::prost::alloc::vec::Vec, /// The Schema URL, if known. This is the identifier of the Schema that the resource data - /// is recorded in. To learn more about Schema URL see + /// is recorded in. Notably, the last part of the URL path is the version number of the + /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_logs" field which have their own schema_url field. @@ -60,7 +61,8 @@ pub struct ScopeLogs { #[prost(message, repeated, tag = "2")] pub log_records: ::prost::alloc::vec::Vec, /// The Schema URL, if known. This is the identifier of the Schema that the log data - /// is recorded in. To learn more about Schema URL see + /// is recorded in. Notably, the last part of the URL path is the version number of the + /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see /// /// This schema_url applies to all logs in the "logs" field. #[prost(string, tag = "3")] @@ -122,13 +124,6 @@ pub struct LogRecord { /// string message (including multi-line) describing the event in a free form or it can /// be a structured data composed of arrays and maps of other values. \[Optional\]. #[prost(message, optional, tag = "5")] - #[cfg_attr( - feature = "with-serde", - serde( - serialize_with = "crate::proto::serializers::serialize_to_value", - deserialize_with = "crate::proto::serializers::deserialize_from_value" - ) - )] pub body: ::core::option::Option, /// Additional attributes that describe the specific event occurrence. \[Optional\]. /// Attribute keys MUST be unique (it is not allowed to have more than one @@ -185,6 +180,20 @@ pub struct LogRecord { ) )] pub span_id: ::prost::alloc::vec::Vec, + /// A unique identifier of event category/type. + /// All events with the same event_name are expected to conform to the same + /// schema for both their attributes and their body. + /// + /// Recommended to be fully qualified and short (no longer than 256 characters). + /// + /// Presence of event_name on the log record identifies this record + /// as an event. + /// + /// \[Optional\]. 
+ /// + /// Status: \[Development\] + #[prost(string, tag = "12")] + pub event_name: ::prost::alloc::string::String, } /// Possible values for LogRecord.SeverityNumber. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] @@ -227,31 +236,31 @@ impl SeverityNumber { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - SeverityNumber::Unspecified => "SEVERITY_NUMBER_UNSPECIFIED", - SeverityNumber::Trace => "SEVERITY_NUMBER_TRACE", - SeverityNumber::Trace2 => "SEVERITY_NUMBER_TRACE2", - SeverityNumber::Trace3 => "SEVERITY_NUMBER_TRACE3", - SeverityNumber::Trace4 => "SEVERITY_NUMBER_TRACE4", - SeverityNumber::Debug => "SEVERITY_NUMBER_DEBUG", - SeverityNumber::Debug2 => "SEVERITY_NUMBER_DEBUG2", - SeverityNumber::Debug3 => "SEVERITY_NUMBER_DEBUG3", - SeverityNumber::Debug4 => "SEVERITY_NUMBER_DEBUG4", - SeverityNumber::Info => "SEVERITY_NUMBER_INFO", - SeverityNumber::Info2 => "SEVERITY_NUMBER_INFO2", - SeverityNumber::Info3 => "SEVERITY_NUMBER_INFO3", - SeverityNumber::Info4 => "SEVERITY_NUMBER_INFO4", - SeverityNumber::Warn => "SEVERITY_NUMBER_WARN", - SeverityNumber::Warn2 => "SEVERITY_NUMBER_WARN2", - SeverityNumber::Warn3 => "SEVERITY_NUMBER_WARN3", - SeverityNumber::Warn4 => "SEVERITY_NUMBER_WARN4", - SeverityNumber::Error => "SEVERITY_NUMBER_ERROR", - SeverityNumber::Error2 => "SEVERITY_NUMBER_ERROR2", - SeverityNumber::Error3 => "SEVERITY_NUMBER_ERROR3", - SeverityNumber::Error4 => "SEVERITY_NUMBER_ERROR4", - SeverityNumber::Fatal => "SEVERITY_NUMBER_FATAL", - SeverityNumber::Fatal2 => "SEVERITY_NUMBER_FATAL2", - SeverityNumber::Fatal3 => "SEVERITY_NUMBER_FATAL3", - SeverityNumber::Fatal4 => "SEVERITY_NUMBER_FATAL4", + Self::Unspecified => "SEVERITY_NUMBER_UNSPECIFIED", + Self::Trace => "SEVERITY_NUMBER_TRACE", + Self::Trace2 => "SEVERITY_NUMBER_TRACE2", + Self::Trace3 => "SEVERITY_NUMBER_TRACE3", + Self::Trace4 => "SEVERITY_NUMBER_TRACE4", + Self::Debug => "SEVERITY_NUMBER_DEBUG", + Self::Debug2 => "SEVERITY_NUMBER_DEBUG2", + Self::Debug3 => "SEVERITY_NUMBER_DEBUG3", + Self::Debug4 => "SEVERITY_NUMBER_DEBUG4", + Self::Info => "SEVERITY_NUMBER_INFO", + Self::Info2 => "SEVERITY_NUMBER_INFO2", + Self::Info3 => "SEVERITY_NUMBER_INFO3", + Self::Info4 => "SEVERITY_NUMBER_INFO4", + Self::Warn => "SEVERITY_NUMBER_WARN", + Self::Warn2 => "SEVERITY_NUMBER_WARN2", + Self::Warn3 => "SEVERITY_NUMBER_WARN3", + Self::Warn4 => "SEVERITY_NUMBER_WARN4", + Self::Error => "SEVERITY_NUMBER_ERROR", + Self::Error2 => "SEVERITY_NUMBER_ERROR2", + Self::Error3 => "SEVERITY_NUMBER_ERROR3", + Self::Error4 => "SEVERITY_NUMBER_ERROR4", + Self::Fatal => "SEVERITY_NUMBER_FATAL", + Self::Fatal2 => "SEVERITY_NUMBER_FATAL2", + Self::Fatal3 => "SEVERITY_NUMBER_FATAL3", + Self::Fatal4 => "SEVERITY_NUMBER_FATAL4", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -313,8 +322,8 @@ impl LogRecordFlags { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - LogRecordFlags::DoNotUse => "LOG_RECORD_FLAGS_DO_NOT_USE", - LogRecordFlags::TraceFlagsMask => "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK", + Self::DoNotUse => "LOG_RECORD_FLAGS_DO_NOT_USE", + Self::TraceFlagsMask => "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK", } } /// Creates an enum from field names used in the ProtoBuf definition. 
diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs index 54d6c69856..fb04bec6cc 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs @@ -3,6 +3,24 @@ /// storage, OR can be embedded by other protocols that transfer OTLP metrics /// data but do not implement the OTLP protocol. /// +/// MetricsData +/// └─── ResourceMetrics +/// ├── Resource +/// ├── SchemaURL +/// └── ScopeMetrics +/// ├── Scope +/// ├── SchemaURL +/// └── Metric +/// ├── Name +/// ├── Description +/// ├── Unit +/// └── data +/// ├── Gauge +/// ├── Sum +/// ├── Histogram +/// ├── ExponentialHistogram +/// └── Summary +/// /// The main difference between this message and collector protocol is that /// in this message there will not be any "control" or "metadata" specific to /// OTLP protocol. @@ -37,7 +55,8 @@ pub struct ResourceMetrics { #[prost(message, repeated, tag = "2")] pub scope_metrics: ::prost::alloc::vec::Vec, /// The Schema URL, if known. This is the identifier of the Schema that the resource data - /// is recorded in. To learn more about Schema URL see + /// is recorded in. Notably, the last part of the URL path is the version number of the + /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_metrics" field which have their own schema_url field. @@ -60,7 +79,8 @@ pub struct ScopeMetrics { #[prost(message, repeated, tag = "2")] pub metrics: ::prost::alloc::vec::Vec, /// The Schema URL, if known. This is the identifier of the Schema that the metric data - /// is recorded in. To learn more about Schema URL see + /// is recorded in. Notably, the last part of the URL path is the version number of the + /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see /// /// This schema_url applies to all metrics in the "metrics" field. #[prost(string, tag = "3")] @@ -71,7 +91,6 @@ pub struct ScopeMetrics { /// /// /// -/// /// The data model and relation between entities is shown in the /// diagram below. Here, "DataPoint" is the term used to refer to any /// one of the specific data point value types, and "points" is the term used @@ -83,7 +102,7 @@ pub struct ScopeMetrics { /// - DataPoint contains timestamps, attributes, and one of the possible value type /// fields. /// -/// Metric +/// Metric /// +------------+ /// |name | /// |description | @@ -277,6 +296,9 @@ pub struct ExponentialHistogram { /// data type. These data points cannot always be merged in a meaningful way. /// While they can be useful in some applications, histogram data points are /// recommended for new applications. +/// Summary metrics do not have an aggregation temporality field. This is +/// because the count and sum fields of a SummaryDataPoint are assumed to be +/// cumulative values. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] @@ -417,7 +439,7 @@ pub struct HistogramDataPoint { /// events, and is assumed to be monotonic over the values of these events. /// Negative events *can* be recorded, but sum should not be filled out when /// doing so. 
This is specifically to enforce compatibility w/ OpenMetrics, - /// see: + /// see: #[prost(double, optional, tag = "5")] pub sum: ::core::option::Option, /// bucket_counts is an optional field contains the count values of histogram @@ -500,7 +522,7 @@ pub struct ExponentialHistogramDataPoint { /// events, and is assumed to be monotonic over the values of these events. /// Negative events *can* be recorded, but sum should not be filled out when /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, - /// see: + /// see: #[prost(double, optional, tag = "5")] pub sum: ::core::option::Option, /// scale describes the resolution of the histogram. Boundaries are @@ -587,7 +609,8 @@ pub mod exponential_histogram_data_point { } } /// SummaryDataPoint is a single data point in a timeseries that describes the -/// time-varying values of a Summary metric. +/// time-varying values of a Summary metric. The count and sum fields represent +/// cumulative values. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] @@ -622,7 +645,7 @@ pub struct SummaryDataPoint { /// events, and is assumed to be monotonic over the values of these events. /// Negative events *can* be recorded, but sum should not be filled out when /// doing so. This is specifically to enforce compatibility w/ OpenMetrics, - /// see: + /// see: #[prost(double, tag = "5")] pub sum: f64, /// (Optional) list of values at different quantiles of the distribution calculated @@ -808,9 +831,9 @@ impl AggregationTemporality { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - AggregationTemporality::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED", - AggregationTemporality::Delta => "AGGREGATION_TEMPORALITY_DELTA", - AggregationTemporality::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE", + Self::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED", + Self::Delta => "AGGREGATION_TEMPORALITY_DELTA", + Self::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -851,10 +874,8 @@ impl DataPointFlags { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - DataPointFlags::DoNotUse => "DATA_POINT_FLAGS_DO_NOT_USE", - DataPointFlags::NoRecordedValueMask => { - "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK" - } + Self::DoNotUse => "DATA_POINT_FLAGS_DO_NOT_USE", + Self::NoRecordedValueMask => "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK", } } /// Creates an enum from field names used in the ProtoBuf definition. diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs index 675fee275c..2e37483e94 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs @@ -37,7 +37,8 @@ pub struct ResourceSpans { #[prost(message, repeated, tag = "2")] pub scope_spans: ::prost::alloc::vec::Vec, /// The Schema URL, if known. This is the identifier of the Schema that the resource data - /// is recorded in. To learn more about Schema URL see + /// is recorded in. 
Notably, the last part of the URL path is the version number of the + /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_spans" field which have their own schema_url field. @@ -60,7 +61,8 @@ pub struct ScopeSpans { #[prost(message, repeated, tag = "2")] pub spans: ::prost::alloc::vec::Vec, /// The Schema URL, if known. This is the identifier of the Schema that the span data - /// is recorded in. To learn more about Schema URL see + /// is recorded in. Notably, the last part of the URL path is the version number of the + /// schema: http\[s\]://server\[:port\]/path/. To learn more about Schema URL see /// /// This schema_url applies to all spans and span events in the "spans" field. #[prost(string, tag = "3")] @@ -378,12 +380,12 @@ pub mod span { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - SpanKind::Unspecified => "SPAN_KIND_UNSPECIFIED", - SpanKind::Internal => "SPAN_KIND_INTERNAL", - SpanKind::Server => "SPAN_KIND_SERVER", - SpanKind::Client => "SPAN_KIND_CLIENT", - SpanKind::Producer => "SPAN_KIND_PRODUCER", - SpanKind::Consumer => "SPAN_KIND_CONSUMER", + Self::Unspecified => "SPAN_KIND_UNSPECIFIED", + Self::Internal => "SPAN_KIND_INTERNAL", + Self::Server => "SPAN_KIND_SERVER", + Self::Client => "SPAN_KIND_CLIENT", + Self::Producer => "SPAN_KIND_PRODUCER", + Self::Consumer => "SPAN_KIND_CONSUMER", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -450,9 +452,9 @@ pub mod status { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - StatusCode::Unset => "STATUS_CODE_UNSET", - StatusCode::Ok => "STATUS_CODE_OK", - StatusCode::Error => "STATUS_CODE_ERROR", + Self::Unset => "STATUS_CODE_UNSET", + Self::Ok => "STATUS_CODE_OK", + Self::Error => "STATUS_CODE_ERROR", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -504,10 +506,10 @@ impl SpanFlags { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - SpanFlags::DoNotUse => "SPAN_FLAGS_DO_NOT_USE", - SpanFlags::TraceFlagsMask => "SPAN_FLAGS_TRACE_FLAGS_MASK", - SpanFlags::ContextHasIsRemoteMask => "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", - SpanFlags::ContextIsRemoteMask => "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", + Self::DoNotUse => "SPAN_FLAGS_DO_NOT_USE", + Self::TraceFlagsMask => "SPAN_FLAGS_TRACE_FLAGS_MASK", + Self::ContextHasIsRemoteMask => "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + Self::ContextIsRemoteMask => "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", } } /// Creates an enum from field names used in the ProtoBuf definition. 
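The `SpanFlags` values above are bit masks rather than ordinary enum variants: the low 8 bits mirror the W3C trace flags, and the two `CONTEXT_*` bits record whether the parent context's remoteness is known and what it is. A minimal sketch of how a consumer of the generated types might decode the `flags` field of a `Span` (the `span_is_remote` helper below is illustrative, not part of the generated code):

```rust
use opentelemetry_proto::tonic::trace::v1::SpanFlags;

// Illustrative helper: interpret the `flags` field of a generated `Span`.
// Returns None when the producer did not record whether the parent span
// context was remote, and Some(is_remote) otherwise.
fn span_is_remote(flags: u32) -> Option<bool> {
    if flags & SpanFlags::ContextHasIsRemoteMask as u32 != 0 {
        // The "has is_remote" bit is set, so the "is_remote" bit is meaningful.
        Some(flags & SpanFlags::ContextIsRemoteMask as u32 != 0)
    } else {
        None
    }
}
```

Splitting "known" and "value" across two bits is what lets the protocol distinguish "parent is known to be local" from "remoteness unknown" without an extra field.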
diff --git a/opentelemetry-proto/src/transform/common.rs b/opentelemetry-proto/src/transform/common.rs index ff42479288..37efc27199 100644 --- a/opentelemetry-proto/src/transform/common.rs +++ b/opentelemetry-proto/src/transform/common.rs @@ -44,13 +44,13 @@ pub mod tonic { impl From<( - opentelemetry_sdk::InstrumentationLibrary, + opentelemetry::InstrumentationScope, Option<Cow<'static, str>>, )> for InstrumentationScope { fn from( data: ( - opentelemetry_sdk::InstrumentationLibrary, + opentelemetry::InstrumentationScope, Option<Cow<'static, str>>, ), ) -> Self { @@ -64,9 +64,9 @@ pub mod tonic { } } else { InstrumentationScope { - name: library.name.into_owned(), - version: library.version.map(Cow::into_owned).unwrap_or_default(), - attributes: Attributes::from(library.attributes).0, + name: library.name().to_owned(), + version: library.version().map(ToOwned::to_owned).unwrap_or_default(), + attributes: Attributes::from(library.attributes().cloned()).0, ..Default::default() } } @@ -75,13 +75,13 @@ pub mod tonic { impl From<( - &opentelemetry_sdk::InstrumentationLibrary, + &opentelemetry::InstrumentationScope, Option<Cow<'static, str>>, )> for InstrumentationScope { fn from( data: ( - &opentelemetry_sdk::InstrumentationLibrary, + &opentelemetry::InstrumentationScope, Option<Cow<'static, str>>, ), ) -> Self { @@ -95,13 +95,9 @@ pub mod tonic { } } else { InstrumentationScope { - name: library.name.to_string(), - version: library - .version - .as_ref() - .map(ToString::to_string) - .unwrap_or_default(), - attributes: Attributes::from(library.attributes.clone()).0, + name: library.name().to_owned(), + version: library.version().map(ToOwned::to_owned).unwrap_or_default(), + attributes: Attributes::from(library.attributes().cloned()).0, ..Default::default() } } @@ -112,8 +108,8 @@ pub mod tonic { #[derive(Default, Debug)] pub struct Attributes(pub ::std::vec::Vec<KeyValue>); - impl From<Vec<opentelemetry::KeyValue>> for Attributes { - fn from(kvs: Vec<opentelemetry::KeyValue>) -> Self { + impl<I: IntoIterator<Item = opentelemetry::KeyValue>> From<I> for Attributes { + fn from(kvs: I) -> Self { Attributes( kvs.into_iter() .map(|api_kv| KeyValue { @@ -152,7 +148,9 @@ pub mod tonic { Array::I64(vals) => array_into_proto(vals), Array::F64(vals) => array_into_proto(vals), Array::String(vals) => array_into_proto(vals), + _ => unreachable!("Nonexistent array type"), // Needs to be updated when new array types are added })), + _ => unreachable!("Nonexistent value type"), // Needs to be updated when new value types are added }, } } diff --git a/opentelemetry-proto/src/transform/logs.rs b/opentelemetry-proto/src/transform/logs.rs index 4cd2c1617b..785bdfd97f 100644 --- a/opentelemetry-proto/src/transform/logs.rs +++ b/opentelemetry-proto/src/transform/logs.rs @@ -51,14 +51,15 @@ pub mod tonic { .collect(), }), LogsAnyValue::Bytes(v) => Value::BytesValue(*v), + _ => unreachable!("Nonexistent value type"), } } } impl From<&opentelemetry_sdk::logs::LogRecord> for LogRecord { fn from(log_record: &opentelemetry_sdk::logs::LogRecord) -> Self { - let trace_context = log_record.trace_context.as_ref(); - let severity_number = match log_record.severity_number { + let trace_context = log_record.trace_context(); + let severity_number = match log_record.severity_number() { Some(Severity::Trace) => SeverityNumber::Trace, Some(Severity::Trace2) => SeverityNumber::Trace2, Some(Severity::Trace3) => SeverityNumber::Trace3, @@ -87,8 +88,8 @@ pub mod tonic { }; LogRecord { - time_unix_nano: log_record.timestamp.map(to_nanos).unwrap_or_default(), - observed_time_unix_nano: to_nanos(log_record.observed_timestamp.unwrap()), + time_unix_nano: log_record.timestamp().map(to_nanos).unwrap_or_default(), +
observed_time_unix_nano: to_nanos(log_record.observed_timestamp().unwrap()), attributes: { let attributes: Vec = log_record .attributes_iter() @@ -101,7 +102,7 @@ pub mod tonic { .collect(); #[cfg(feature = "populate-logs-event-name")] { - if let Some(event_name) = &log_record.event_name { + if let Some(event_name) = &log_record.event_name() { let mut attributes_with_name = attributes; attributes_with_name.push(KeyValue { key: "event.name".into(), @@ -117,9 +118,13 @@ pub mod tonic { #[cfg(not(feature = "populate-logs-event-name"))] attributes }, + event_name: log_record.event_name().unwrap_or_default().into(), severity_number: severity_number.into(), - severity_text: log_record.severity_text.map(Into::into).unwrap_or_default(), - body: log_record.body.clone().map(Into::into), + severity_text: log_record + .severity_text() + .map(Into::into) + .unwrap_or_default(), + body: log_record.body().cloned().map(Into::into), dropped_attributes_count: 0, flags: trace_context .map(|ctx| { @@ -142,7 +147,7 @@ pub mod tonic { From<( ( &opentelemetry_sdk::logs::LogRecord, - &opentelemetry::InstrumentationLibrary, + &opentelemetry::InstrumentationScope, ), &ResourceAttributesWithSchema, )> for ResourceLogs @@ -151,7 +156,7 @@ pub mod tonic { data: ( ( &opentelemetry_sdk::logs::LogRecord, - &opentelemetry::InstrumentationLibrary, + &opentelemetry::InstrumentationScope, ), &ResourceAttributesWithSchema, ), @@ -166,11 +171,10 @@ pub mod tonic { schema_url: resource.schema_url.clone().unwrap_or_default(), scope_logs: vec![ScopeLogs { schema_url: instrumentation - .schema_url - .clone() - .map(Into::into) + .schema_url() + .map(ToOwned::to_owned) .unwrap_or_default(), - scope: Some((instrumentation, log_record.target.clone()).into()), + scope: Some((instrumentation, log_record.target().cloned()).into()), log_records: vec![log_record.into()], }], } @@ -188,14 +192,14 @@ pub mod tonic { Cow<'static, str>, Vec<( &opentelemetry_sdk::logs::LogRecord, - &opentelemetry::InstrumentationLibrary, + &opentelemetry::InstrumentationScope, )>, >, (log_record, instrumentation)| { let key = log_record - .target - .clone() - .unwrap_or_else(|| Cow::Owned(instrumentation.name.clone().into_owned())); + .target() + .cloned() + .unwrap_or_else(|| Cow::Owned(instrumentation.name().to_owned())); scope_map .entry(key) .or_default() @@ -234,25 +238,25 @@ pub mod tonic { mod tests { use crate::transform::common::tonic::ResourceAttributesWithSchema; use opentelemetry::logs::LogRecord as _; - use opentelemetry::InstrumentationLibrary; + use opentelemetry::InstrumentationScope; use opentelemetry_sdk::{export::logs::LogBatch, logs::LogRecord, Resource}; use std::time::SystemTime; fn create_test_log_data( instrumentation_name: &str, _message: &str, - ) -> (LogRecord, InstrumentationLibrary) { + ) -> (LogRecord, InstrumentationScope) { let mut logrecord = LogRecord::default(); logrecord.set_timestamp(SystemTime::now()); logrecord.set_observed_timestamp(SystemTime::now()); let instrumentation = - InstrumentationLibrary::builder(instrumentation_name.to_string()).build(); + InstrumentationScope::builder(instrumentation_name.to_string()).build(); (logrecord, instrumentation) } #[test] fn test_group_logs_by_resource_and_scope_single_scope() { - let resource = Resource::default(); + let resource = Resource::builder().build(); let (log_record1, instrum_lib1) = create_test_log_data("test-lib", "Log 1"); let (log_record2, instrum_lib2) = create_test_log_data("test-lib", "Log 2"); @@ -273,7 +277,7 @@ mod tests { #[test] fn 
test_group_logs_by_resource_and_scope_multiple_scopes() { - let resource = Resource::default(); + let resource = Resource::builder().build(); let (log_record1, instrum_lib1) = create_test_log_data("lib1", "Log 1"); let (log_record2, instrum_lib2) = create_test_log_data("lib2", "Log 2"); diff --git a/opentelemetry-proto/src/transform/metrics.rs b/opentelemetry-proto/src/transform/metrics.rs index f718c96280..cb135ebf83 100644 --- a/opentelemetry-proto/src/transform/metrics.rs +++ b/opentelemetry-proto/src/transform/metrics.rs @@ -8,12 +8,13 @@ pub mod tonic { use std::any::Any; use std::fmt; - use opentelemetry::{global, metrics::MetricsError, Key, Value}; + use opentelemetry::{otel_debug, Key, Value}; use opentelemetry_sdk::metrics::data::{ self, Exemplar as SdkExemplar, ExponentialHistogram as SdkExponentialHistogram, Gauge as SdkGauge, Histogram as SdkHistogram, Metric as SdkMetric, - ScopeMetrics as SdkScopeMetrics, Sum as SdkSum, Temporality, + ScopeMetrics as SdkScopeMetrics, Sum as SdkSum, }; + use opentelemetry_sdk::metrics::Temporality; use opentelemetry_sdk::Resource as SdkResource; use crate::proto::tonic::{ @@ -97,10 +98,12 @@ pub mod tonic { Temporality::Cumulative => AggregationTemporality::Cumulative, Temporality::Delta => AggregationTemporality::Delta, other => { - opentelemetry::global::handle_error(MetricsError::Other(format!( - "Unknown temporality {:?}, using default instead.", - other - ))); + otel_debug!( + name: "AggregationTemporality::Unknown", + message = "Unknown temporality,using default instead.", + unknown_temporality = format!("{:?}", other), + default_temporality = format!("{:?}", Temporality::Cumulative) + ); AggregationTemporality::Cumulative } } @@ -135,9 +138,8 @@ pub mod tonic { metrics: sm.metrics.iter().map(Into::into).collect(), schema_url: sm .scope - .schema_url - .as_ref() - .map(ToString::to_string) + .schema_url() + .map(ToOwned::to_owned) .unwrap_or_default(), } } @@ -184,7 +186,11 @@ pub mod tonic { } else if let Some(gauge) = data.downcast_ref::>() { Ok(TonicMetricData::Gauge(gauge.into())) } else { - global::handle_error(MetricsError::Other("unknown aggregator".into())); + otel_debug!( + name: "TonicMetricData::UnknownAggregator", + message= "Unknown aggregator type", + unknown_type= format!("{:?}", data), + ); Err(()) } } @@ -224,8 +230,8 @@ pub mod tonic { .iter() .map(|dp| TonicHistogramDataPoint { attributes: dp.attributes.iter().map(Into::into).collect(), - start_time_unix_nano: to_nanos(dp.start_time), - time_unix_nano: to_nanos(dp.time), + start_time_unix_nano: to_nanos(hist.start_time), + time_unix_nano: to_nanos(hist.time), count: dp.count, sum: Some(dp.sum.into_f64()), bucket_counts: dp.bucket_counts.clone(), @@ -252,8 +258,8 @@ pub mod tonic { .iter() .map(|dp| TonicExponentialHistogramDataPoint { attributes: dp.attributes.iter().map(Into::into).collect(), - start_time_unix_nano: to_nanos(dp.start_time), - time_unix_nano: to_nanos(dp.time), + start_time_unix_nano: to_nanos(hist.start_time), + time_unix_nano: to_nanos(hist.time), count: dp.count as u64, sum: Some(dp.sum.into_f64()), scale: dp.scale.into(), @@ -289,8 +295,8 @@ pub mod tonic { .iter() .map(|dp| TonicNumberDataPoint { attributes: dp.attributes.iter().map(Into::into).collect(), - start_time_unix_nano: dp.start_time.map(to_nanos).unwrap_or_default(), - time_unix_nano: dp.time.map(to_nanos).unwrap_or_default(), + start_time_unix_nano: to_nanos(sum.start_time), + time_unix_nano: to_nanos(sum.time), exemplars: dp.exemplars.iter().map(Into::into).collect(), flags: 
TonicDataPointFlags::default() as u32, value: Some(dp.value.into()), @@ -313,8 +319,8 @@ pub mod tonic { .iter() .map(|dp| TonicNumberDataPoint { attributes: dp.attributes.iter().map(Into::into).collect(), - start_time_unix_nano: dp.start_time.map(to_nanos).unwrap_or_default(), - time_unix_nano: dp.time.map(to_nanos).unwrap_or_default(), + start_time_unix_nano: gauge.start_time.map(to_nanos).unwrap_or_default(), + time_unix_nano: to_nanos(gauge.time), exemplars: dp.exemplars.iter().map(Into::into).collect(), flags: TonicDataPointFlags::default() as u32, value: Some(dp.value.into()), diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs index 12871b33aa..8806af41c9 100644 --- a/opentelemetry-proto/src/transform/trace.rs +++ b/opentelemetry-proto/src/transform/trace.rs @@ -101,12 +101,11 @@ pub mod tonic { schema_url: resource.schema_url.clone().unwrap_or_default(), scope_spans: vec![ScopeSpans { schema_url: source_span - .instrumentation_lib - .schema_url - .as_ref() - .map(ToString::to_string) + .instrumentation_scope + .schema_url() + .map(ToOwned::to_owned) .unwrap_or_default(), - scope: Some((source_span.instrumentation_lib, None).into()), + scope: Some((source_span.instrumentation_scope, None).into()), spans: vec![Span { trace_id: source_span.span_context.trace_id().to_bytes().to_vec(), span_id: source_span.span_context.span_id().to_bytes().to_vec(), @@ -155,12 +154,11 @@ pub mod tonic { spans: Vec, resource: &ResourceAttributesWithSchema, ) -> Vec { - // Group spans by their instrumentation library + // Group spans by their instrumentation scope let scope_map = spans.iter().fold( HashMap::new(), - |mut scope_map: HashMap<&opentelemetry_sdk::InstrumentationLibrary, Vec<&SpanData>>, - span| { - let instrumentation = &span.instrumentation_lib; + |mut scope_map: HashMap<&opentelemetry::InstrumentationScope, Vec<&SpanData>>, span| { + let instrumentation = &span.instrumentation_scope; scope_map.entry(instrumentation).or_default().push(span); scope_map }, @@ -198,11 +196,11 @@ mod tests { use opentelemetry::trace::{ SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState, }; + use opentelemetry::InstrumentationScope; use opentelemetry::KeyValue; use opentelemetry_sdk::export::trace::SpanData; use opentelemetry_sdk::resource::Resource; use opentelemetry_sdk::trace::{SpanEvents, SpanLinks}; - use opentelemetry_sdk::InstrumentationLibrary; use std::borrow::Cow; use std::time::{Duration, SystemTime}; @@ -227,13 +225,15 @@ mod tests { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - instrumentation_lib: InstrumentationLibrary::builder(instrumentation_name).build(), + instrumentation_scope: InstrumentationScope::builder(instrumentation_name).build(), } } #[test] fn test_group_spans_by_resource_and_scope_single_scope() { - let resource = Resource::new(vec![KeyValue::new("resource_key", "resource_value")]); + let resource = Resource::builder_empty() + .with_attribute(KeyValue::new("resource_key", "resource_value")) + .build(); let span_data = create_test_span_data("lib1"); let spans = vec![span_data.clone()]; @@ -278,7 +278,9 @@ mod tests { #[test] fn test_group_spans_by_resource_and_scope_multiple_scopes() { - let resource = Resource::new(vec![KeyValue::new("resource_key", "resource_value")]); + let resource = Resource::builder_empty() + .with_attribute(KeyValue::new("resource_key", "resource_value")) + .build(); let span_data1 = create_test_span_data("lib1"); let span_data2 = 
create_test_span_data("lib1"); let span_data3 = create_test_span_data("lib2"); diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index b01c992c75..d09a13cd64 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -111,11 +111,10 @@ fn build_tonic() { .field_attribute(path, "#[cfg_attr(feature = \"with-serde\", serde(serialize_with = \"crate::proto::serializers::serialize_u64_to_string\", deserialize_with = \"crate::proto::serializers::deserialize_string_to_u64\"))]") } - // add custom serializer and deserializer for AnyValue - for path in ["common.v1.KeyValue.value", "logs.v1.LogRecord.body"] { - builder = builder - .field_attribute(path, "#[cfg_attr(feature =\"with-serde\", serde(serialize_with = \"crate::proto::serializers::serialize_to_value\", deserialize_with = \"crate::proto::serializers::deserialize_from_value\"))]"); - } + // special serializer and deserializer for value + // The Value::value field must be hidden + builder = builder + .field_attribute("common.v1.AnyValue.value", "#[cfg_attr(feature =\"with-serde\", serde(flatten, serialize_with = \"crate::proto::serializers::serialize_to_value\", deserialize_with = \"crate::proto::serializers::deserialize_from_value\"))]"); // flatten for path in ["metrics.v1.Metric.data", "metrics.v1.NumberDataPoint.value"] { @@ -125,7 +124,7 @@ fn build_tonic() { builder .out_dir(out_dir.path()) - .compile(TONIC_PROTO_FILES, TONIC_INCLUDES) + .compile_protos(TONIC_PROTO_FILES, TONIC_INCLUDES) .expect("cannot compile protobuf using tonic"); let after_build = build_content_map(out_dir.path(), true); diff --git a/opentelemetry-proto/tests/json_serde.rs b/opentelemetry-proto/tests/json_serde.rs index ba04cdf613..389541cce7 100644 --- a/opentelemetry-proto/tests/json_serde.rs +++ b/opentelemetry-proto/tests/json_serde.rs @@ -518,14 +518,10 @@ mod json_serde { "arrayValue": { "values": [ { - "value": { - "stringValue": "foo" - } + "stringValue": "foo" }, { - "value": { - "stringValue": "bar" - } + "stringValue": "bar" } ] } @@ -557,14 +553,10 @@ mod json_serde { "arrayValue": { "values": [ { - "value": { - "stringValue": "foo" - } + "stringValue": "foo" }, { - "value": { - "intValue": 1337 - } + "intValue": "1337" } ] } @@ -1195,6 +1187,7 @@ mod json_serde { "Example log record", ))), }), + event_name: "test_log_event".to_string(), attributes: vec![ KeyValue { key: String::from("string.attribute"), @@ -1339,14 +1332,10 @@ mod json_serde { "arrayValue": { "values": [ { - "value": { - "stringValue": "many" - } + "stringValue": "many" }, { - "value": { - "stringValue": "values" - } + "stringValue": "values" } ] } @@ -1371,7 +1360,8 @@ mod json_serde { "droppedAttributesCount": 0, "flags": 0, "traceId": "5b8efff798038103d269b633813fc60c", - "spanId": "eee19b7ec3c1b174" + "spanId": "eee19b7ec3c1b174", + "eventName": "test_log_event" } ], "schemaUrl": "" @@ -1453,14 +1443,10 @@ mod json_serde { "arrayValue": { "values": [ { - "value": { - "stringValue": "many" - } + "stringValue": "many" }, { - "value": { - "stringValue": "values" - } + "stringValue": "values" } ] } @@ -1481,7 +1467,8 @@ mod json_serde { } } } - ] + ], + "eventName": "test_log_event" } ] } diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index dd164a4756..ffaf1f6f2c 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -2,10 +2,160 @@ ## vNext -- Update `async-std` dependency version to 1.13 -- *Breaking* - Remove support for `MetricProducer` 
which allowed metrics from
- external sources to be sent through OpenTelemetry.
- [#2105](https://github.com/open-telemetry/opentelemetry-rust/pull/2105)
+- *Breaking (affects custom metric exporter authors only)*: `start_time` and `time` are moved from DataPoints to aggregations (Sum, Gauge, Histogram, ExpoHistogram); see [#2377](https://github.com/open-telemetry/opentelemetry-rust/pull/2377) and [#2411](https://github.com/open-telemetry/opentelemetry-rust/pull/2411). This reduces memory usage.
+
+- *Breaking*: `start_time` is no longer optional for the `Sum` aggregation (see [#2367](https://github.com/open-telemetry/opentelemetry-rust/pull/2367)), but is still optional for the `Gauge` aggregation (see [#2389](https://github.com/open-telemetry/opentelemetry-rust/pull/2389)).
+
+- *Breaking*
+  - `SimpleLogProcessor` modified to be generic over `LogExporter` to
+    avoid dynamic dispatch when invoking the exporter. If you were using
+    `with_simple_exporter` to add a `LogExporter` with `SimpleLogProcessor`, this is a
+    transparent change.
+    [#2338](https://github.com/open-telemetry/opentelemetry-rust/pull/2338)
+  - `ResourceDetector.detect()` no longer supports a timeout option.
+  - `opentelemetry::global::shutdown_tracer_provider()` has been removed from the API; use `tracer_provider.shutdown()` instead. See [#2369](https://github.com/open-telemetry/opentelemetry-rust/pull/2369) for a migration example. A tracer provider is cheaply cloneable, so users are encouraged to set a clone of it as the global (e.g. `global::set_tracer_provider(provider.clone())`), so that instrumentations and other components can obtain tracers from `global::tracer()`. The tracer provider must be kept around so that shutdown can be called on it at the end of the application (e.g. `tracer_provider.shutdown()`).
+- *Feature*: Added `ResourceBuilder` for an easy way to create new `Resource`s.
+- *Breaking*: Removed `Resource::{new,empty,from_detectors,new_with_defaults,from_schema_url,merge,default}` from the public API. To create a `Resource`, use `Resource::builder()` or `Resource::builder_empty()`. See [#2322](https://github.com/open-telemetry/opentelemetry-rust/pull/2322) for a migration guide.
+  Example usage:
+  ```rust
+  // old
+  Resource::default().with_attributes([
+      KeyValue::new("service.name", "test_service"),
+      KeyValue::new("key", "value"),
+  ]);
+
+  // new
+  Resource::builder()
+      .with_service_name("test_service")
+      .with_attribute(KeyValue::new("key", "value"))
+      .build();
+  ```
+- *Breaking*: The `LogExporter::export()` method no longer requires a mutable reference to self:
+  Before: `async fn export(&mut self, _batch: LogBatch<'_>) -> LogResult<()>`
+  After: `async fn export(&self, _batch: LogBatch<'_>) -> LogResult<()>`
+  Custom exporters will need to internally synchronize any mutable state, if applicable.
+
+- *Breaking*: Removed the following deprecated struct:
+  - `logs::LogData` - Previously deprecated in version 0.27.1.
+    Migration guidance: this structure is no longer utilized within the SDK, and users should not have dependencies on it.
+
+- *Breaking*: Removed the following deprecated methods:
+  - `Logger::provider()`: Previously deprecated in version 0.27.1.
+  - `Logger::instrumentation_scope()`: Previously deprecated in version 0.27.1.
+  Migration guidance:
+  - These methods were intended for log appenders. Keep a clone of the provider handle instead of depending on the above methods.
+
+- *Breaking* - `PeriodicReader` Updates
+
+  `PeriodicReader` no longer requires an async runtime by default. Instead, it
+  now creates its own background thread for execution.
This change allows + metrics to be used in environments without async runtimes. + + For users who prefer the previous behavior of relying on a specific + `Runtime`, they can do so by enabling the feature flag + **`experimental_metrics_periodicreader_with_async_runtime`**. + + Migration Guide: + + 1. *Default Implementation, requires no async runtime* (**Recommended**) The + new default implementation does not require a runtime argument. Replace the + builder method accordingly: + - *Before:* + ```rust + let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter, runtime::Tokio).build(); + ``` + - *After:* + ```rust + let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter).build(); + ``` + + 2. *Async Runtime Support* + If your application cannot spin up new threads or you prefer using async + runtimes, enable the + "experimental_metrics_periodicreader_with_async_runtime" feature flag and + adjust code as below. + + - *Before:* + ```rust + let reader = opentelemetry_sdk::metrics::PeriodicReader::builder(exporter, runtime::Tokio).build(); + ``` + + - *After:* + ```rust + let reader = opentelemetry_sdk::metrics::periodic_reader_with_async_runtime::PeriodicReader::builder(exporter, runtime::Tokio).build(); + ``` + + *Requirements:* + - Enable the feature flag: + `experimental_metrics_periodicreader_with_async_runtime`. + - Continue enabling one of the async runtime feature flags: `rt-tokio`, + `rt-tokio-current-thread`, or `rt-async-std`. + + - Bump msrv to 1.75.0. + +- *Breaking* : [#2314](https://github.com/open-telemetry/opentelemetry-rust/pull/2314) + - The LogRecord struct has been updated: + - All fields are now pub(crate) instead of pub. + - Getter methods have been introduced to access field values. + This change impacts custom exporter and processor developers by requiring updates to code that directly accessed LogRecord fields. They must now use the provided getter methods (e.g., `log_record.event_name()` instead of `log_record.event_name`). + +- Upgrade the tracing crate used for internal logging to version 0.1.40 or later. This is necessary because the internal logging macros utilize the name field as +metadata, a feature introduced in version 0.1.40. [#2418](https://github.com/open-telemetry/opentelemetry-rust/pull/2418) + +- **Breaking** [#2436](https://github.com/open-telemetry/opentelemetry-rust/pull/2436) + + `BatchLogProcessor` no longer requires an async runtime by default. Instead, a dedicated + background thread is created to do the batch processing and exporting. + + For users who prefer the previous behavior of relying on a specific + `Runtime`, they can do so by enabling the feature flag + **`experimental_logs_batch_log_processor_with_async_runtime`**. + + 1. *Default Implementation, requires no async runtime* (**Recommended**) The + new default implementation does not require a runtime argument. Replace the + builder method accordingly: + - *Before:* + ```rust + let logger_provider = LoggerProvider::builder() + .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build()) + .build(); + ``` + + - *After:* + ```rust + let logger_provider = LoggerProvider::builder() + .with_log_processor(BatchLogProcessor::builder(exporter).build()) + .build(); + ``` + + 2. *Async Runtime Support* + If your application cannot spin up new threads or you prefer using async + runtimes, enable the + "experimental_logs_batch_log_processor_with_async_runtime" feature flag and + adjust code as below. 
+
+  - *Before:*
+    ```rust
+    let logger_provider = LoggerProvider::builder()
+        .with_log_processor(BatchLogProcessor::builder(exporter, runtime::Tokio).build())
+        .build();
+    ```
+
+  - *After:*
+    ```rust
+    let logger_provider = LoggerProvider::builder()
+        .with_log_processor(BatchLogProcessorWithAsyncRuntime::builder(exporter, runtime::Tokio).build())
+        .build();
+    ```
+
+  *Requirements:*
+  - Enable the feature flag:
+    `experimental_logs_batch_log_processor_with_async_runtime`.
+  - Continue enabling one of the async runtime feature flags: `rt-tokio`,
+    `rt-tokio-current-thread`, or `rt-async-std`.

- Added two new methods to the `LogRecord` struct's public API:
  ```rust
  update_attribute(&Key, &AnyValue) -> Option<AnyValue>
@@ -16,6 +166,130 @@
  remove_attribute(&mut self, key: &Key) -> usize
  ```
  - Removes all occurrences of attributes with the specified key and returns the count of deleted attributes.
+## 0.27.1
+
+Released 2024-Nov-27
+
+- **DEPRECATED**:
+  - `trace::Config` methods are moving onto the `TracerProvider` builder to be consistent with other signals. See https://github.com/open-telemetry/opentelemetry-rust/pull/2303 for a migration guide.
+    `trace::Config` is scheduled to be removed from the public API in `v0.28.0`.
+    Example:
+    ```rust
+    // old
+    let tracer_provider: TracerProvider = TracerProvider::builder()
+        .with_config(Config::default().with_resource(Resource::empty()))
+        .build();
+
+    // new
+    let tracer_provider: TracerProvider = TracerProvider::builder()
+        .with_resource(Resource::empty())
+        .build();
+    ```
+  - The `logs::LogData` struct is deprecated, and scheduled to be removed from the public API in `v0.28.0`.
+  - Bug fix: Empty Meter names are retained as-is instead of being replaced with
+    "rust.opentelemetry.io/sdk/meter".
+    [#2334](https://github.com/open-telemetry/opentelemetry-rust/pull/2334)
+
+  - Bug fix: Empty Logger names are retained as-is instead of being replaced with
+    "rust.opentelemetry.io/sdk/logger".
+    [#2316](https://github.com/open-telemetry/opentelemetry-rust/pull/2316)
+
+  - `Logger::provider`: This method is deprecated as of version `0.27.1`. To be removed in `0.28.0`.
+  - `Logger::instrumentation_scope`: This method is deprecated as of version `0.27.1`. To be removed in `0.28.0`.
+    Migration guidance:
+    - These methods are intended for log appenders. Keep a clone of the provider handle instead of depending on the above methods.
+
+  - **Bug Fix:** Validates the `with_boundaries` bucket boundaries used in
+    histograms. The boundaries provided by the user must not contain `f64::NAN`,
+    `f64::INFINITY`, or `f64::NEG_INFINITY`, must be sorted in strictly
+    increasing order, and must contain no duplicates. Instruments will not record
+    measurements if the boundaries are invalid.
+    [#2351](https://github.com/open-telemetry/opentelemetry-rust/pull/2351)
+
+## 0.27.0
+
+Released 2024-Nov-11
+
+- Update `opentelemetry` dependency version to 0.27
+- Update `opentelemetry-http` dependency version to 0.27
+
+- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179)
+- Implement `LogRecord::set_trace_context` for `LogRecord`. Respect any trace context set on a `LogRecord` when emitting through a `Logger`.
+- Improved `LoggerProvider` shutdown handling to prevent redundant shutdown calls when `drop` is invoked. [#2195](https://github.com/open-telemetry/opentelemetry-rust/pull/2195)
+- When creating new metric instruments by calling `build()`, the SDK returns a no-op instrument if validation fails (e.g., an invalid metric name). [#2166](https://github.com/open-telemetry/opentelemetry-rust/pull/2166)
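+  Example (a minimal sketch; the meter and instrument names are illustrative):
+  ```rust
+  use opentelemetry::metrics::MeterProvider as _;
+  use opentelemetry_sdk::metrics::SdkMeterProvider;
+
+  let provider = SdkMeterProvider::default();
+  let meter = provider.meter("example");
+
+  // A valid name yields a working counter.
+  let counter = meter.u64_counter("my_counter").build();
+  counter.add(1, &[]);
+
+  // An invalid (empty) name still returns a counter, but it is a no-op
+  // that silently drops measurements instead of panicking or erroring.
+  let noop = meter.u64_counter("").build();
+  noop.add(1, &[]);
+  ```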
+- **BREAKING for Metrics users**:
+  - **Replaced**
+    - ([#2217](https://github.com/open-telemetry/opentelemetry-rust/pull/2217)): Removed `{Delta,Cumulative}TemporalitySelector::new()` in favor of directly using the `Temporality` enum, to simplify the configuration of `MetricsExporterBuilder` with different temporalities.
+  - **Renamed**
+    - ([#2232](https://github.com/open-telemetry/opentelemetry-rust/pull/2232)): The `init` method used to create instruments has been renamed to `build`.
+      Before:
+      ```rust
+      let counter = meter.u64_counter("my_counter").init();
+      ```
+      Now:
+      ```rust
+      let counter = meter.u64_counter("my_counter").build();
+      ```
+    - ([#2255](https://github.com/open-telemetry/opentelemetry-rust/pull/2255)): De-pluralized metric types.
+      - `PushMetricsExporter` -> `PushMetricExporter`
+      - `InMemoryMetricsExporter` -> `InMemoryMetricExporter`
+      - `InMemoryMetricsExporterBuilder` -> `InMemoryMetricExporterBuilder`
+- **BREAKING**: [#2220](https://github.com/open-telemetry/opentelemetry-rust/pull/2220)
+  - Removed the `InstrumentationLibrary` re-export and its `Scope` alias; use `opentelemetry::InstrumentationLibrary` instead.
+  - Unified builders across signals
+    - Removed deprecated `LoggerProvider::versioned_logger`, `TracerProvider::versioned_tracer`
+    - Removed `MeterProvider::versioned_meter`
+    - Replaced these methods with `LoggerProvider::logger_with_scope`, `TracerProvider::tracer_with_scope`, `MeterProvider::meter_with_scope`
+
+- [#2272](https://github.com/open-telemetry/opentelemetry-rust/pull/2272)
+  - Pin the `url` version to `2.5.2`; higher versions break the build, see [servo/rust-url#992](https://github.com/servo/rust-url/issues/992).
+    The `url` crate is used when the `jaeger_remote_sampler` feature is enabled.
+
+- **BREAKING**: [#2266](https://github.com/open-telemetry/opentelemetry-rust/pull/2266)
+  - Moved `ExportError` trait from `opentelemetry::ExportError` to `opentelemetry_sdk::export::ExportError`
+  - Moved `LogError` enum from `opentelemetry::logs::LogError` to `opentelemetry_sdk::logs::LogError`
+  - Moved `LogResult` type alias from `opentelemetry::logs::LogResult` to `opentelemetry_sdk::logs::LogResult`
+  - Renamed `opentelemetry::metrics::Result` type alias to `opentelemetry::metrics::MetricResult`
+  - Renamed `opentelemetry::metrics::MetricsError` enum to `opentelemetry::metrics::MetricError`
+  - Moved `MetricError` enum from `opentelemetry::metrics::MetricError` to `opentelemetry_sdk::metrics::MetricError`
+  - Moved `MetricResult` type alias from `opentelemetry::metrics::MetricResult` to `opentelemetry_sdk::metrics::MetricResult`
+
+  - Users calling public APIs that return these constructs (e.g., `LoggerProvider::shutdown()`, `MeterProvider::force_flush()`) should now import them from the SDK instead of the API.
+  - Developers creating custom exporters should ensure they import these constructs from the SDK, not the API.
+  - [#2291](https://github.com/open-telemetry/opentelemetry-rust/pull/2291) Renamed the `logs_level_enabled` flag to `spec_unstable_logs_enabled`. Please enable this updated flag if the feature is needed. This flag will be removed once the feature is stabilized in the specifications.
+
+- **BREAKING**: The `Temporality` enum moved from `opentelemetry_sdk::metrics::data::Temporality` to `opentelemetry_sdk::metrics::Temporality`.
+
+- **BREAKING**: `Views` are now an opt-in ONLY feature. Please include the feature `spec_unstable_metrics_views` to enable `Views`. This feature will be stabilized after the 1.0 stable release of the SDK. [#2295](https://github.com/open-telemetry/opentelemetry-rust/issues/2295)
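+  Example (a minimal sketch; the instrument name and bucket boundaries are
+  illustrative, and the `spec_unstable_metrics_views` feature flag must be
+  enabled on `opentelemetry_sdk`):
+  ```rust
+  use opentelemetry_sdk::metrics::{new_view, Aggregation, Instrument, SdkMeterProvider, Stream};
+
+  // Override the histogram buckets of one instrument via a view.
+  let view = new_view(
+      Instrument::new().name("http.server.duration"),
+      Stream::new().aggregation(Aggregation::ExplicitBucketHistogram {
+          boundaries: vec![0.0, 5.0, 10.0, 25.0, 50.0, 100.0],
+          record_min_max: true,
+      }),
+  )
+  .expect("view should be valid");
+
+  let provider = SdkMeterProvider::builder().with_view(view).build();
+  ```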
+
+- Added a new `PeriodicReader` implementation (`PeriodicReaderWithOwnThread`)
+  that does not rely on an async runtime and instead creates its own thread. This
+  is under the feature flag "experimental_metrics_periodic_reader_no_runtime". The
+  functionality may be moved into the existing `PeriodicReader` or even removed in the
+  future. As of today, this cannot be used as-is with the OTLP Metric Exporter or
+  any exporter that requires an async runtime.
+
+## v0.26.0
+Released 2024-Sep-30
+
+- Update `opentelemetry` dependency version to 0.26
+- **BREAKING** Public API changes:
+  - **Removed**: `SdkMeter` struct [#2113](https://github.com/open-telemetry/opentelemetry-rust/pull/2113). This API is only meant for internal use.
+  - **Removed**: `AggregationSelector` trait and `DefaultAggregationSelector` struct [#2085](https://github.com/open-telemetry/opentelemetry-rust/pull/2085). This API was unnecessary. The feature to customize aggregation for instruments should be offered by the `Views` API.
+
+- Update `async-std` dependency version to 1.13
+- *Breaking* - Remove support for `MetricProducer`, which allowed metrics from
+  external sources to be sent through OpenTelemetry.
+  [#2105](https://github.com/open-telemetry/opentelemetry-rust/pull/2105)
+
+- Feature: `SimpleSpanProcessor::new` is now public [#2119](https://github.com/open-telemetry/opentelemetry-rust/pull/2119)
+- For Delta Temporality, exporters are not invoked unless there were new
+  measurements since the last collect/export.
+  [#2153](https://github.com/open-telemetry/opentelemetry-rust/pull/2153)
+- `MeterProvider` modified to not invoke shutdown on `Drop` if the user has already
+  called `shutdown()`.
+ [#2156](https://github.com/open-telemetry/opentelemetry-rust/pull/2156) ## v0.25.0 diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 8761a6a669..2c3643dee1 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -1,23 +1,22 @@ [package] name = "opentelemetry_sdk" -version = "0.25.0" +version = "0.27.1" description = "The SDK for the OpenTelemetry metrics collection and distributed tracing framework" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" readme = "README.md" license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [dependencies] -opentelemetry = { version = "0.25", path = "../opentelemetry/" } -opentelemetry-http = { version = "0.25", path = "../opentelemetry-http", optional = true } +opentelemetry = { version = "0.27", path = "../opentelemetry/" } +opentelemetry-http = { version = "0.27", path = "../opentelemetry-http", optional = true } async-std = { workspace = true, features = ["unstable"], optional = true } async-trait = { workspace = true, optional = true } futures-channel = "0.3" futures-executor = { workspace = true } futures-util = { workspace = true, features = ["std", "sink", "async-await-macro"] } -once_cell = { workspace = true } percent-encoding = { version = "2.0", optional = true } rand = { workspace = true, features = ["std", "std_rng","small_rng"], optional = true } glob = { version = "0.3.1", optional =true} @@ -28,6 +27,7 @@ url = { workspace = true, optional = true } tokio = { workspace = true, features = ["rt", "time"], optional = true } tokio-stream = { workspace = true, optional = true } http = { workspace = true, optional = true } +tracing = {workspace = true, optional = true} [package.metadata.docs.rs] all-features = true @@ -35,22 +35,28 @@ rustdoc-args = ["--cfg", "docsrs"] [dev-dependencies] criterion = { workspace = true, features = ["html_reports"] } +rstest = "0.23.0" temp-env = { workspace = true } [target.'cfg(not(target_os = "windows"))'.dev-dependencies] pprof = { version = "0.13", features = ["flamegraph", "criterion"] } [features] -default = ["trace", "metrics", "logs"] -trace = ["opentelemetry/trace", "rand", "async-trait", "percent-encoding"] +default = ["trace", "metrics", "logs", "internal-logs"] +trace = ["opentelemetry/trace", "rand", "percent-encoding"] jaeger_remote_sampler = ["trace", "opentelemetry-http", "http", "serde", "serde_json", "url"] logs = ["opentelemetry/logs", "async-trait", "serde_json"] -logs_level_enabled = ["logs", "opentelemetry/logs_level_enabled"] +spec_unstable_logs_enabled = ["logs", "opentelemetry/spec_unstable_logs_enabled"] metrics = ["opentelemetry/metrics", "glob", "async-trait"] testing = ["opentelemetry/testing", "trace", "metrics", "logs", "rt-async-std", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"] rt-tokio = ["tokio", "tokio-stream"] rt-tokio-current-thread = ["tokio", "tokio-stream"] rt-async-std = ["async-std"] +internal-logs = ["tracing"] +experimental_metrics_periodic_reader_no_runtime = ["metrics"] +experimental_metrics_periodicreader_with_async_runtime = ["metrics"] +spec_unstable_metrics_views = ["metrics"] +experimental_logs_batch_log_processor_with_async_runtime = ["logs"] [[bench]] name = "context" diff --git a/opentelemetry-sdk/README.md b/opentelemetry-sdk/README.md index f190e9dd9f..bbbd4c20a0 100644 --- a/opentelemetry-sdk/README.md +++ b/opentelemetry-sdk/README.md @@ -29,11 +29,10 @@ 
can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. -*Compiler support: [requires `rustc` 1.65+][msrv]* +*[Supported Rust Versions](#supported-rust-versions)* [Prometheus]: https://prometheus.io [Jaeger]: https://www.jaegertracing.io -[msrv]: #supported-rust-versions ### What does this crate contain? @@ -104,7 +103,7 @@ See [docs](https://docs.rs/opentelemetry-sdk). ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.65. The current OpenTelemetry version is not guaranteed to build +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. The current stable Rust compiler and the three most recent minor versions diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs index abc7d0df02..ed20c45a06 100644 --- a/opentelemetry-sdk/benches/batch_span_processor.rs +++ b/opentelemetry-sdk/benches/batch_span_processor.rs @@ -32,7 +32,7 @@ fn get_span_data() -> Vec { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - instrumentation_lib: Default::default(), + instrumentation_scope: Default::default(), }) .collect::>() } diff --git a/opentelemetry-sdk/benches/context.rs b/opentelemetry-sdk/benches/context.rs index 3ef494990c..87f49942a6 100644 --- a/opentelemetry-sdk/benches/context.rs +++ b/opentelemetry-sdk/benches/context.rs @@ -10,7 +10,6 @@ use opentelemetry::{ }; use opentelemetry_sdk::{ export::trace::{ExportResult, SpanData, SpanExporter}, - trace, trace::{Sampler, TracerProvider}, }; #[cfg(not(target_os = "windows"))] @@ -127,9 +126,7 @@ impl Display for Environment { fn parent_sampled_tracer(inner_sampler: Sampler) -> (TracerProvider, BoxedTracer) { let provider = TracerProvider::builder() - .with_config( - trace::Config::default().with_sampler(Sampler::ParentBased(Box::new(inner_sampler))), - ) + .with_sampler(Sampler::ParentBased(Box::new(inner_sampler))) .with_simple_exporter(NoopExporter) .build(); let tracer = provider.tracer(module_path!()); diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs index 20d3503100..99083fbdae 100644 --- a/opentelemetry-sdk/benches/log.rs +++ b/opentelemetry-sdk/benches/log.rs @@ -20,21 +20,18 @@ use std::time::SystemTime; use criterion::{criterion_group, criterion_main, Criterion}; -use opentelemetry::logs::{ - AnyValue, LogRecord as _, LogResult, Logger as _, LoggerProvider as _, Severity, -}; +use opentelemetry::logs::{AnyValue, LogRecord as _, Logger as _, LoggerProvider as _, Severity}; use opentelemetry::trace::Tracer; use opentelemetry::trace::TracerProvider as _; -use opentelemetry::{InstrumentationLibrary, Key}; -use opentelemetry_sdk::logs::{LogProcessor, LogRecord, Logger, LoggerProvider}; -use opentelemetry_sdk::trace; +use opentelemetry::{InstrumentationScope, Key}; +use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LogResult, Logger, LoggerProvider}; use opentelemetry_sdk::trace::{Sampler, TracerProvider}; #[derive(Debug)] struct NoopProcessor; impl LogProcessor for NoopProcessor { - fn emit(&self, _data: &mut LogRecord, _library: &InstrumentationLibrary) {} + fn emit(&self, _data: &mut LogRecord, _scope: &InstrumentationScope) {} fn force_flush(&self) -> LogResult<()> { Ok(()) @@ -67,7 +64,7 @@ fn 
log_benchmark_group(c: &mut Criterion, name: &str, f: F) { // setup tracing as well. let tracer_provider = TracerProvider::builder() - .with_config(trace::Config::default().with_sampler(Sampler::AlwaysOn)) + .with_sampler(Sampler::AlwaysOn) .build(); let tracer = tracer_provider.tracer("bench-tracer"); diff --git a/opentelemetry-sdk/benches/log_exporter.rs b/opentelemetry-sdk/benches/log_exporter.rs index 3549c08af5..c2ecb78ce9 100644 --- a/opentelemetry-sdk/benches/log_exporter.rs +++ b/opentelemetry-sdk/benches/log_exporter.rs @@ -16,9 +16,10 @@ use std::time::SystemTime; use async_trait::async_trait; use criterion::{criterion_group, criterion_main, Criterion}; -use opentelemetry::logs::{LogRecord as _, LogResult, Logger as _, LoggerProvider as _, Severity}; +use opentelemetry::logs::{LogRecord as _, Logger as _, LoggerProvider as _, Severity}; +use opentelemetry_sdk::logs::LogResult; -use opentelemetry::InstrumentationLibrary; +use opentelemetry::InstrumentationScope; use opentelemetry_sdk::export::logs::LogBatch; use opentelemetry_sdk::logs::LogProcessor; use opentelemetry_sdk::logs::LogRecord; @@ -65,9 +66,9 @@ impl ExportingProcessorWithFuture { } impl LogProcessor for ExportingProcessorWithFuture { - fn emit(&self, record: &mut LogRecord, library: &InstrumentationLibrary) { + fn emit(&self, record: &mut LogRecord, scope: &InstrumentationScope) { let mut exporter = self.exporter.lock().expect("lock error"); - let logs = [(record as &LogRecord, library)]; + let logs = [(record as &LogRecord, scope)]; futures_executor::block_on(exporter.export(LogBatch::new(&logs))); } @@ -94,8 +95,8 @@ impl ExportingProcessorWithoutFuture { } impl LogProcessor for ExportingProcessorWithoutFuture { - fn emit(&self, record: &mut LogRecord, library: &InstrumentationLibrary) { - let logs = [(record as &LogRecord, library)]; + fn emit(&self, record: &mut LogRecord, scope: &InstrumentationScope) { + let logs = [(record as &LogRecord, scope)]; self.exporter .lock() .expect("lock error") diff --git a/opentelemetry-sdk/benches/log_processor.rs b/opentelemetry-sdk/benches/log_processor.rs index 1bc0a130b4..87cd242d0d 100644 --- a/opentelemetry-sdk/benches/log_processor.rs +++ b/opentelemetry-sdk/benches/log_processor.rs @@ -18,9 +18,11 @@ use std::{ }; use criterion::{criterion_group, criterion_main, Criterion}; -use opentelemetry::logs::{LogRecord as _, LogResult, Logger as _, LoggerProvider as _, Severity}; -use opentelemetry::InstrumentationLibrary; -use opentelemetry_sdk::logs::{LogProcessor, LogRecord, Logger, LoggerProvider}; +use opentelemetry::{ + logs::{LogRecord as _, Logger as _, LoggerProvider as _, Severity}, + InstrumentationScope, +}; +use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LogResult, Logger, LoggerProvider}; // Run this benchmark with: // cargo bench --bench log_processor @@ -43,7 +45,7 @@ fn create_log_record(logger: &Logger) -> LogRecord { struct NoopProcessor; impl LogProcessor for NoopProcessor { - fn emit(&self, _data: &mut LogRecord, _library: &InstrumentationLibrary) {} + fn emit(&self, _data: &mut LogRecord, _scope: &InstrumentationScope) {} fn force_flush(&self) -> LogResult<()> { Ok(()) @@ -58,7 +60,7 @@ impl LogProcessor for NoopProcessor { struct CloningProcessor; impl LogProcessor for CloningProcessor { - fn emit(&self, data: &mut LogRecord, _library: &InstrumentationLibrary) { + fn emit(&self, data: &mut LogRecord, _scope: &InstrumentationScope) { let _data_cloned = data.clone(); } @@ -73,8 +75,8 @@ impl LogProcessor for CloningProcessor { #[derive(Debug)] struct 
SendToChannelProcessor { - sender: std::sync::mpsc::Sender<(LogRecord, InstrumentationLibrary)>, - receiver: Arc>>, + sender: std::sync::mpsc::Sender<(LogRecord, InstrumentationScope)>, + receiver: Arc>>, } impl SendToChannelProcessor { @@ -101,8 +103,8 @@ impl SendToChannelProcessor { } impl LogProcessor for SendToChannelProcessor { - fn emit(&self, record: &mut LogRecord, library: &InstrumentationLibrary) { - let res = self.sender.send((record.clone(), library.clone())); + fn emit(&self, record: &mut LogRecord, scope: &InstrumentationScope) { + let res = self.sender.send((record.clone(), scope.clone())); if res.is_err() { println!("Error sending log data to channel {0}", res.err().unwrap()); } diff --git a/opentelemetry-sdk/benches/metric.rs b/opentelemetry-sdk/benches/metric.rs index 88143bccff..112b3f780f 100644 --- a/opentelemetry-sdk/benches/metric.rs +++ b/opentelemetry-sdk/benches/metric.rs @@ -3,16 +3,14 @@ use std::sync::{Arc, Weak}; use criterion::{criterion_group, criterion_main, Bencher, Criterion}; use opentelemetry::{ - metrics::{Counter, Histogram, MeterProvider as _, Result}, + metrics::{Counter, Histogram, MeterProvider as _}, Key, KeyValue, }; use opentelemetry_sdk::{ metrics::{ - data::{ResourceMetrics, Temporality}, - new_view, - reader::{MetricReader, TemporalitySelector}, - Aggregation, Instrument, InstrumentKind, ManualReader, Pipeline, SdkMeterProvider, Stream, - View, + data::ResourceMetrics, new_view, reader::MetricReader, Aggregation, Instrument, + InstrumentKind, ManualReader, MetricResult, Pipeline, SdkMeterProvider, Stream, + Temporality, View, }, Resource, }; @@ -20,49 +18,25 @@ use opentelemetry_sdk::{ #[derive(Clone, Debug)] struct SharedReader(Arc); -impl TemporalitySelector for SharedReader { - fn temporality(&self, kind: InstrumentKind) -> Temporality { - self.0.temporality(kind) - } -} - impl MetricReader for SharedReader { fn register_pipeline(&self, pipeline: Weak) { self.0.register_pipeline(pipeline) } - fn collect(&self, rm: &mut ResourceMetrics) -> Result<()> { + fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()> { self.0.collect(rm) } - fn force_flush(&self) -> Result<()> { + fn force_flush(&self) -> MetricResult<()> { self.0.force_flush() } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { self.0.shutdown() } -} - -/// Configure delta temporality for all [InstrumentKind] -/// -/// [Temporality::Delta] will be used for all instrument kinds if this -/// [TemporalitySelector] is used. -#[derive(Clone, Default, Debug)] -pub struct DeltaTemporalitySelector { - pub(crate) _private: (), -} - -impl DeltaTemporalitySelector { - /// Create a new default temporality selector. 
- pub fn new() -> Self { - Self::default() - } -} -impl TemporalitySelector for DeltaTemporalitySelector { - fn temporality(&self, _kind: InstrumentKind) -> Temporality { - Temporality::Delta + fn temporality(&self, kind: InstrumentKind) -> Temporality { + self.0.temporality(kind) } } @@ -140,7 +114,7 @@ fn bench_counter(view: Option>, temporality: &str) -> (SharedReade } else { SharedReader(Arc::new( ManualReader::builder() - .with_temporality_selector(DeltaTemporalitySelector::new()) + .with_temporality(Temporality::Delta) .build(), )) }; @@ -149,26 +123,20 @@ fn bench_counter(view: Option>, temporality: &str) -> (SharedReade builder = builder.with_view(view); } let provider = builder.build(); - let cntr = provider.meter("test").u64_counter("hello").init(); + let cntr = provider.meter("test").u64_counter("hello").build(); (rdr, cntr) } fn counters(c: &mut Criterion) { let (_, cntr) = bench_counter(None, "cumulative"); - let (_, cntr2) = bench_counter(None, "delta"); - let (_, cntr3) = bench_counter(None, "cumulative"); + let (_, cntr_max) = bench_counter(None, "cumulative"); let mut group = c.benchmark_group("Counter"); group.bench_function("AddNoAttrs", |b| b.iter(|| cntr.add(1, &[]))); - group.bench_function("AddNoAttrsDelta", |b| b.iter(|| cntr2.add(1, &[]))); - group.bench_function("AddOneAttr", |b| { b.iter(|| cntr.add(1, &[KeyValue::new("K", "V")])) }); - group.bench_function("AddOneAttrDelta", |b| { - b.iter(|| cntr2.add(1, &[KeyValue::new("K1", "V1")])) - }); group.bench_function("AddThreeAttr", |b| { b.iter(|| { cntr.add( @@ -181,18 +149,6 @@ fn counters(c: &mut Criterion) { ) }) }); - group.bench_function("AddThreeAttrDelta", |b| { - b.iter(|| { - cntr2.add( - 1, - &[ - KeyValue::new("K2", "V2"), - KeyValue::new("K3", "V3"), - KeyValue::new("K4", "V4"), - ], - ) - }) - }); group.bench_function("AddFiveAttr", |b| { b.iter(|| { cntr.add( @@ -207,20 +163,6 @@ fn counters(c: &mut Criterion) { ) }) }); - group.bench_function("AddFiveAttrDelta", |b| { - b.iter(|| { - cntr2.add( - 1, - &[ - KeyValue::new("K5", "V5"), - KeyValue::new("K6", "V6"), - KeyValue::new("K7", "V7"), - KeyValue::new("K8", "V8"), - KeyValue::new("K9", "V9"), - ], - ) - }) - }); group.bench_function("AddTenAttr", |b| { b.iter(|| { cntr.add( @@ -240,25 +182,6 @@ fn counters(c: &mut Criterion) { ) }) }); - group.bench_function("AddTenAttrDelta", |b| { - b.iter(|| { - cntr2.add( - 1, - &[ - KeyValue::new("K10", "V10"), - KeyValue::new("K11", "V11"), - KeyValue::new("K12", "V12"), - KeyValue::new("K13", "V13"), - KeyValue::new("K14", "V14"), - KeyValue::new("K15", "V15"), - KeyValue::new("K16", "V16"), - KeyValue::new("K17", "V17"), - KeyValue::new("K18", "V18"), - KeyValue::new("K19", "V19"), - ], - ) - }) - }); const MAX_DATA_POINTS: i64 = 2000; let mut max_attributes: Vec = Vec::new(); @@ -268,14 +191,16 @@ fn counters(c: &mut Criterion) { } group.bench_function("AddOneTillMaxAttr", |b| { - b.iter(|| cntr3.add(1, &max_attributes)) + b.iter(|| cntr_max.add(1, &max_attributes)) }); for i in MAX_DATA_POINTS..MAX_DATA_POINTS * 2 { max_attributes.push(KeyValue::new(i.to_string(), i)) } - group.bench_function("AddMaxAttr", |b| b.iter(|| cntr3.add(1, &max_attributes))); + group.bench_function("AddMaxAttr", |b| { + b.iter(|| cntr_max.add(1, &max_attributes)) + }); group.bench_function("AddInvalidAttr", |b| { b.iter(|| cntr.add(1, &[KeyValue::new("", "V"), KeyValue::new("K", "V")])) @@ -316,7 +241,7 @@ fn counters(c: &mut Criterion) { let (rdr, cntr) = bench_counter(None, "cumulative"); let mut rm = ResourceMetrics { - 
resource: Resource::empty(), + resource: Resource::builder_empty().build(), scope_metrics: Vec::new(), }; @@ -368,7 +293,7 @@ fn bench_histogram(bound_count: usize) -> (SharedReader, Histogram) { let mtr = builder.build().meter("test_meter"); let hist = mtr .u64_histogram(format!("histogram_{}", bound_count)) - .init(); + .build(); (r, hist) } @@ -408,12 +333,12 @@ fn benchmark_collect_histogram(b: &mut Bencher, n: usize) { .meter("sdk/metric/bench/histogram"); for i in 0..n { - let h = mtr.u64_histogram(format!("fake_data_{i}")).init(); + let h = mtr.u64_histogram(format!("fake_data_{i}")).build(); h.record(1, &[]); } let mut rm = ResourceMetrics { - resource: Resource::empty(), + resource: Resource::builder_empty().build(), scope_metrics: Vec::new(), }; diff --git a/opentelemetry-sdk/benches/metrics_counter.rs b/opentelemetry-sdk/benches/metrics_counter.rs index b6951664cd..b4517d379b 100644 --- a/opentelemetry-sdk/benches/metrics_counter.rs +++ b/opentelemetry-sdk/benches/metrics_counter.rs @@ -1,18 +1,19 @@ /* The benchmark results: criterion = "0.5.1" - OS: Ubuntu 22.04.3 LTS (5.15.146.1-microsoft-standard-WSL2) + rustc 1.82.0 (f6e511eec 2024-10-15) + OS: Ubuntu 22.04.3 LTS (5.15.167.4-microsoft-standard-WSL2) Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB | Test | Average time| |--------------------------------|-------------| - | Counter_Add_Sorted | 193 ns | - | Counter_Add_Unsorted | 209 ns | - | Counter_Overflow | 898 ns | + | Counter_Add_Sorted | 172 ns | + | Counter_Add_Unsorted | 183 ns | + | Counter_Overflow | 562 ns | | ThreadLocal_Random_Generator_5 | 37 ns | */ -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use opentelemetry::{ metrics::{Counter, MeterProvider as _}, KeyValue, @@ -44,7 +45,7 @@ fn create_counter(name: &'static str) -> Counter { let meter = meter_provider.meter("benchmarks"); println!("Counter_Created"); - meter.u64_counter(name).init() + meter.u64_counter(name).build() } fn criterion_benchmark(c: &mut Criterion) { @@ -57,62 +58,72 @@ fn criterion_benchmark(c: &mut Criterion) { fn counter_add_sorted(c: &mut Criterion) { let counter = create_counter("Counter_Add_Sorted"); c.bench_function("Counter_Add_Sorted", |b| { - b.iter(|| { - // 4*4*10*10 = 1600 time series. - let rands = CURRENT_RNG.with(|rng| { - let mut rng = rng.borrow_mut(); - [ - rng.gen_range(0..4), - rng.gen_range(0..4), - rng.gen_range(0..10), - rng.gen_range(0..10), - ] - }); - let index_first_attribute = rands[0]; - let index_second_attribute = rands[1]; - let index_third_attribute = rands[2]; - let index_fourth_attribute = rands[3]; - counter.add( - 1, - &[ - KeyValue::new("attribute1", ATTRIBUTE_VALUES[index_first_attribute]), - KeyValue::new("attribute2", ATTRIBUTE_VALUES[index_second_attribute]), - KeyValue::new("attribute3", ATTRIBUTE_VALUES[index_third_attribute]), - KeyValue::new("attribute4", ATTRIBUTE_VALUES[index_fourth_attribute]), - ], - ); - }); + b.iter_batched( + || { + // 4*4*10*10 = 1600 time series. 
+ CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }) + }, + |rands| { + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + let index_fourth_attribute = rands[3]; + counter.add( + 1, + &[ + KeyValue::new("attribute1", ATTRIBUTE_VALUES[index_first_attribute]), + KeyValue::new("attribute2", ATTRIBUTE_VALUES[index_second_attribute]), + KeyValue::new("attribute3", ATTRIBUTE_VALUES[index_third_attribute]), + KeyValue::new("attribute4", ATTRIBUTE_VALUES[index_fourth_attribute]), + ], + ); + }, + BatchSize::SmallInput, + ); }); } fn counter_add_unsorted(c: &mut Criterion) { let counter = create_counter("Counter_Add_Unsorted"); c.bench_function("Counter_Add_Unsorted", |b| { - b.iter(|| { - // 4*4*10*10 = 1600 time series. - let rands = CURRENT_RNG.with(|rng| { - let mut rng = rng.borrow_mut(); - [ - rng.gen_range(0..4), - rng.gen_range(0..4), - rng.gen_range(0..10), - rng.gen_range(0..10), - ] - }); - let index_first_attribute = rands[0]; - let index_second_attribute = rands[1]; - let index_third_attribute = rands[2]; - let index_fourth_attribute = rands[3]; - counter.add( - 1, - &[ - KeyValue::new("attribute2", ATTRIBUTE_VALUES[index_second_attribute]), - KeyValue::new("attribute3", ATTRIBUTE_VALUES[index_third_attribute]), - KeyValue::new("attribute1", ATTRIBUTE_VALUES[index_first_attribute]), - KeyValue::new("attribute4", ATTRIBUTE_VALUES[index_fourth_attribute]), - ], - ); - }); + b.iter_batched( + || { + // 4*4*10*10 = 1600 time series. + CURRENT_RNG.with(|rng| { + let mut rng = rng.borrow_mut(); + [ + rng.gen_range(0..4), + rng.gen_range(0..4), + rng.gen_range(0..10), + rng.gen_range(0..10), + ] + }) + }, + |rands| { + let index_first_attribute = rands[0]; + let index_second_attribute = rands[1]; + let index_third_attribute = rands[2]; + let index_fourth_attribute = rands[3]; + counter.add( + 1, + &[ + KeyValue::new("attribute2", ATTRIBUTE_VALUES[index_second_attribute]), + KeyValue::new("attribute3", ATTRIBUTE_VALUES[index_third_attribute]), + KeyValue::new("attribute1", ATTRIBUTE_VALUES[index_first_attribute]), + KeyValue::new("attribute4", ATTRIBUTE_VALUES[index_fourth_attribute]), + ], + ); + }, + BatchSize::SmallInput, + ); }); } diff --git a/opentelemetry-sdk/benches/metrics_gauge.rs b/opentelemetry-sdk/benches/metrics_gauge.rs index b63c8a7b52..c61095e53e 100644 --- a/opentelemetry-sdk/benches/metrics_gauge.rs +++ b/opentelemetry-sdk/benches/metrics_gauge.rs @@ -1,12 +1,13 @@ /* The benchmark results: criterion = "0.5.1" - OS: Ubuntu 22.04.4 LTS (5.15.153.1-microsoft-standard-WSL2) - Hardware: Intel(R) Xeon(R) Platinum 8370C CPU @ 2.80GHz, 16vCPUs, + rustc 1.82.0 (f6e511eec 2024-10-15) + OS: Ubuntu 22.04.4 LTS (5.15.167.4-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB | Test | Average time| |--------------------------------|-------------| - | Gauge_Add | 178.37 ns | + | Gauge_Add | 187.49 ns | */ use criterion::{criterion_group, criterion_main, Criterion}; @@ -39,7 +40,7 @@ fn create_gauge() -> Gauge { .build(); let meter = meter_provider.meter("benchmarks"); - meter.u64_gauge("gauge_bench").init() + meter.u64_gauge("gauge_bench").build() } fn criterion_benchmark(c: &mut Criterion) { diff --git a/opentelemetry-sdk/benches/metrics_histogram.rs b/opentelemetry-sdk/benches/metrics_histogram.rs index 517877e673..c6d5751dd6 100644 --- 
a/opentelemetry-sdk/benches/metrics_histogram.rs +++ b/opentelemetry-sdk/benches/metrics_histogram.rs @@ -1,12 +1,13 @@ /* The benchmark results: criterion = "0.5.1" - OS: Ubuntu 22.04.4 LTS (5.15.153.1-microsoft-standard-WSL2) - Hardware: Intel(R) Xeon(R) Platinum 8370C CPU @ 2.80GHz, 16vCPUs, + rustc 1.82.0 (f6e511eec 2024-10-15) + OS: Ubuntu 22.04.4 LTS (5.15.167.4-microsoft-standard-WSL2) + Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, RAM: 64.0 GB | Test | Average time| |--------------------------------|-------------| - | Histogram_Record | 193.04 ns | + | Histogram_Record | 225.04 ns | */ @@ -16,6 +17,7 @@ use opentelemetry::{ KeyValue, }; use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; +#[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; use rand::{ rngs::{self}, @@ -41,7 +43,7 @@ fn create_histogram(name: &'static str) -> Histogram { .build(); let meter = meter_provider.meter("benchmarks"); - meter.u64_histogram(name).init() + meter.u64_histogram(name).build() } fn criterion_benchmark(c: &mut Criterion) { diff --git a/opentelemetry-sdk/benches/span_builder.rs b/opentelemetry-sdk/benches/span_builder.rs index 10fd4addfe..6f0c828b07 100644 --- a/opentelemetry-sdk/benches/span_builder.rs +++ b/opentelemetry-sdk/benches/span_builder.rs @@ -54,7 +54,7 @@ fn span_builder_benchmark_group(c: &mut Criterion) { fn not_sampled_provider() -> (sdktrace::TracerProvider, sdktrace::Tracer) { let provider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOff)) + .with_sampler(sdktrace::Sampler::AlwaysOff) .with_simple_exporter(NoopExporter) .build(); let tracer = provider.tracer("not-sampled"); diff --git a/opentelemetry-sdk/benches/trace.rs b/opentelemetry-sdk/benches/trace.rs index 9022c1fe3f..36f6acec4a 100644 --- a/opentelemetry-sdk/benches/trace.rs +++ b/opentelemetry-sdk/benches/trace.rs @@ -70,7 +70,7 @@ fn trace_benchmark_group(c: &mut Criterion, name: &str group.bench_function("always-sample", |b| { let provider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOn)) + .with_sampler(sdktrace::Sampler::AlwaysOn) .with_simple_exporter(VoidExporter) .build(); let always_sample = provider.tracer("always-sample"); @@ -80,7 +80,7 @@ fn trace_benchmark_group(c: &mut Criterion, name: &str group.bench_function("never-sample", |b| { let provider = sdktrace::TracerProvider::builder() - .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOff)) + .with_sampler(sdktrace::Sampler::AlwaysOff) .with_simple_exporter(VoidExporter) .build(); let never_sample = provider.tracer("never-sample"); diff --git a/opentelemetry-sdk/src/error.rs b/opentelemetry-sdk/src/error.rs new file mode 100644 index 0000000000..115da17b78 --- /dev/null +++ b/opentelemetry-sdk/src/error.rs @@ -0,0 +1,46 @@ +//! Wrapper for error from trace, logs and metrics part of open telemetry. +use std::sync::PoisonError; + +#[cfg(feature = "logs")] +use crate::logs::LogError; +#[cfg(feature = "metrics")] +use crate::metrics::MetricError; +use opentelemetry::propagation::PropagationError; +#[cfg(feature = "trace")] +use opentelemetry::trace::TraceError; + +/// Wrapper for error from both tracing and metrics part of open telemetry. 
+#[derive(thiserror::Error, Debug)] +#[non_exhaustive] +pub enum Error { + #[cfg(feature = "trace")] + #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] + #[error(transparent)] + /// Failed to export traces. + Trace(#[from] TraceError), + #[cfg(feature = "metrics")] + #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] + #[error(transparent)] + /// An issue raised by the metrics module. + Metric(#[from] MetricError), + + #[cfg(feature = "logs")] + #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] + #[error(transparent)] + /// Failed to export logs. + Log(#[from] LogError), + + #[error(transparent)] + /// Error happens when injecting and extracting information using propagators. + Propagation(#[from] PropagationError), + + #[error("{0}")] + /// Other types of failures not covered by the variants above. + Other(String), +} + +impl From> for Error { + fn from(err: PoisonError) -> Self { + Error::Other(err.to_string()) + } +} diff --git a/opentelemetry-sdk/src/export/logs/mod.rs b/opentelemetry-sdk/src/export/logs/mod.rs index 8056f28222..5e2168a7ce 100644 --- a/opentelemetry-sdk/src/export/logs/mod.rs +++ b/opentelemetry-sdk/src/export/logs/mod.rs @@ -1,29 +1,27 @@ //! Log exporters use crate::logs::LogRecord; +use crate::logs::{LogError, LogResult}; use crate::Resource; use async_trait::async_trait; -#[cfg(feature = "logs_level_enabled")] +#[cfg(feature = "spec_unstable_logs_enabled")] use opentelemetry::logs::Severity; -use opentelemetry::{ - logs::{LogError, LogResult}, - InstrumentationLibrary, -}; +use opentelemetry::InstrumentationScope; use std::fmt::Debug; /// A batch of log records to be exported by a `LogExporter`. /// /// The `LogBatch` struct holds a collection of log records along with their associated -/// instrumentation libraries. This structure is used to group log records together for efficient +/// instrumentation scopes. This structure is used to group log records together for efficient /// export operations. /// /// # Type Parameters -/// - `'a`: The lifetime of the references to the log records and instrumentation libraries. +/// - `'a`: The lifetime of the references to the log records and instrumentation scopes. /// #[derive(Debug)] pub struct LogBatch<'a> { /// The data field contains a slice of tuples, where each tuple consists of a reference to - /// a `LogRecord` and a reference to an `InstrumentationLibrary`. - data: &'a [(&'a LogRecord, &'a InstrumentationLibrary)], + /// a `LogRecord` and a reference to an `InstrumentationScope`. + data: &'a [(&'a LogRecord, &'a InstrumentationScope)], } impl<'a> LogBatch<'a> { @@ -32,32 +30,31 @@ impl<'a> LogBatch<'a> { /// # Arguments /// /// * `data` - A slice of tuples, where each tuple consists of a reference to a `LogRecord` - /// and a reference to an `InstrumentationLibrary`. These tuples represent the log records - /// and their associated instrumentation libraries to be exported. + /// and a reference to an `InstrumentationScope`. These tuples represent the log records + /// and their associated instrumentation scopes to be exported. /// /// # Returns /// - /// A `LogBatch` instance containing the provided log records and instrumentation libraries. + /// A `LogBatch` instance containing the provided log records and instrumentation scopes. /// /// Note - this is not a public function, and should not be used directly. This would be /// made private in the future. 
- - pub fn new(data: &'a [(&'a LogRecord, &'a InstrumentationLibrary)]) -> LogBatch<'a> { + pub fn new(data: &'a [(&'a LogRecord, &'a InstrumentationScope)]) -> LogBatch<'a> { LogBatch { data } } } impl LogBatch<'_> { - /// Returns an iterator over the log records and instrumentation libraries in the batch. + /// Returns an iterator over the log records and instrumentation scopes in the batch. /// /// Each item yielded by the iterator is a tuple containing references to a `LogRecord` - /// and an `InstrumentationLibrary`. + /// and an `InstrumentationScope`. /// /// # Returns /// - /// An iterator that yields references to the `LogRecord` and `InstrumentationLibrary` in the batch. + /// An iterator that yields references to the `LogRecord` and `InstrumentationScope` in the batch. /// - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.data .iter() .map(|(record, library)| (*record, *library)) @@ -67,16 +64,16 @@ impl LogBatch<'_> { /// `LogExporter` defines the interface that log exporters should implement. #[async_trait] pub trait LogExporter: Send + Sync + Debug { - /// Exports a batch of log records and their associated instrumentation libraries. + /// Exports a batch of log records and their associated instrumentation scopes. /// /// The `export` method is responsible for sending a batch of log records to an external /// destination. It takes a `LogBatch` as an argument, which contains references to the - /// log records and their corresponding instrumentation libraries. The method returns + /// log records and their corresponding instrumentation scopes. The method returns /// a `LogResult` indicating the success or failure of the export operation. /// /// # Arguments /// - /// * `batch` - A `LogBatch` containing the log records and instrumentation libraries + /// * `batch` - A `LogBatch` containing the log records and instrumentation scopes /// to be exported. /// /// # Returns @@ -84,10 +81,10 @@ pub trait LogExporter: Send + Sync + Debug { /// A `LogResult<()>`, which is a result type indicating either a successful export (with /// `Ok(())`) or an error (`Err(LogError)`) if the export operation failed. /// - async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()>; + async fn export(&self, batch: LogBatch<'_>) -> LogResult<()>; /// Shuts down the exporter. fn shutdown(&mut self) {} - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] /// Chek if logs are enabled. 
fn event_enabled(&self, _level: Severity, _target: &str, _name: &str) -> bool { // By default, all logs are enabled diff --git a/opentelemetry-sdk/src/export/mod.rs b/opentelemetry-sdk/src/export/mod.rs index c59a7028a5..21dc2b570c 100644 --- a/opentelemetry-sdk/src/export/mod.rs +++ b/opentelemetry-sdk/src/export/mod.rs @@ -8,4 +8,8 @@ pub mod logs; #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] pub mod trace; -pub use opentelemetry::ExportError; +/// Trait for errors returned by exporters +pub trait ExportError: std::error::Error + Send + Sync + 'static { + /// The name of exporter that returned this error + fn exporter_name(&self) -> &'static str; +} diff --git a/opentelemetry-sdk/src/export/trace.rs b/opentelemetry-sdk/src/export/trace.rs index 4b43e00c36..c606d85b1a 100644 --- a/opentelemetry-sdk/src/export/trace.rs +++ b/opentelemetry-sdk/src/export/trace.rs @@ -2,7 +2,7 @@ use crate::Resource; use futures_util::future::BoxFuture; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status, TraceError}; -use opentelemetry::KeyValue; +use opentelemetry::{InstrumentationScope, KeyValue}; use std::borrow::Cow; use std::fmt::Debug; use std::time::SystemTime; @@ -95,6 +95,6 @@ pub struct SpanData { pub links: crate::trace::SpanLinks, /// Span status pub status: Status, - /// Instrumentation library that produced this span - pub instrumentation_lib: crate::InstrumentationLibrary, + /// Instrumentation scope that produced this span + pub instrumentation_scope: InstrumentationScope, } diff --git a/opentelemetry-sdk/src/instrumentation.rs b/opentelemetry-sdk/src/instrumentation.rs deleted file mode 100644 index 75a74efa73..0000000000 --- a/opentelemetry-sdk/src/instrumentation.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub use opentelemetry::InstrumentationLibrary; - -/// A logical unit of the application code with which the emitted telemetry can -/// be associated. -pub type Scope = InstrumentationLibrary; diff --git a/opentelemetry-sdk/src/lib.rs b/opentelemetry-sdk/src/lib.rs index 5ce2be9474..4afda7deb7 100644 --- a/opentelemetry-sdk/src/lib.rs +++ b/opentelemetry-sdk/src/lib.rs @@ -1,6 +1,6 @@ //! Implements the [`SDK`] component of [OpenTelemetry]. //! -//! *Compiler support: [requires `rustc` 1.65+][msrv]* +//! *[Supported Rust Versions](#supported-rust-versions)* //! //! [`SDK`]: https://opentelemetry.io/docs/specs/otel/overview/#sdk //! [OpenTelemetry]: https://opentelemetry.io/docs/what-is-opentelemetry/ @@ -30,7 +30,7 @@ //! }); //! //! // Shutdown trace pipeline -//! global::shutdown_tracer_provider(); +//! provider.shutdown().expect("TracerProvider should shutdown successfully") //! # } //! } //! # } @@ -60,7 +60,7 @@ //! let meter = global::meter("my_service"); //! //! // create an instrument -//! let counter = meter.u64_counter("my_counter").init(); +//! let counter = meter.u64_counter("my_counter").build(); //! //! // record a measurement //! counter.add(1, &[KeyValue::new("http.client_ip", "83.164.160.102")]); @@ -89,7 +89,7 @@ //! //! For `logs` the following feature flags are available: //! -//! * `logs_level_enabled`: control the log level +//! * `spec_unstable_logs_enabled`: control the log level //! //! Support for recording and exporting telemetry asynchronously and perform //! 
metrics aggregation can be added via the following flags: @@ -122,7 +122,7 @@ pub mod export; pub(crate) mod growable_array; -mod instrumentation; + #[cfg(feature = "logs")] #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] pub mod logs; @@ -146,6 +146,7 @@ pub mod trace; #[doc(hidden)] pub mod util; -pub use instrumentation::{InstrumentationLibrary, Scope}; #[doc(inline)] pub use resource::Resource; + +pub mod error; diff --git a/opentelemetry-sdk/src/logs/error.rs b/opentelemetry-sdk/src/logs/error.rs new file mode 100644 index 0000000000..4f33ba6dbf --- /dev/null +++ b/opentelemetry-sdk/src/logs/error.rs @@ -0,0 +1,63 @@ +use crate::export::ExportError; + +use std::{sync::PoisonError, time::Duration}; +use thiserror::Error; + +/// Describe the result of operations in log SDK. +pub type LogResult = Result; + +#[derive(Error, Debug)] +#[non_exhaustive] +/// Errors returned by the log SDK. +pub enum LogError { + /// Export failed with the error returned by the exporter. + #[error("Exporter {0} encountered the following errors: {name}", name = .0.exporter_name())] + ExportFailed(Box), + + /// Export failed to finish after certain period and processor stopped the export. + #[error("Exporter timed out after {} seconds", .0.as_secs())] + ExportTimedOut(Duration), + + /// Processor is already shutdown + #[error("{0} already shutdown")] + AlreadyShutdown(String), + + /// Mutex lock poisoning + #[error("mutex lock poisioning for {0}")] + MutexPoisoned(String), + + /// Other errors propagated from log SDK that weren't covered above. + #[error(transparent)] + Other(#[from] Box), +} + +impl From for LogError +where + T: ExportError, +{ + fn from(err: T) -> Self { + LogError::ExportFailed(Box::new(err)) + } +} + +impl From for LogError { + fn from(err_msg: String) -> Self { + LogError::Other(Box::new(Custom(err_msg))) + } +} + +impl From<&'static str> for LogError { + fn from(err_msg: &'static str) -> Self { + LogError::Other(Box::new(Custom(err_msg.into()))) + } +} + +impl From> for LogError { + fn from(err: PoisonError) -> Self { + LogError::Other(err.to_string().into()) + } +} +/// Wrap type for string +#[derive(Error, Debug)] +#[error("{0}")] +struct Custom(String); diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/log_emitter.rs index 7463b19059..479ca36dd2 100644 --- a/opentelemetry-sdk/src/logs/log_emitter.rs +++ b/opentelemetry-sdk/src/logs/log_emitter.rs @@ -1,82 +1,78 @@ use super::{BatchLogProcessor, LogProcessor, LogRecord, SimpleLogProcessor, TraceContext}; -use crate::{export::logs::LogExporter, runtime::RuntimeChannel, Resource}; -use opentelemetry::{ - global, - logs::{LogError, LogResult}, - trace::TraceContextExt, - Context, InstrumentationLibrary, -}; +use crate::{export::logs::LogExporter, Resource}; +use crate::{logs::LogError, logs::LogResult}; +use opentelemetry::{otel_debug, otel_info, trace::TraceContextExt, Context, InstrumentationScope}; -#[cfg(feature = "logs_level_enabled")] +#[cfg(feature = "spec_unstable_logs_enabled")] use opentelemetry::logs::Severity; +use std::time::SystemTime; use std::{ borrow::Cow, - sync::{atomic::Ordering, Arc}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, OnceLock, + }, }; -use std::{sync::atomic::AtomicBool, time::SystemTime}; - -use once_cell::sync::Lazy; // a no nop logger provider used as placeholder when the provider is shutdown -static NOOP_LOGGER_PROVIDER: Lazy = Lazy::new(|| LoggerProvider { - inner: Arc::new(LoggerProviderInner { - processors: Vec::new(), - resource: Resource::empty(), - }), - 
is_shutdown: Arc::new(AtomicBool::new(true)), -}); +// TODO - replace it with LazyLock once it is stable +static NOOP_LOGGER_PROVIDER: OnceLock = OnceLock::new(); + +#[inline] +fn noop_logger_provider() -> &'static LoggerProvider { + NOOP_LOGGER_PROVIDER.get_or_init(|| LoggerProvider { + inner: Arc::new(LoggerProviderInner { + processors: Vec::new(), + resource: Resource::empty(), + is_shutdown: AtomicBool::new(true), + }), + }) +} #[derive(Debug, Clone)] -/// Creator for `Logger` instances. +/// Handles the creation and coordination of [`Logger`]s. +/// +/// All `Logger`s created by a `LoggerProvider` will share the same +/// [`Resource`] and have their created log records processed by the +/// configured log processors. This is a clonable handle to the `LoggerProvider` +/// itself, and cloning it will create a new reference, not a new instance of a +/// `LoggerProvider`. Dropping the last reference will trigger the shutdown of +/// the provider, ensuring that all remaining logs are flushed and no further +/// logs are processed. Shutdown can also be triggered manually by calling +/// the [`shutdown`](LoggerProvider::shutdown) method. +/// +/// [`Logger`]: opentelemetry::logs::Logger +/// [`Resource`]: crate::Resource pub struct LoggerProvider { inner: Arc, - is_shutdown: Arc, } -/// Default logger name if empty string is provided. -const DEFAULT_COMPONENT_NAME: &str = "rust.opentelemetry.io/sdk/logger"; - impl opentelemetry::logs::LoggerProvider for LoggerProvider { type Logger = Logger; - /// Create a new versioned `Logger` instance. - fn versioned_logger( - &self, - name: impl Into>, - version: Option>, - schema_url: Option>, - attributes: Option>, - ) -> Logger { - let name = name.into(); - - let component_name = if name.is_empty() { - Cow::Borrowed(DEFAULT_COMPONENT_NAME) - } else { - name - }; - - let mut builder = self.logger_builder(component_name); - - if let Some(v) = version { - builder = builder.with_version(v); - } - if let Some(s) = schema_url { - builder = builder.with_schema_url(s); - } - if let Some(a) = attributes { - builder = builder.with_attributes(a); - } - - builder.build() + fn logger(&self, name: impl Into>) -> Self::Logger { + let scope = InstrumentationScope::builder(name).build(); + self.logger_with_scope(scope) } - fn library_logger(&self, library: Arc) -> Self::Logger { + fn logger_with_scope(&self, scope: InstrumentationScope) -> Self::Logger { // If the provider is shutdown, new logger will refer a no-op logger provider. - if self.is_shutdown.load(Ordering::Relaxed) { - return Logger::new(library, NOOP_LOGGER_PROVIDER.clone()); + if self.inner.is_shutdown.load(Ordering::Relaxed) { + otel_debug!( + name: "LoggerProvider.NoOpLoggerReturned", + logger_name = scope.name(), + ); + return Logger::new(scope, noop_logger_provider().clone()); } - Logger::new(library, self.clone()) + if scope.name().is_empty() { + otel_info!(name: "LoggerNameEmpty", message = "Logger name is empty; consider providing a meaningful name. 
Logger will function normally and the provided name will be used as-is."); + }; + otel_debug!( + name: "LoggerProvider.NewLoggerReturned", + logger_name = scope.name(), + ); + Logger::new(scope, self.clone()) } } @@ -104,27 +100,24 @@ impl LoggerProvider { /// Shuts down this `LoggerProvider` pub fn shutdown(&self) -> LogResult<()> { + otel_debug!( + name: "LoggerProvider.ShutdownInvokedByUser", + ); if self + .inner .is_shutdown .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_ok() { // propagate the shutdown signal to processors - // it's up to the processor to properly block new logs after shutdown - let mut errs = vec![]; - for processor in &self.inner.processors { - if let Err(err) = processor.shutdown() { - errs.push(err); - } - } - + let errs = self.inner.shutdown(); if errs.is_empty() { Ok(()) } else { Err(LogError::Other(format!("{errs:?}").into())) } } else { - Err(LogError::Other("logger provider already shut down".into())) + Err(LogError::AlreadyShutdown("LoggerProvider".to_string())) } } } @@ -133,15 +126,54 @@ impl LoggerProvider { struct LoggerProviderInner { processors: Vec<Box<dyn LogProcessor>>, resource: Resource, + is_shutdown: AtomicBool, } -impl Drop for LoggerProviderInner { - fn drop(&mut self) { - for processor in &mut self.processors { +impl LoggerProviderInner { + /// Shuts down the `LoggerProviderInner` and returns any errors. + pub(crate) fn shutdown(&self) -> Vec<LogError> { + let mut errs = vec![]; + for processor in &self.processors { if let Err(err) = processor.shutdown() { - global::handle_error(err); + // Log at debug level because: + // - The error is also returned to the user for handling (if applicable) + // - Or the error occurs during `LoggerProviderInner::Drop` as part of telemetry shutdown, + // which is non-actionable by the user + match err { + // specific handling for mutex poisoning + LogError::MutexPoisoned(_) => { + otel_debug!( + name: "LoggerProvider.Drop.ShutdownMutexPoisoned", + ); + } + _ => { + otel_debug!( + name: "LoggerProvider.Drop.ShutdownError", + error = format!("{err}") + ); + } + } + errs.push(err); } } + errs + } +} + +impl Drop for LoggerProviderInner { + fn drop(&mut self) { + if !self.is_shutdown.load(Ordering::Relaxed) { + otel_info!( + name: "LoggerProvider.Drop", + message = "Last reference of LoggerProvider dropped, initiating shutdown." + ); + let _ = self.shutdown(); // errors are handled within shutdown + } else { + otel_debug!( + name: "LoggerProvider.Drop.AlreadyShutdown", + message = "LoggerProvider was already shut down; drop will not attempt shutdown again." + ); + } } } @@ -156,18 +188,14 @@ impl Builder { /// The `LogExporter` that this provider should use. pub fn with_simple_exporter<T: LogExporter + 'static>(self, exporter: T) -> Self { let mut processors = self.processors; - processors.push(Box::new(SimpleLogProcessor::new(Box::new(exporter)))); + processors.push(Box::new(SimpleLogProcessor::new(exporter))); Builder { processors, ..self } } /// The `LogExporter` setup using a default `BatchLogProcessor` that this provider should use. - pub fn with_batch_exporter<T: LogExporter + 'static, R: RuntimeChannel>( - self, - exporter: T, - runtime: R, - ) -> Self { - let batch = BatchLogProcessor::builder(exporter, runtime).build(); + pub fn with_batch_exporter<T: LogExporter + 'static>(self, exporter: T) -> Self { + let batch = BatchLogProcessor::builder(exporter).build(); self.with_log_processor(batch) } @@ -189,20 +217,24 @@ impl Builder { /// Create a new provider from this configuration.
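With the shutdown logic above, repeated calls now surface the dedicated `AlreadyShutdown` variant instead of a stringly-typed `Other`. A minimal sketch of what that looks like from application code, assuming the `testing` feature's `InMemoryLogExporter` as a stand-in exporter:

```rust
use opentelemetry_sdk::logs::{LogError, LoggerProvider};
use opentelemetry_sdk::testing::logs::InMemoryLogExporter;

fn main() {
    let provider = LoggerProvider::builder()
        .with_simple_exporter(InMemoryLogExporter::default())
        .build();

    // The first shutdown flips the shared `is_shutdown` flag and
    // propagates the signal to every registered processor.
    assert!(provider.shutdown().is_ok());

    // A second call is rejected with a matchable error variant.
    match provider.shutdown() {
        Err(LogError::AlreadyShutdown(component)) => {
            eprintln!("{component} was already shut down");
        }
        other => panic!("expected AlreadyShutdown, got {other:?}"),
    }
}
```

Because `is_shutdown` now lives on `LoggerProviderInner`, every clone of the provider observes the same flag; the multi-provider drop tests below exercise exactly this.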
pub fn build(self) -> LoggerProvider { - let resource = self.resource.unwrap_or_default(); + let resource = self.resource.unwrap_or(Resource::builder().build()); let logger_provider = LoggerProvider { inner: Arc::new(LoggerProviderInner { processors: self.processors, resource, + is_shutdown: AtomicBool::new(false), }), - is_shutdown: Arc::new(AtomicBool::new(false)), }; // invoke set_resource on all the processors for processor in logger_provider.log_processors() { processor.set_resource(logger_provider.resource()); } + + otel_debug!( + name: "LoggerProvider.Built", + ); logger_provider } } @@ -212,29 +244,18 @@ impl Builder { /// /// [`LogRecord`]: opentelemetry::logs::LogRecord pub struct Logger { - instrumentation_lib: Arc, + scope: InstrumentationScope, provider: LoggerProvider, } impl Logger { - pub(crate) fn new( - instrumentation_lib: Arc, - provider: LoggerProvider, - ) -> Self { - Logger { - instrumentation_lib, - provider, - } - } - - /// LoggerProvider associated with this logger. - pub fn provider(&self) -> &LoggerProvider { - &self.provider + pub(crate) fn new(scope: InstrumentationScope, provider: LoggerProvider) -> Self { + Logger { scope, provider } } - /// Instrumentation library information of this logger. - pub fn instrumentation_library(&self) -> &InstrumentationLibrary { - &self.instrumentation_lib + #[cfg(test)] + pub(crate) fn instrumentation_scope(&self) -> &InstrumentationScope { + &self.scope } } @@ -247,38 +268,36 @@ impl opentelemetry::logs::Logger for Logger { /// Emit a `LogRecord`. fn emit(&self, mut record: Self::LogRecord) { - let provider = self.provider(); + let provider = &self.provider; let processors = provider.log_processors(); - let trace_context = Context::map_current(|cx| { - cx.has_active_span() - .then(|| TraceContext::from(cx.span().span_context())) - }); //let mut log_record = record; - if let Some(ref trace_context) = trace_context { - record.trace_context = Some(trace_context.clone()); + if record.trace_context.is_none() { + let trace_context = Context::map_current(|cx| { + cx.has_active_span() + .then(|| TraceContext::from(cx.span().span_context())) + }); + + if let Some(ref trace_context) = trace_context { + record.trace_context = Some(trace_context.clone()); + } } if record.observed_timestamp.is_none() { record.observed_timestamp = Some(SystemTime::now()); } for p in processors { - p.emit(&mut record, self.instrumentation_library()); + p.emit(&mut record, &self.scope); } } - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] fn event_enabled(&self, level: Severity, target: &str) -> bool { - let provider = self.provider(); + let provider = &self.provider; let mut enabled = false; for processor in provider.log_processors() { - enabled = enabled - || processor.event_enabled( - level, - target, - self.instrumentation_library().name.as_ref(), - ); + enabled = enabled || processor.event_enabled(level, target, self.scope.name().as_ref()); } enabled } @@ -286,13 +305,18 @@ impl opentelemetry::logs::Logger for Logger { #[cfg(test)] mod tests { - use crate::resource::{ - SERVICE_NAME, TELEMETRY_SDK_LANGUAGE, TELEMETRY_SDK_NAME, TELEMETRY_SDK_VERSION, + use crate::{ + resource::{ + SERVICE_NAME, TELEMETRY_SDK_LANGUAGE, TELEMETRY_SDK_NAME, TELEMETRY_SDK_VERSION, + }, + testing::logs::InMemoryLogExporter, + trace::TracerProvider, + Resource, }; - use crate::Resource; use super::*; - use opentelemetry::logs::{Logger, LoggerProvider as _}; + use opentelemetry::logs::{AnyValue, LogRecord as _, Logger as _, 
LoggerProvider as _}; + use opentelemetry::trace::{SpanId, TraceId, Tracer as _, TracerProvider as _}; use opentelemetry::{Key, KeyValue, Value}; use std::fmt::{Debug, Formatter}; use std::sync::atomic::AtomicU64; @@ -320,7 +344,7 @@ mod tests { } impl LogProcessor for ShutdownTestLogProcessor { - fn emit(&self, _data: &mut LogRecord, _library: &InstrumentationLibrary) { + fn emit(&self, _data: &mut LogRecord, _scope: &InstrumentationScope) { self.is_shutdown .lock() .map(|is_shutdown| { @@ -385,10 +409,11 @@ mod tests { // If user provided a resource, use that. let custom_config_provider = super::LoggerProvider::builder() - .with_resource(Resource::new(vec![KeyValue::new( - SERVICE_NAME, - "test_service", - )])) + .with_resource( + Resource::builder_empty() + .with_service_name("test_service") + .build(), + ) .build(); assert_resource(&custom_config_provider, SERVICE_NAME, Some("test_service")); assert_eq!(custom_config_provider.resource().len(), 1); @@ -417,10 +442,14 @@ mod tests { Some("my-custom-key=env-val,k2=value2"), || { let user_provided_resource_config_provider = super::LoggerProvider::builder() - .with_resource(Resource::default().merge(&mut Resource::new(vec![ - KeyValue::new("my-custom-key", "my-custom-value"), - KeyValue::new("my-custom-key2", "my-custom-value2"), - ]))) + .with_resource( + Resource::builder() + .with_attributes([ + KeyValue::new("my-custom-key", "my-custom-value"), + KeyValue::new("my-custom-key2", "my-custom-value2"), + ]) + .build(), + ) .build(); assert_resource( &user_provided_resource_config_provider, @@ -454,6 +483,72 @@ mod tests { assert_eq!(no_service_name.resource().len(), 0); } + #[test] + fn trace_context_test() { + let exporter = InMemoryLogExporter::default(); + + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + + let logger = logger_provider.logger("test-logger"); + + let tracer_provider = TracerProvider::builder().build(); + + let tracer = tracer_provider.tracer("test-tracer"); + + tracer.in_span("test-span", |cx| { + let ambient_ctxt = cx.span().span_context().clone(); + let explicit_ctxt = TraceContext { + trace_id: TraceId::from_u128(13), + span_id: SpanId::from_u64(14), + trace_flags: None, + }; + + let mut ambient_ctxt_record = logger.create_log_record(); + ambient_ctxt_record.set_body(AnyValue::String("ambient".into())); + + let mut explicit_ctxt_record = logger.create_log_record(); + explicit_ctxt_record.set_body(AnyValue::String("explicit".into())); + explicit_ctxt_record.set_trace_context( + explicit_ctxt.trace_id, + explicit_ctxt.span_id, + explicit_ctxt.trace_flags, + ); + + logger.emit(ambient_ctxt_record); + logger.emit(explicit_ctxt_record); + + let emitted = exporter.get_emitted_logs().unwrap(); + + assert_eq!( + Some(AnyValue::String("ambient".into())), + emitted[0].record.body + ); + assert_eq!( + ambient_ctxt.trace_id(), + emitted[0].record.trace_context.as_ref().unwrap().trace_id + ); + assert_eq!( + ambient_ctxt.span_id(), + emitted[0].record.trace_context.as_ref().unwrap().span_id + ); + + assert_eq!( + Some(AnyValue::String("explicit".into())), + emitted[1].record.body + ); + assert_eq!( + explicit_ctxt.trace_id, + emitted[1].record.trace_context.as_ref().unwrap().trace_id + ); + assert_eq!( + explicit_ctxt.span_id, + emitted[1].record.trace_context.as_ref().unwrap().span_id + ); + }); + } + #[test] fn shutdown_test() { let counter = Arc::new(AtomicU64::new(0)); @@ -531,6 +626,125 @@ mod tests { assert!(!*flush_called.lock().unwrap()); } + #[test] + fn 
drop_test_with_multiple_providers() { + let shutdown_called = Arc::new(Mutex::new(false)); + let flush_called = Arc::new(Mutex::new(false)); + { + // Create a shared LoggerProviderInner and use it across multiple providers + let shared_inner = Arc::new(LoggerProviderInner { + processors: vec![Box::new(LazyLogProcessor::new( + shutdown_called.clone(), + flush_called.clone(), + ))], + resource: Resource::empty(), + is_shutdown: AtomicBool::new(false), + }); + + { + let logger_provider1 = LoggerProvider { + inner: shared_inner.clone(), + }; + let logger_provider2 = LoggerProvider { + inner: shared_inner.clone(), + }; + + let logger1 = logger_provider1.logger("test-logger1"); + let logger2 = logger_provider2.logger("test-logger2"); + + logger1.emit(logger1.create_log_record()); + logger2.emit(logger1.create_log_record()); + + // LoggerProviderInner should not be dropped yet, since both providers and `shared_inner` + // are still holding a reference. + } + // At this point, both `logger_provider1` and `logger_provider2` are dropped, + // but `shared_inner` still holds a reference, so `LoggerProviderInner` is NOT dropped yet. + } + // Verify shutdown was called during the drop of the shared LoggerProviderInner + assert!(*shutdown_called.lock().unwrap()); + // Verify flush was not called during drop + assert!(!*flush_called.lock().unwrap()); + } + + #[test] + fn drop_after_shutdown_test_with_multiple_providers() { + let shutdown_called = Arc::new(Mutex::new(0)); // Count the number of times shutdown is called + let flush_called = Arc::new(Mutex::new(false)); + + // Create a shared LoggerProviderInner and use it across multiple providers + let shared_inner = Arc::new(LoggerProviderInner { + processors: vec![Box::new(CountingShutdownProcessor::new( + shutdown_called.clone(), + flush_called.clone(), + ))], + resource: Resource::empty(), + is_shutdown: AtomicBool::new(false), + }); + + // Create a scope to test behavior when providers are dropped + { + let logger_provider1 = LoggerProvider { + inner: shared_inner.clone(), + }; + let logger_provider2 = LoggerProvider { + inner: shared_inner.clone(), + }; + + // Explicitly shut down the logger provider + let shutdown_result = logger_provider1.shutdown(); + assert!(shutdown_result.is_ok()); + + // Verify that shutdown was called exactly once + assert_eq!(*shutdown_called.lock().unwrap(), 1); + + // LoggerProvider2 should observe the shutdown state but not trigger another shutdown + let shutdown_result2 = logger_provider2.shutdown(); + assert!(shutdown_result2.is_err()); + + // Both logger providers will be dropped at the end of this scope + } + + // Verify that shutdown was only called once, even after drop + assert_eq!(*shutdown_called.lock().unwrap(), 1); + } + + #[test] + fn test_empty_logger_name() { + let exporter = InMemoryLogExporter::default(); + let logger_provider = LoggerProvider::builder() + .with_simple_exporter(exporter.clone()) + .build(); + let logger = logger_provider.logger(""); + let mut record = logger.create_log_record(); + record.set_body("Testing empty logger name".into()); + logger.emit(record); + + // Create a logger using a scope with an empty name + let scope = InstrumentationScope::builder("").build(); + let scoped_logger = logger_provider.logger_with_scope(scope); + let mut scoped_record = scoped_logger.create_log_record(); + scoped_record.set_body("Testing empty logger scope name".into()); + scoped_logger.emit(scoped_record); + + // Assert: Verify that the emitted logs are processed correctly + let emitted_logs = 
exporter.get_emitted_logs().unwrap(); + assert_eq!(emitted_logs.len(), 2); + // Assert the first log + assert_eq!( + emitted_logs[0].clone().record.body, + Some(AnyValue::String("Testing empty logger name".into())) + ); + assert_eq!(logger.scope.name(), ""); + + // Assert the second log created through the scope + assert_eq!( + emitted_logs[1].clone().record.body, + Some(AnyValue::String("Testing empty logger scope name".into())) + ); + assert_eq!(scoped_logger.scope.name(), ""); + } + #[derive(Debug)] pub(crate) struct LazyLogProcessor { shutdown_called: Arc>, @@ -550,7 +764,7 @@ mod tests { } impl LogProcessor for LazyLogProcessor { - fn emit(&self, _data: &mut LogRecord, _library: &InstrumentationLibrary) { + fn emit(&self, _data: &mut LogRecord, _scope: &InstrumentationScope) { // nothing to do. } @@ -564,4 +778,36 @@ mod tests { Ok(()) } } + + #[derive(Debug)] + struct CountingShutdownProcessor { + shutdown_count: Arc>, + flush_called: Arc>, + } + + impl CountingShutdownProcessor { + fn new(shutdown_count: Arc>, flush_called: Arc>) -> Self { + CountingShutdownProcessor { + shutdown_count, + flush_called, + } + } + } + + impl LogProcessor for CountingShutdownProcessor { + fn emit(&self, _data: &mut LogRecord, _scope: &InstrumentationScope) { + // nothing to do + } + + fn force_flush(&self) -> LogResult<()> { + *self.flush_called.lock().unwrap() = true; + Ok(()) + } + + fn shutdown(&self) -> LogResult<()> { + let mut count = self.shutdown_count.lock().unwrap(); + *count += 1; + Ok(()) + } + } } diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index b90214521e..dab1dde2ac 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -1,28 +1,33 @@ use crate::{ export::logs::{ExportResult, LogBatch, LogExporter}, - logs::LogRecord, - runtime::{RuntimeChannel, TrySend}, + logs::{LogError, LogRecord, LogResult}, Resource, }; -use futures_channel::oneshot; -use futures_util::{ - future::{self, Either}, - {pin_mut, stream, StreamExt as _}, -}; -#[cfg(feature = "logs_level_enabled")] +use std::sync::mpsc::{self, RecvTimeoutError, SyncSender}; + +#[cfg(feature = "spec_unstable_logs_enabled")] use opentelemetry::logs::Severity; -use opentelemetry::{ - global, - logs::{LogError, LogResult}, - InstrumentationLibrary, -}; -use std::sync::atomic::AtomicBool; +use opentelemetry::{otel_debug, otel_error, otel_warn, InstrumentationScope}; + +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::{cmp::min, env, sync::Mutex}; use std::{ fmt::{self, Debug, Formatter}, str::FromStr, sync::Arc, + thread, time::Duration, + time::Instant, +}; + +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +use crate::runtime::{RuntimeChannel, TrySend}; +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +use futures_channel::oneshot; +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +use futures_util::{ + future::{self, Either}, + {pin_mut, stream, StreamExt as _}, }; /// Delay interval between two consecutive exports. @@ -48,23 +53,23 @@ const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT: usize = 512; pub trait LogProcessor: Send + Sync + Debug { /// Called when a log record is ready to processed and exported. /// - /// This method receives a mutable reference to `LogData`. If the processor + /// This method receives a mutable reference to `LogRecord`. 
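Since `emit` hands each processor a mutable reference, a processor registered early in the pipeline can enrich the record for every processor (and ultimately the exporter) that runs after it. A minimal sketch of such a processor, mirroring the `FirstProcessor`/`SecondProcessor` tests further down; the attribute name and value are purely illustrative:

```rust
use opentelemetry::logs::{AnyValue, LogRecord as _};
use opentelemetry::{InstrumentationScope, Key};
use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LogResult};

/// Hypothetical enrichment processor: the attribute added in `emit` is
/// visible to every `LogProcessor` registered after this one.
#[derive(Debug)]
struct EnrichingProcessor;

impl LogProcessor for EnrichingProcessor {
    fn emit(&self, record: &mut LogRecord, _scope: &InstrumentationScope) {
        record.add_attribute(
            Key::from_static_str("deployment.environment"),
            AnyValue::String("staging".into()),
        );
    }

    fn force_flush(&self) -> LogResult<()> {
        Ok(())
    }

    fn shutdown(&self) -> LogResult<()> {
        Ok(())
    }
}
```

Registered via `LoggerProvider::builder().with_log_processor(EnrichingProcessor)`, it runs in registration order ahead of any exporting processor.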
If the processor /// needs to handle the export asynchronously, it should clone the data to /// ensure it can be safely processed without lifetime issues. Any changes /// made to the log data in this method will be reflected in the next log /// processor in the chain. /// /// # Parameters - /// - `record`: A mutable reference to `LogData` representing the log record. - /// - `instrumentation`: The instrumentation library associated with the log record. - fn emit(&self, data: &mut LogRecord, instrumentation: &InstrumentationLibrary); + /// - `record`: A mutable reference to `LogRecord` representing the log record. + /// - `instrumentation`: The instrumentation scope associated with the log record. + fn emit(&self, data: &mut LogRecord, instrumentation: &InstrumentationScope); /// Force the logs lying in the cache to be exported. fn force_flush(&self) -> LogResult<()>; /// Shuts down the processor. /// After shutdown returns the log processor should stop processing any logs. /// It's up to the implementation on when to drop the LogProcessor. fn shutdown(&self) -> LogResult<()>; - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] /// Check if logging is enabled fn event_enabled(&self, _level: Severity, _target: &str, _name: &str) -> bool { // By default, all logs are enabled @@ -80,13 +85,13 @@ pub trait LogProcessor: Send + Sync + Debug { /// debugging and testing. For scenarios requiring higher /// performance/throughput, consider using [BatchLogProcessor]. #[derive(Debug)] -pub struct SimpleLogProcessor { - exporter: Mutex<Box<dyn LogExporter>>, +pub struct SimpleLogProcessor<T: LogExporter> { + exporter: Mutex<T>, is_shutdown: AtomicBool, } -impl SimpleLogProcessor { - pub(crate) fn new(exporter: Box<dyn LogExporter>) -> Self { +impl<T: LogExporter> SimpleLogProcessor<T> { + pub(crate) fn new(exporter: T) -> Self { SimpleLogProcessor { exporter: Mutex::new(exporter), is_shutdown: AtomicBool::new(false), @@ -94,23 +99,40 @@ impl SimpleLogProcessor { } } -impl LogProcessor for SimpleLogProcessor { - fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationLibrary) { +impl<T: LogExporter> LogProcessor for SimpleLogProcessor<T> { + fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationScope) { // noop after shutdown if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) { + // this is a warning, as the user is trying to log after the processor has been shutdown + otel_warn!( + name: "SimpleLogProcessor.Emit.ProcessorShutdown", + ); return; } let result = self .exporter .lock() - .map_err(|_| LogError::Other("simple logprocessor mutex poison".into())) - .and_then(|mut exporter| { + .map_err(|_| LogError::MutexPoisoned("SimpleLogProcessor".into())) + .and_then(|exporter| { let log_tuple = &[(record as &LogRecord, instrumentation)]; futures_executor::block_on(exporter.export(LogBatch::new(log_tuple))) }); - if let Err(err) = result { - global::handle_error(err); + // Handle errors with specific static names + match result { + Err(LogError::MutexPoisoned(_)) => { + // logging as debug as this is not a user error + otel_debug!( + name: "SimpleLogProcessor.Emit.MutexPoisoning", + ); + } + Err(err) => { + otel_error!( + name: "SimpleLogProcessor.Emit.ExportError", + error = format!("{}",err) + ); + } + _ => {} } } @@ -125,9 +147,7 @@ impl LogProcessor for SimpleLogProcessor { exporter.shutdown(); Ok(()) } else { - Err(LogError::Other( - "simple logprocessor mutex poison during shutdown".into(), - )) + Err(LogError::MutexPoisoned("SimpleLogProcessor".into())) } } @@ -138,13 +158,358 @@ impl LogProcessor for
SimpleLogProcessor { } } +/// Messages sent between application thread and batch log processor's work thread. +#[allow(clippy::large_enum_variant)] +#[derive(Debug)] +enum BatchMessage { + /// Export logs, usually called when the log is emitted. + ExportLog(Box<(LogRecord, InstrumentationScope)>), + /// Flush the current buffer to the backend, it can be triggered by + /// pre configured interval or a call to `force_push` function. + // Flush(Option>), + /// ForceFlush flushes the current buffer to the backend. + ForceFlush(mpsc::SyncSender), + /// Shut down the worker thread, push all logs in buffer to the backend. + Shutdown(mpsc::SyncSender), + /// Set the resource for the exporter. + SetResource(Arc), +} + +/// A [`LogProcessor`] that buffers log records and reports +/// them at a pre-configured interval from a dedicated background thread. +pub struct BatchLogProcessor { + message_sender: SyncSender, + handle: Mutex>>, + forceflush_timeout: Duration, + shutdown_timeout: Duration, + is_shutdown: AtomicBool, + + // Track dropped logs - we'll log this at shutdown + dropped_logs_count: AtomicUsize, + + // Track the maximum queue size that was configured for this processor + max_queue_size: usize, +} + +impl Debug for BatchLogProcessor { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("BatchLogProcessor") + .field("message_sender", &self.message_sender) + .finish() + } +} + +impl LogProcessor for BatchLogProcessor { + fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationScope) { + // noop after shutdown + if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) { + otel_warn!( + name: "BatchLogProcessor.Emit.ProcessorShutdown", + message = "BatchLogProcessor has been shutdown. No further logs will be emitted." + ); + return; + } + + let result = self + .message_sender + .try_send(BatchMessage::ExportLog(Box::new(( + record.clone(), + instrumentation.clone(), + )))); + + // TODO - Implement throttling to prevent error flooding when the queue is full or closed. + if result.is_err() { + // Increment dropped logs count. The first time we have to drop a log, + // emit a warning. + if self.dropped_logs_count.fetch_add(1, Ordering::Relaxed) == 0 { + otel_warn!(name: "BatchLogProcessor.LogDroppingStarted", + message = "BatchLogProcessor dropped a LogRecord due to queue full/internal errors. No further log will be emitted for further drops until Shutdown. During Shutdown time, a log will be emitted with exact count of total logs dropped."); + } + } + } + + fn force_flush(&self) -> LogResult<()> { + if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) { + return LogResult::Err(LogError::Other( + "BatchLogProcessor is already shutdown".into(), + )); + } + let (sender, receiver) = mpsc::sync_channel(1); + self.message_sender + .try_send(BatchMessage::ForceFlush(sender)) + .map_err(|err| LogError::Other(err.into()))?; + + receiver + .recv_timeout(self.forceflush_timeout) + .map_err(|err| { + if err == RecvTimeoutError::Timeout { + LogError::ExportTimedOut(self.forceflush_timeout) + } else { + LogError::Other(err.into()) + } + })? + } + + fn shutdown(&self) -> LogResult<()> { + // test and set is_shutdown flag if it is not set + if self + .is_shutdown + .swap(true, std::sync::atomic::Ordering::Relaxed) + { + otel_warn!( + name: "BatchLogProcessor.Shutdown.ProcessorShutdown", + message = "BatchLogProcessor has been shutdown. No further logs will be emitted." 
+ ); + return LogResult::Err(LogError::AlreadyShutdown( + "BatchLogProcessor is already shutdown".into(), + )); + } + + let dropped_logs = self.dropped_logs_count.load(Ordering::Relaxed); + let max_queue_size = self.max_queue_size; + if dropped_logs > 0 { + otel_warn!( + name: "BatchLogProcessor.LogsDropped", + dropped_logs_count = dropped_logs, + max_queue_size = max_queue_size, + message = "Logs were dropped due to a queue being full or other error. The count represents the total count of log records dropped in the lifetime of this BatchLogProcessor. Consider increasing the queue size and/or decrease delay between intervals." + ); + } + + let (sender, receiver) = mpsc::sync_channel(1); + self.message_sender + .try_send(BatchMessage::Shutdown(sender)) + .map_err(|err| LogError::Other(err.into()))?; + + receiver + .recv_timeout(self.shutdown_timeout) + .map(|_| { + // join the background thread after receiving back the shutdown signal + if let Some(handle) = self.handle.lock().unwrap().take() { + handle.join().unwrap(); + } + LogResult::Ok(()) + }) + .map_err(|err| match err { + RecvTimeoutError::Timeout => { + otel_error!( + name: "BatchLogProcessor.Shutdown.Timeout", + message = "BatchLogProcessor shutdown timing out." + ); + LogError::ExportTimedOut(self.shutdown_timeout) + } + _ => { + otel_error!( + name: "BatchLogProcessor.Shutdown.Error", + error = format!("{}", err) + ); + LogError::Other(err.into()) + } + })? + } + + fn set_resource(&self, resource: &Resource) { + let resource = Arc::new(resource.clone()); + let _ = self + .message_sender + .try_send(BatchMessage::SetResource(resource)); + } +} + +impl BatchLogProcessor { + pub(crate) fn new(mut exporter: Box, config: BatchConfig) -> Self { + let (message_sender, message_receiver) = mpsc::sync_channel(config.max_queue_size); + let max_queue_size = config.max_queue_size; + + let handle = thread::Builder::new() + .name("OpenTelemetry.Logs.BatchProcessor".to_string()) + .spawn(move || { + let mut last_export_time = Instant::now(); + let mut logs = Vec::new(); + logs.reserve(config.max_export_batch_size); + + loop { + let remaining_time_option = config + .scheduled_delay + .checked_sub(last_export_time.elapsed()); + let remaining_time = match remaining_time_option { + Some(remaining_time) => remaining_time, + None => config.scheduled_delay, + }; + + match message_receiver.recv_timeout(remaining_time) { + Ok(BatchMessage::ExportLog(log)) => { + logs.push(log); + if logs.len() == config.max_export_batch_size + || last_export_time.elapsed() >= config.scheduled_delay + { + let _ = export_with_timeout_sync( + remaining_time, + exporter.as_mut(), + logs.split_off(0), + &mut last_export_time, + ); + } + } + Ok(BatchMessage::ForceFlush(sender)) => { + let result = export_with_timeout_sync( + remaining_time, + exporter.as_mut(), + logs.split_off(0), + &mut last_export_time, + ); + let _ = sender.send(result); + } + Ok(BatchMessage::Shutdown(sender)) => { + let result = export_with_timeout_sync( + remaining_time, + exporter.as_mut(), + logs.split_off(0), + &mut last_export_time, + ); + let _ = sender.send(result); + + // + // break out the loop and return from the current background thread. + // + break; + } + Ok(BatchMessage::SetResource(resource)) => { + exporter.set_resource(&resource); + } + Err(RecvTimeoutError::Timeout) => { + let _ = export_with_timeout_sync( + remaining_time, + exporter.as_mut(), + logs.split_off(0), + &mut last_export_time, + ); + } + Err(err) => { + // TODO: this should not happen! Log the error and continue for now. 
+ otel_error!( + name: "BatchLogProcessor.InternalError", + error = format!("{}", err) + ); + } + } + } + }) + .expect("Thread spawn failed."); //TODO: Handle thread spawn failure + + // Return batch processor with link to worker + BatchLogProcessor { + message_sender, + handle: Mutex::new(Some(handle)), + forceflush_timeout: Duration::from_secs(5), // TODO: make this configurable + shutdown_timeout: Duration::from_secs(5), // TODO: make this configurable + is_shutdown: AtomicBool::new(false), + dropped_logs_count: AtomicUsize::new(0), + max_queue_size, + } + } + + /// Create a new batch processor builder + pub fn builder(exporter: E) -> BatchLogProcessorBuilder + where + E: LogExporter, + { + BatchLogProcessorBuilder { + exporter, + config: Default::default(), + } + } +} + +#[allow(clippy::vec_box)] +fn export_with_timeout_sync( + _: Duration, // TODO, enforcing timeout in exporter. + exporter: &mut E, + batch: Vec>, + last_export_time: &mut Instant, +) -> ExportResult +where + E: LogExporter + ?Sized, +{ + *last_export_time = Instant::now(); + + if batch.is_empty() { + return LogResult::Ok(()); + } + + let log_vec: Vec<(&LogRecord, &InstrumentationScope)> = batch + .iter() + .map(|log_data| (&log_data.0, &log_data.1)) + .collect(); + let export = exporter.export(LogBatch::new(log_vec.as_slice())); + let export_result = futures_executor::block_on(export); + + match export_result { + Ok(_) => LogResult::Ok(()), + Err(err) => { + otel_error!( + name: "BatchLogProcessor.ExportError", + error = format!("{}", err) + ); + LogResult::Err(err) + } + } +} + +/// +/// A builder for creating [`BatchLogProcessor`] instances. +/// +#[derive(Debug)] +pub struct BatchLogProcessorBuilder { + exporter: E, + config: BatchConfig, +} + +impl BatchLogProcessorBuilder +where + E: LogExporter + 'static, +{ + /// Set the BatchConfig for [`BatchLogProcessorBuilder`] + pub fn with_batch_config(self, config: BatchConfig) -> Self { + BatchLogProcessorBuilder { config, ..self } + } + + /// Build a batch processor + pub fn build(self) -> BatchLogProcessor { + BatchLogProcessor::new(Box::new(self.exporter), self.config) + } +} + +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +#[allow(clippy::large_enum_variant)] +#[derive(Debug)] +enum BatchMessageWithAsyncRuntime { + /// Export logs, usually called when the log is emitted. + ExportLog((LogRecord, InstrumentationScope)), + /// Flush the current buffer to the backend, it can be triggered by + /// pre configured interval or a call to `force_push` function. + Flush(Option>), + /// Shut down the worker thread, push all logs in buffer to the backend. + Shutdown(oneshot::Sender), + /// Set the resource for the exporter. + SetResource(Arc), +} + /// A [`LogProcessor`] that asynchronously buffers log records and reports /// them at a pre-configured interval. 
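Since the rewritten `BatchLogProcessor` above owns a dedicated `std::thread` and a bounded `mpsc` channel, it no longer needs an async runtime to batch and export. A sketch of the intended wiring, using the crate's `InMemoryLogExporter` (behind the `testing` feature) as a stand-in exporter; the knob values are arbitrary:

```rust
use std::time::Duration;
use opentelemetry_sdk::logs::{BatchConfigBuilder, BatchLogProcessor, LoggerProvider};
use opentelemetry_sdk::testing::logs::InMemoryLogExporter;

fn main() {
    // Queue/batch knobs; unset values fall back to the OTEL_BLRP_* defaults.
    let config = BatchConfigBuilder::default()
        .with_max_queue_size(4096)
        .with_max_export_batch_size(512)
        .with_scheduled_delay(Duration::from_secs(1))
        .build();

    let processor = BatchLogProcessor::builder(InMemoryLogExporter::default())
        .with_batch_config(config)
        .build();

    let provider = LoggerProvider::builder()
        .with_log_processor(processor)
        .build();

    // ... emit logs through a Logger created from `provider` ...

    // Shutdown drains the queue and joins the background thread.
    provider.shutdown().expect("shutdown should flush remaining logs");
}
```

The runtime-coupled variant is preserved behind the `experimental_logs_batch_log_processor_with_async_runtime` feature as `BatchLogProcessorWithAsyncRuntime`, shown further down.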
-pub struct BatchLogProcessor { - message_sender: R::Sender, +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +pub struct BatchLogProcessorWithAsyncRuntime { + message_sender: R::Sender, + + // Track dropped logs - we'll log this at shutdown + dropped_logs_count: AtomicUsize, + + // Track the maximum queue size that was configured for this processor + max_queue_size: usize, } -impl Debug for BatchLogProcessor { +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +impl Debug for BatchLogProcessorWithAsyncRuntime { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("BatchLogProcessor") .field("message_sender", &self.message_sender) @@ -152,22 +517,31 @@ impl Debug for BatchLogProcessor { } } -impl LogProcessor for BatchLogProcessor { - fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationLibrary) { - let result = self.message_sender.try_send(BatchMessage::ExportLog(( - record.clone(), - instrumentation.clone(), - ))); - - if let Err(err) = result { - global::handle_error(LogError::Other(err.into())); +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +impl LogProcessor for BatchLogProcessorWithAsyncRuntime { + fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationScope) { + let result = self + .message_sender + .try_send(BatchMessageWithAsyncRuntime::ExportLog(( + record.clone(), + instrumentation.clone(), + ))); + + // TODO - Implement throttling to prevent error flooding when the queue is full or closed. + if result.is_err() { + // Increment dropped logs count. The first time we have to drop a log, + // emit a warning. + if self.dropped_logs_count.fetch_add(1, Ordering::Relaxed) == 0 { + otel_warn!(name: "BatchLogProcessor.LogDroppingStarted", + message = "BatchLogProcessor dropped a LogRecord due to queue full/internal errors. No further log will be emitted for further drops until Shutdown. During Shutdown time, a log will be emitted with exact count of total logs dropped."); + } } } fn force_flush(&self) -> LogResult<()> { let (res_sender, res_receiver) = oneshot::channel(); self.message_sender - .try_send(BatchMessage::Flush(Some(res_sender))) + .try_send(BatchMessageWithAsyncRuntime::Flush(Some(res_sender))) .map_err(|err| LogError::Other(err.into()))?; futures_executor::block_on(res_receiver) @@ -176,9 +550,20 @@ impl LogProcessor for BatchLogProcessor { } fn shutdown(&self) -> LogResult<()> { + let dropped_logs = self.dropped_logs_count.load(Ordering::Relaxed); + let max_queue_size = self.max_queue_size; + if dropped_logs > 0 { + otel_warn!( + name: "BatchLogProcessor.LogsDropped", + dropped_logs_count = dropped_logs, + max_queue_size = max_queue_size, + message = "Logs were dropped due to a queue being full or other error. The count represents the total count of log records dropped in the lifetime of this BatchLogProcessor. Consider increasing the queue size and/or decrease delay between intervals." 
+ ); + } + let (res_sender, res_receiver) = oneshot::channel(); self.message_sender - .try_send(BatchMessage::Shutdown(res_sender)) + .try_send(BatchMessageWithAsyncRuntime::Shutdown(res_sender)) .map_err(|err| LogError::Other(err.into()))?; futures_executor::block_on(res_receiver) @@ -190,15 +575,17 @@ impl LogProcessor for BatchLogProcessor { let resource = Arc::new(resource.clone()); let _ = self .message_sender - .try_send(BatchMessage::SetResource(resource)); + .try_send(BatchMessageWithAsyncRuntime::SetResource(resource)); } } -impl BatchLogProcessor { +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +impl BatchLogProcessorWithAsyncRuntime { pub(crate) fn new(mut exporter: Box, config: BatchConfig, runtime: R) -> Self { let (message_sender, message_receiver) = runtime.batch_message_channel(config.max_queue_size); let inner_runtime = runtime.clone(); + let max_queue_size = config.max_queue_size; // Spawn worker process via user-defined spawn function. runtime.spawn(Box::pin(async move { @@ -207,7 +594,7 @@ impl BatchLogProcessor { let ticker = inner_runtime .interval(config.scheduled_delay) .skip(1) // The ticker is fired immediately, so we should skip the first one to align with the interval. - .map(|_| BatchMessage::Flush(None)); + .map(|_| BatchMessageWithAsyncRuntime::Flush(None)); let timeout_runtime = inner_runtime.clone(); let mut logs = Vec::new(); let mut messages = Box::pin(stream::select(message_receiver, ticker)); @@ -215,9 +602,8 @@ impl BatchLogProcessor { while let Some(message) = messages.next().await { match message { // Log has finished, add to buffer of pending logs. - BatchMessage::ExportLog(log) => { + BatchMessageWithAsyncRuntime::ExportLog(log) => { logs.push(log); - if logs.len() == config.max_export_batch_size { let result = export_with_timeout( config.max_export_timeout, @@ -228,12 +614,15 @@ impl BatchLogProcessor { .await; if let Err(err) = result { - global::handle_error(err); + otel_error!( + name: "BatchLogProcessor.Export.Error", + error = format!("{}", err) + ); } } } // Log batch interval time reached or a force flush has been invoked, export current spans. - BatchMessage::Flush(res_channel) => { + BatchMessageWithAsyncRuntime::Flush(res_channel) => { let result = export_with_timeout( config.max_export_timeout, exporter.as_mut(), @@ -243,18 +632,16 @@ impl BatchLogProcessor { .await; if let Some(channel) = res_channel { - if let Err(result) = channel.send(result) { - global::handle_error(LogError::from(format!( - "failed to send flush result: {:?}", - result - ))); + if let Err(send_error) = channel.send(result) { + otel_debug!( + name: "BatchLogProcessor.Flush.SendResultError", + error = format!("{:?}", send_error), + ); } - } else if let Err(err) = result { - global::handle_error(err); } } // Stream has terminated or processor is shutdown, return to finish execution. 
- BatchMessage::Shutdown(ch) => { + BatchMessageWithAsyncRuntime::Shutdown(ch) => { let result = export_with_timeout( config.max_export_timeout, exporter.as_mut(), @@ -265,18 +652,16 @@ impl BatchLogProcessor { exporter.shutdown(); - if let Err(result) = ch.send(result) { - global::handle_error(LogError::from(format!( - "failed to send batch processor shutdown result: {:?}", - result - ))); + if let Err(send_error) = ch.send(result) { + otel_debug!( + name: "BatchLogProcessor.Shutdown.SendResultError", + error = format!("{:?}", send_error), + ); } - break; } - // propagate the resource - BatchMessage::SetResource(resource) => { + BatchMessageWithAsyncRuntime::SetResource(resource) => { exporter.set_resource(&resource); } } @@ -284,15 +669,19 @@ impl BatchLogProcessor { })); // Return batch processor with link to worker - BatchLogProcessor { message_sender } + BatchLogProcessorWithAsyncRuntime { + message_sender, + dropped_logs_count: AtomicUsize::new(0), + max_queue_size, + } } /// Create a new batch processor builder - pub fn builder(exporter: E, runtime: R) -> BatchLogProcessorBuilder + pub fn builder(exporter: E, runtime: R) -> BatchLogProcessorWithAsyncRuntimeBuilder where E: LogExporter, { - BatchLogProcessorBuilder { + BatchLogProcessorWithAsyncRuntimeBuilder { exporter, config: Default::default(), runtime, @@ -300,11 +689,12 @@ impl BatchLogProcessor { } } +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] async fn export_with_timeout( time_out: Duration, exporter: &mut E, runtime: &R, - batch: Vec<(LogRecord, InstrumentationLibrary)>, + batch: Vec<(LogRecord, InstrumentationScope)>, ) -> ExportResult where R: RuntimeChannel, @@ -315,7 +705,7 @@ where } // TBD - Can we avoid this conversion as it involves heap allocation with new vector? - let log_vec: Vec<(&LogRecord, &InstrumentationLibrary)> = batch + let log_vec: Vec<(&LogRecord, &InstrumentationScope)> = batch .iter() .map(|log_data| (&log_data.0, &log_data.1)) .collect(); @@ -332,6 +722,7 @@ where /// Batch log processor configuration. /// Use [`BatchConfigBuilder`] to configure your own instance of [`BatchConfig`]. #[derive(Debug)] +#[allow(dead_code)] pub struct BatchConfig { /// The maximum queue size to buffer logs for delayed processing. If the /// queue gets full it drops the logs. The default value of is 2048. @@ -469,46 +860,33 @@ impl BatchConfigBuilder { } } -/// A builder for creating [`BatchLogProcessor`] instances. +/// A builder for creating [`BatchLogProcessorWithAsyncRuntime`] instances. 
/// #[derive(Debug)] -pub struct BatchLogProcessorBuilder { +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +pub struct BatchLogProcessorWithAsyncRuntimeBuilder { exporter: E, config: BatchConfig, runtime: R, } -impl BatchLogProcessorBuilder +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +impl BatchLogProcessorWithAsyncRuntimeBuilder where E: LogExporter + 'static, R: RuntimeChannel, { - /// Set the BatchConfig for [`BatchLogProcessorBuilder`] + /// Set the BatchConfig for [`BatchLogProcessorWithAsyncRuntimeBuilder`] pub fn with_batch_config(self, config: BatchConfig) -> Self { - BatchLogProcessorBuilder { config, ..self } + BatchLogProcessorWithAsyncRuntimeBuilder { config, ..self } } /// Build a batch processor - pub fn build(self) -> BatchLogProcessor { - BatchLogProcessor::new(Box::new(self.exporter), self.config, self.runtime) + pub fn build(self) -> BatchLogProcessorWithAsyncRuntime { + BatchLogProcessorWithAsyncRuntime::new(Box::new(self.exporter), self.config, self.runtime) } } -/// Messages sent between application thread and batch log processor's work thread. -#[allow(clippy::large_enum_variant)] -#[derive(Debug)] -enum BatchMessage { - /// Export logs, usually called when the log is emitted. - ExportLog((LogRecord, InstrumentationLibrary)), - /// Flush the current buffer to the backend, it can be triggered by - /// pre configured interval or a call to `force_push` function. - Flush(Option>), - /// Shut down the worker thread, push all logs in buffer to the backend. - Shutdown(oneshot::Sender), - /// Set the resource for the exporter. - SetResource(Arc), -} - #[cfg(all(test, feature = "testing", feature = "logs"))] mod tests { use super::{ @@ -517,7 +895,8 @@ mod tests { }; use crate::export::logs::{LogBatch, LogExporter}; use crate::logs::LogRecord; - use crate::testing::logs::InMemoryLogsExporterBuilder; + use crate::logs::LogResult; + use crate::testing::logs::InMemoryLogExporterBuilder; use crate::{ logs::{ log_processor::{ @@ -526,20 +905,24 @@ mod tests { }, BatchConfig, BatchConfigBuilder, LogProcessor, LoggerProvider, SimpleLogProcessor, }, - runtime, - testing::logs::InMemoryLogsExporter, + testing::logs::InMemoryLogExporter, Resource, }; use async_trait::async_trait; use opentelemetry::logs::AnyValue; use opentelemetry::logs::LogRecord as _; use opentelemetry::logs::{Logger, LoggerProvider as _}; - use opentelemetry::InstrumentationLibrary; - use opentelemetry::Key; - use opentelemetry::{logs::LogResult, KeyValue}; + use opentelemetry::KeyValue; + use opentelemetry::{InstrumentationScope, Key}; + use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; + #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] + use super::BatchLogProcessorWithAsyncRuntime; + #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] + use crate::runtime; + #[derive(Debug, Clone)] struct MockLogExporter { resource: Arc>>, @@ -547,7 +930,7 @@ mod tests { #[async_trait] impl LogExporter for MockLogExporter { - async fn export(&mut self, _batch: LogBatch<'_>) -> LogResult<()> { + async fn export(&self, _batch: LogBatch<'_>) -> LogResult<()> { Ok(()) } @@ -673,8 +1056,7 @@ mod tests { (OTEL_BLRP_EXPORT_TIMEOUT, Some("2046")), ]; temp_env::with_vars(env_vars.clone(), || { - let builder = - BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio); + let builder = BatchLogProcessor::builder(InMemoryLogExporter::default()); 
assert_eq!(builder.config.max_export_batch_size, 500); assert_eq!( @@ -694,8 +1076,7 @@ mod tests { env_vars.push((OTEL_BLRP_MAX_QUEUE_SIZE, Some("120"))); temp_env::with_vars(env_vars, || { - let builder = - BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio); + let builder = BatchLogProcessor::builder(InMemoryLogExporter::default()); assert_eq!(builder.config.max_export_batch_size, 120); assert_eq!(builder.config.max_queue_size, 120); }); @@ -710,8 +1091,8 @@ mod tests { .with_max_queue_size(4) .build(); - let builder = BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio) - .with_batch_config(expected); + let builder = + BatchLogProcessor::builder(InMemoryLogExporter::default()).with_batch_config(expected); let actual = &builder.config; assert_eq!(actual.max_export_batch_size, 1); @@ -725,16 +1106,20 @@ mod tests { let exporter = MockLogExporter { resource: Arc::new(Mutex::new(None)), }; - let processor = SimpleLogProcessor::new(Box::new(exporter.clone())); + let processor = SimpleLogProcessor::new(exporter.clone()); let _ = LoggerProvider::builder() .with_log_processor(processor) - .with_resource(Resource::new(vec![ - KeyValue::new("k1", "v1"), - KeyValue::new("k2", "v3"), - KeyValue::new("k3", "v3"), - KeyValue::new("k4", "v4"), - KeyValue::new("k5", "v5"), - ])) + .with_resource( + Resource::builder_empty() + .with_attributes([ + KeyValue::new("k1", "v1"), + KeyValue::new("k2", "v3"), + KeyValue::new("k3", "v3"), + KeyValue::new("k4", "v4"), + KeyValue::new("k5", "v5"), + ]) + .build(), + ) .build(); assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 5); } @@ -744,22 +1129,25 @@ mod tests { let exporter = MockLogExporter { resource: Arc::new(Mutex::new(None)), }; - let processor = BatchLogProcessor::new( - Box::new(exporter.clone()), - BatchConfig::default(), - runtime::Tokio, - ); + let processor = BatchLogProcessor::new(Box::new(exporter.clone()), BatchConfig::default()); let provider = LoggerProvider::builder() .with_log_processor(processor) - .with_resource(Resource::new(vec![ - KeyValue::new("k1", "v1"), - KeyValue::new("k2", "v3"), - KeyValue::new("k3", "v3"), - KeyValue::new("k4", "v4"), - KeyValue::new("k5", "v5"), - ])) + .with_resource( + Resource::builder_empty() + .with_attributes([ + KeyValue::new("k1", "v1"), + KeyValue::new("k2", "v3"), + KeyValue::new("k3", "v3"), + KeyValue::new("k4", "v4"), + KeyValue::new("k5", "v5"), + ]) + .build(), + ) .build(); - tokio::time::sleep(Duration::from_secs(2)).await; // set resource in batch span processor is not blocking. Should we make it blocking? + + // wait for the batch processor to process the resource. 
+ tokio::time::sleep(Duration::from_millis(100)).await; + assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 5); let _ = provider.shutdown(); } @@ -768,17 +1156,13 @@ async fn test_batch_shutdown() { // assert we will receive an error // setup - let exporter = InMemoryLogsExporterBuilder::default() + let exporter = InMemoryLogExporterBuilder::default() .keep_records_on_shutdown() .build(); - let processor = BatchLogProcessor::new( - Box::new(exporter.clone()), - BatchConfig::default(), - runtime::Tokio, - ); + let processor = BatchLogProcessor::new(Box::new(exporter.clone()), BatchConfig::default()); - let mut record: LogRecord = Default::default(); - let instrumentation: InstrumentationLibrary = Default::default(); + let mut record = LogRecord::default(); + let instrumentation = InstrumentationScope::default(); processor.emit(&mut record, &instrumentation); processor.force_flush().unwrap(); @@ -790,13 +1174,13 @@ #[test] fn test_simple_shutdown() { - let exporter = InMemoryLogsExporterBuilder::default() + let exporter = InMemoryLogExporterBuilder::default() .keep_records_on_shutdown() .build(); - let processor = SimpleLogProcessor::new(Box::new(exporter.clone())); + let processor = SimpleLogProcessor::new(exporter.clone()); let mut record: LogRecord = Default::default(); - let instrumentation: InstrumentationLibrary = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); processor.emit(&mut record, &instrumentation); @@ -812,13 +1196,63 @@ assert_eq!(1, exporter.get_emitted_logs().unwrap().len()) } + #[tokio::test(flavor = "current_thread")] + async fn test_batch_log_processor_shutdown_under_async_runtime_current_flavor_multi_thread() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = BatchLogProcessor::new(Box::new(exporter.clone()), BatchConfig::default()); + + processor.shutdown().unwrap(); + } + + #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] + #[tokio::test(flavor = "current_thread")] + #[ignore = "See issue https://github.com/open-telemetry/opentelemetry-rust/issues/1968"] + async fn test_batch_log_processor_with_async_runtime_shutdown_under_async_runtime_current_flavor_multi_thread( ) { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = BatchLogProcessorWithAsyncRuntime::new( + Box::new(exporter.clone()), + BatchConfig::default(), + runtime::Tokio, + ); + + // + // deadlock happens in shutdown with tokio current_thread runtime + // + processor.shutdown().unwrap(); + } + + #[tokio::test(flavor = "current_thread")] + async fn test_batch_log_processor_shutdown_with_async_runtime_current_flavor_current_thread() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = BatchLogProcessor::new(Box::new(exporter.clone()), BatchConfig::default()); + + processor.shutdown().unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_batch_log_processor_shutdown_with_async_runtime_multi_flavor_multi_thread() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = BatchLogProcessor::new(Box::new(exporter.clone()), BatchConfig::default()); + + processor.shutdown().unwrap(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_batch_log_processor_shutdown_with_async_runtime_multi_flavor_current_thread() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = BatchLogProcessor::new(Box::new(exporter.clone()),
BatchConfig::default()); + + processor.shutdown().unwrap(); + } + #[derive(Debug)] struct FirstProcessor { - pub(crate) logs: Arc>>, + pub(crate) logs: Arc>>, } impl LogProcessor for FirstProcessor { - fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationLibrary) { + fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationScope) { // add attribute record.add_attribute( Key::from_static_str("processed_by"), @@ -849,11 +1283,11 @@ mod tests { #[derive(Debug)] struct SecondProcessor { - pub(crate) logs: Arc>>, + pub(crate) logs: Arc>>, } impl LogProcessor for SecondProcessor { - fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationLibrary) { + fn emit(&self, record: &mut LogRecord, instrumentation: &InstrumentationScope) { assert!(record.attributes_contains( &Key::from_static_str("processed_by"), &AnyValue::String("FirstProcessor".into()) @@ -984,4 +1418,232 @@ mod tests { == AnyValue::String("Updated by FirstProcessor".into()) ); } + + #[test] + fn test_simple_processor_sync_exporter_without_runtime() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + processor.emit(&mut record, &instrumentation); + + assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn test_simple_processor_sync_exporter_with_runtime() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + processor.emit(&mut record, &instrumentation); + + assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_simple_processor_sync_exporter_with_multi_thread_runtime() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = Arc::new(SimpleLogProcessor::new(exporter.clone())); + + let mut handles = vec![]; + for _ in 0..10 { + let processor_clone = Arc::clone(&processor); + let handle = tokio::spawn(async move { + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + processor_clone.emit(&mut record, &instrumentation); + }); + handles.push(handle); + } + + for handle in handles { + handle.await.unwrap(); + } + + assert_eq!(exporter.get_emitted_logs().unwrap().len(), 10); + } + + #[tokio::test(flavor = "current_thread")] + async fn test_simple_processor_sync_exporter_with_current_thread_runtime() { + let exporter = InMemoryLogExporterBuilder::default().build(); + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + processor.emit(&mut record, &instrumentation); + + assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1); + } + + #[derive(Debug, Clone)] + struct LogExporterThatRequiresTokio { + export_count: Arc, + } + + impl LogExporterThatRequiresTokio { + /// Creates a new instance of `LogExporterThatRequiresTokio`. + fn new() -> Self { + LogExporterThatRequiresTokio { + export_count: Arc::new(AtomicUsize::new(0)), + } + } + + /// Returns the number of logs stored in the exporter. 
+ fn len(&self) -> usize { + self.export_count.load(Ordering::Acquire) + } + } + + #[async_trait::async_trait] + impl LogExporter for LogExporterThatRequiresTokio { + async fn export(&self, batch: LogBatch<'_>) -> LogResult<()> { + // Simulate minimal dependency on tokio by sleeping asynchronously for a short duration + tokio::time::sleep(Duration::from_millis(50)).await; + + for _ in batch.iter() { + self.export_count.fetch_add(1, Ordering::Acquire); + } + Ok(()) + } + } + + #[test] + fn test_simple_processor_async_exporter_without_runtime() { + // Use `catch_unwind` to catch the panic caused by the missing Tokio runtime + let result = std::panic::catch_unwind(|| { + let exporter = LogExporterThatRequiresTokio::new(); + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + // This will panic because the exporter runs a Tokio async operation without a runtime. + processor.emit(&mut record, &instrumentation); + }); + + // Verify that the panic occurred and check the panic message for the absence of a Tokio runtime + assert!( + result.is_err(), + "The test should fail due to missing Tokio runtime, but it did not." + ); + let panic_payload = result.unwrap_err(); + let panic_message = panic_payload + .downcast_ref::<String>() + .map(|s| s.as_str()) + .or_else(|| panic_payload.downcast_ref::<&str>().copied()) + .unwrap_or("No panic message"); + + assert!( + panic_message.contains("no reactor running") + || panic_message.contains("must be called from the context of a Tokio 1.x runtime"), + "Expected panic message about missing Tokio runtime, but got: {}", + panic_message + ); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + #[ignore] + // This test demonstrates a potential deadlock scenario in a multi-threaded Tokio runtime. + // It spawns Tokio tasks equal to the number of runtime worker threads (4) to emit log events. + // Each task attempts to acquire a mutex on the exporter in `SimpleLogProcessor::emit`. + // Only one task obtains the lock, while the others are blocked, waiting for its release. + // + // The task holding the lock invokes the LogExporterThatRequiresTokio, which performs an + // asynchronous operation (e.g., network I/O simulated by `tokio::sleep`). This operation + // requires yielding control back to the Tokio runtime to make progress. + // + // However, all worker threads are occupied: + // - One thread is executing the async exporter operation + // - Three threads are blocked waiting for the mutex + // + // This leads to a deadlock as there are no available threads to drive the async operation + // to completion, preventing the mutex from being released. Consequently, neither the blocked + // tasks nor the exporter can proceed.
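The panic in the first test occurs because `tokio::time::sleep` must be created inside a runtime context, while `SimpleLogProcessor` polls the export future via `futures_executor::block_on` on whatever thread called `emit`. One mitigation (a hedged sketch, not part of this PR) is for the exporter to capture a `tokio::runtime::Handle` at construction and create its Tokio resources inside the entered context, assuming a runtime is alive elsewhere in the process and that the `LogExporter` trait's remaining methods keep their default implementations:

```rust
use std::time::Duration;
use async_trait::async_trait;
use opentelemetry_sdk::export::logs::{LogBatch, LogExporter};
use opentelemetry_sdk::logs::LogResult;

/// Hypothetical exporter that owns a handle to a runtime running elsewhere,
/// so its timers work regardless of which thread polls the export future.
#[derive(Debug)]
struct HandleAwareExporter {
    handle: tokio::runtime::Handle,
}

#[async_trait]
impl LogExporter for HandleAwareExporter {
    async fn export(&self, _batch: LogBatch<'_>) -> LogResult<()> {
        // Create the timer while the runtime context is entered; `Sleep`
        // binds to that runtime's timer driver at construction time.
        let sleep = {
            let _guard = self.handle.enter();
            tokio::time::sleep(Duration::from_millis(10))
        };
        sleep.await;
        Ok(())
    }
}
```

The deadlock described immediately above is a different hazard: once every runtime worker is blocked, no handle trickery helps, and the blocking `emit` itself must be kept off the runtime's threads.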
+ async fn test_simple_processor_async_exporter_with_all_runtime_worker_threads_blocked() { + let exporter = LogExporterThatRequiresTokio::new(); + let processor = Arc::new(SimpleLogProcessor::new(exporter.clone())); + + let concurrent_emit = 4; // number of worker threads + + let mut handles = vec![]; + // try send `concurrent_emit` events concurrently + for _ in 0..concurrent_emit { + let processor_clone = Arc::clone(&processor); + let handle = tokio::spawn(async move { + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + processor_clone.emit(&mut record, &instrumentation); + }); + handles.push(handle); + } + + // below code won't get executed + for handle in handles { + handle.await.unwrap(); + } + assert_eq!(exporter.len(), concurrent_emit); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + // This test uses a multi-threaded runtime setup with a single worker thread. Note that even + // though only one worker thread is created, it is distinct from the main thread. The processor + // emits a log event, and the exporter performs an async operation that requires the runtime. + // The single worker thread handles this operation without deadlocking, as long as no other + // tasks occupy the runtime. + async fn test_simple_processor_async_exporter_with_runtime() { + let exporter = LogExporterThatRequiresTokio::new(); + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + processor.emit(&mut record, &instrumentation); + + assert_eq!(exporter.len(), 1); + } + + #[tokio::test(flavor = "multi_thread")] + // This test uses a multi-threaded runtime setup with the default number of worker threads. + // The processor emits a log event, and the exporter, which requires the runtime for its async + // operations, can access one of the available worker threads to complete its task. As there + // are multiple threads, the exporter can proceed without blocking other tasks, ensuring the + // test completes successfully. + async fn test_simple_processor_async_exporter_with_multi_thread_runtime() { + let exporter = LogExporterThatRequiresTokio::new(); + + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + processor.emit(&mut record, &instrumentation); + + assert_eq!(exporter.len(), 1); + } + + #[tokio::test(flavor = "current_thread")] + #[ignore] + // This test uses a current-thread runtime, where all operations run on the main thread. + // The processor emits a log event while the runtime is blocked using `futures::block_on` + // to complete the export operation. The exporter, which performs an async operation and + // requires the runtime, cannot progress because the main thread is already blocked. + // This results in a deadlock, as the runtime cannot move forward. 
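For that reason, when a `SimpleLogProcessor` wraps a runtime-dependent exporter, the blocking `emit` call is best moved off the async threads entirely, e.g. via `tokio::task::spawn_blocking`, which runs on Tokio's dedicated blocking pool and leaves the runtime free to drive the exporter's future. A hedged sketch; the helper name and structure are illustrative, not part of this PR:

```rust
use std::sync::Arc;
use opentelemetry::InstrumentationScope;
use opentelemetry_sdk::logs::{LogProcessor, LogRecord};

/// Illustrative helper: emit from the blocking pool so the (possibly
/// single-threaded) runtime stays free to drive the exporter's future.
async fn emit_off_runtime<P: LogProcessor + 'static>(processor: Arc<P>) {
    tokio::task::spawn_blocking(move || {
        let mut record = LogRecord::default();
        let scope = InstrumentationScope::default();
        processor.emit(&mut record, &scope);
    })
    .await
    .expect("emit task panicked");
}
```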
+ async fn test_simple_processor_async_exporter_with_current_thread_runtime() { + let exporter = LogExporterThatRequiresTokio::new(); + + let processor = SimpleLogProcessor::new(exporter.clone()); + + let mut record: LogRecord = Default::default(); + let instrumentation: InstrumentationScope = Default::default(); + + processor.emit(&mut record, &instrumentation); + + assert_eq!(exporter.len(), 1); + } } diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 59b1b9b338..b1341b9e96 100644 --- a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -1,8 +1,10 @@ //! # OpenTelemetry Log SDK +mod error; mod log_emitter; mod log_processor; pub(crate) mod record; +pub use error::{LogError, LogResult}; pub use log_emitter::{Builder, Logger, LoggerProvider}; pub use log_processor::{ BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, @@ -10,23 +12,19 @@ pub use log_processor::{ }; pub use record::{LogRecord, TraceContext}; -use opentelemetry::InstrumentationLibrary; -/// `LogData` represents a single log event without resource context. -#[derive(Clone, Debug)] -pub struct LogData { - /// Log record - pub record: LogRecord, - /// Instrumentation details for the emitter who produced this `LogEvent`. - pub instrumentation: InstrumentationLibrary, -} +#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] +pub use log_processor::{ + BatchLogProcessorWithAsyncRuntime, BatchLogProcessorWithAsyncRuntimeBuilder, +}; #[cfg(all(test, feature = "testing"))] mod tests { use super::*; - use crate::testing::logs::InMemoryLogsExporter; + use crate::testing::logs::InMemoryLogExporter; use crate::Resource; use opentelemetry::logs::LogRecord; use opentelemetry::logs::{Logger, LoggerProvider as _, Severity}; + use opentelemetry::InstrumentationScope; use opentelemetry::{logs::AnyValue, Key, KeyValue}; use std::borrow::Borrow; use std::collections::HashMap; @@ -34,16 +32,18 @@ mod tests { #[test] fn logging_sdk_test() { // Arrange - let resource = Resource::new(vec![ - KeyValue::new("k1", "v1"), - KeyValue::new("k2", "v2"), - KeyValue::new("k3", "v3"), - KeyValue::new("k4", "v4"), - ]); - let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); + let resource = Resource::builder_empty() + .with_attributes([ + KeyValue::new("k1", "v1"), + KeyValue::new("k2", "v2"), + KeyValue::new("k3", "v3"), + KeyValue::new("k4", "v4"), + ]) + .build(); + let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); let logger_provider = LoggerProvider::builder() .with_resource(resource.clone()) - .with_log_processor(SimpleLogProcessor::new(Box::new(exporter.clone()))) + .with_log_processor(SimpleLogProcessor::new(exporter.clone())) .build(); // Act @@ -87,7 +87,7 @@ mod tests { let log = exported_logs .first() .expect("Atleast one log is expected to be present."); - assert_eq!(log.instrumentation.name, "test-logger"); + assert_eq!(log.instrumentation.name(), "test-logger"); assert_eq!(log.record.severity_number, Some(Severity::Error)); assert_eq!(log.record.attributes_len(), 10); for i in 1..=10 { @@ -102,44 +102,23 @@ mod tests { } #[test] + #[allow(deprecated)] fn logger_attributes() { let provider = LoggerProvider::builder().build(); - let logger = provider - .logger_builder("test_logger") + let scope = InstrumentationScope::builder("test_logger") .with_schema_url("https://opentelemetry.io/schema/1.0.0") .with_attributes(vec![(KeyValue::new("test_k", "test_v"))]) .build(); - let 
instrumentation_library = logger.instrumentation_library(); - let attributes = &instrumentation_library.attributes; - assert_eq!(instrumentation_library.name, "test_logger"); - assert_eq!( - instrumentation_library.schema_url, - Some("https://opentelemetry.io/schema/1.0.0".into()) - ); - assert_eq!(attributes.len(), 1); - assert_eq!(attributes[0].key, "test_k".into()); - assert_eq!(attributes[0].value, "test_v".into()); - } - #[test] - #[allow(deprecated)] - fn versioned_logger_options() { - let provider = LoggerProvider::builder().build(); - let logger = provider.versioned_logger( - "test_logger", - Some("v1.2.3".into()), - Some("https://opentelemetry.io/schema/1.0.0".into()), - Some(vec![(KeyValue::new("test_k", "test_v"))]), - ); - let instrumentation_library = logger.instrumentation_library(); - let attributes = &instrumentation_library.attributes; - assert_eq!(instrumentation_library.version, Some("v1.2.3".into())); + let logger = provider.logger_with_scope(scope); + let instrumentation_scope = logger.instrumentation_scope(); + assert_eq!(instrumentation_scope.name(), "test_logger"); assert_eq!( - instrumentation_library.schema_url, - Some("https://opentelemetry.io/schema/1.0.0".into()) + instrumentation_scope.schema_url(), + Some("https://opentelemetry.io/schema/1.0.0") ); - assert_eq!(attributes.len(), 1); - assert_eq!(attributes[0].key, "test_k".into()); - assert_eq!(attributes[0].value, "test_v".into()); + assert!(instrumentation_scope + .attributes() + .eq(&[KeyValue::new("test_k", "test_v")])); } } diff --git a/opentelemetry-sdk/src/logs/record.rs b/opentelemetry-sdk/src/logs/record.rs index fc815074fd..3524f977cd 100644 --- a/opentelemetry-sdk/src/logs/record.rs +++ b/opentelemetry-sdk/src/logs/record.rs @@ -25,28 +25,28 @@ pub(crate) type LogRecordAttributes = /// is provided to `LogExporter`s as input. pub struct LogRecord { /// Event name. Optional as not all the logging API support it. 
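(Context for the `record.rs` hunk that follows: the `LogRecord` fields become `pub(crate)`, so code outside the SDK, such as a custom exporter, now reads a record through the new accessor methods. A consumer-side sketch; `summarize` is a hypothetical helper, not SDK API:)

```rust
use opentelemetry_sdk::logs::LogRecord;

// Hypothetical exporter helper: the fields are no longer public, so
// everything goes through the accessors added in this change.
fn summarize(record: &LogRecord) -> String {
    format!(
        "{} [{}] {:?}",
        record.event_name().unwrap_or("<unnamed>"),
        record.severity_text().unwrap_or("UNSPECIFIED"),
        record.body(),
    )
}
```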
-    pub event_name: Option<&'static str>,
+    pub(crate) event_name: Option<&'static str>,

     /// Target of the log record
-    pub target: Option<Cow<'static, str>>,
+    pub(crate) target: Option<Cow<'static, str>>,

     /// Record timestamp
-    pub timestamp: Option<SystemTime>,
+    pub(crate) timestamp: Option<SystemTime>,

     /// Timestamp for when the record was observed by OpenTelemetry
-    pub observed_timestamp: Option<SystemTime>,
+    pub(crate) observed_timestamp: Option<SystemTime>,

     /// Trace context for logs associated with spans
-    pub trace_context: Option<TraceContext>,
+    pub(crate) trace_context: Option<TraceContext>,

     /// The original severity string from the source
-    pub severity_text: Option<&'static str>,
+    pub(crate) severity_text: Option<&'static str>,

     /// The corresponding severity value, normalized
-    pub severity_number: Option<Severity>,
+    pub(crate) severity_number: Option<Severity>,

     /// Record body
-    pub body: Option<AnyValue>,
+    pub(crate) body: Option<AnyValue>,

     /// Additional attributes associated with this record
     pub(crate) attributes: LogRecordAttributes,
@@ -103,10 +103,72 @@ impl opentelemetry::logs::LogRecord for LogRecord {
     {
         self.attributes.push(Some((key.into(), value.into())));
     }
+
+    fn set_trace_context(
+        &mut self,
+        trace_id: TraceId,
+        span_id: SpanId,
+        trace_flags: Option<TraceFlags>,
+    ) {
+        self.trace_context = Some(TraceContext {
+            trace_id,
+            span_id,
+            trace_flags,
+        });
+    }
 }

 impl LogRecord {
+    /// Returns the event name
+    #[inline]
+    pub fn event_name(&self) -> Option<&'static str> {
+        self.event_name
+    }
+
+    /// Returns the target
+    #[inline]
+    pub fn target(&self) -> Option<&Cow<'static, str>> {
+        self.target.as_ref()
+    }
+
+    /// Returns the timestamp
+    #[inline]
+    pub fn timestamp(&self) -> Option<SystemTime> {
+        self.timestamp
+    }
+
+    /// Returns the observed timestamp
+    #[inline]
+    pub fn observed_timestamp(&self) -> Option<SystemTime> {
+        self.observed_timestamp
+    }
+
+    /// Returns the trace context
+    #[inline]
+    pub fn trace_context(&self) -> Option<&TraceContext> {
+        self.trace_context.as_ref()
+    }
+
+    /// Returns the severity text
+    #[inline]
+    pub fn severity_text(&self) -> Option<&'static str> {
+        self.severity_text
+    }
+
+    /// Returns the severity number
+    #[inline]
+    pub fn severity_number(&self) -> Option<Severity> {
+        self.severity_number
+    }
+
+    /// Returns the body
+    #[inline]
+    pub fn body(&self) -> Option<&AnyValue> {
+        self.body.as_ref()
+    }

     /// Provides an iterator over the attributes.
+    #[inline]
     pub fn attributes_iter(&self) -> impl Iterator<Item = &(Key, AnyValue)> {
         self.attributes.iter().filter_map(|opt| opt.as_ref())
     }
@@ -217,7 +279,7 @@ impl From<&SpanContext> for TraceContext {
     }
 }

-#[cfg(test)]
+#[cfg(all(test, feature = "testing"))]
 mod tests {
     use super::*;
     use opentelemetry::logs::{AnyValue, Severity};
diff --git a/opentelemetry-sdk/src/metrics/aggregation.rs b/opentelemetry-sdk/src/metrics/aggregation.rs
index 561aa00c4d..1788b6b264 100644
--- a/opentelemetry-sdk/src/metrics/aggregation.rs
+++ b/opentelemetry-sdk/src/metrics/aggregation.rs
@@ -1,7 +1,7 @@
 use std::fmt;

 use crate::metrics::internal::{EXPO_MAX_SCALE, EXPO_MIN_SCALE};
-use opentelemetry::metrics::{MetricsError, Result};
+use crate::metrics::{MetricError, MetricResult};

 /// The way recorded measurements are summarized.
 #[derive(Clone, Debug, PartialEq)]
@@ -109,7 +109,7 @@ impl fmt::Display for Aggregation {
 impl Aggregation {
     /// Validate that this aggregation has correct configuration
-    pub fn validate(&self) -> Result<()> {
+    pub fn validate(&self) -> MetricResult<()> {
         match self {
             Aggregation::Drop => Ok(()),
             Aggregation::Default => Ok(()),
@@ -118,7 +118,7 @@ impl Aggregation {
             Aggregation::ExplicitBucketHistogram { boundaries, ..
} => {
                 for x in boundaries.windows(2) {
                     if x[0] >= x[1] {
-                        return Err(MetricsError::Config(format!(
+                        return Err(MetricError::Config(format!(
                             "aggregation: explicit bucket histogram: non-monotonic boundaries: {:?}",
                             boundaries,
                         )));
@@ -129,13 +129,13 @@ impl Aggregation {
             Aggregation::Base2ExponentialHistogram { max_scale, .. } => {
                 if *max_scale > EXPO_MAX_SCALE {
-                    return Err(MetricsError::Config(format!(
+                    return Err(MetricError::Config(format!(
                         "aggregation: exponential histogram: max scale ({}) is greater than 20",
                         max_scale,
                     )));
                 }
                 if *max_scale < EXPO_MIN_SCALE {
-                    return Err(MetricsError::Config(format!(
+                    return Err(MetricError::Config(format!(
                         "aggregation: exponential histogram: max scale ({}) is less than -10",
                         max_scale,
                     )));
@@ -153,17 +153,17 @@ mod tests {
         internal::{EXPO_MAX_SCALE, EXPO_MIN_SCALE},
         Aggregation,
     };
-    use opentelemetry::metrics::{MetricsError, Result};
+    use crate::metrics::{MetricError, MetricResult};

     #[test]
     fn validate_aggregation() {
         struct TestCase {
             name: &'static str,
             input: Aggregation,
-            check: Box<dyn Fn(Result<()>) -> bool>,
+            check: Box<dyn Fn(MetricResult<()>) -> bool>,
         }
-        let ok = Box::new(|result: Result<()>| result.is_ok());
-        let config_error = Box::new(|result| matches!(result, Err(MetricsError::Config(_))));
+        let ok = Box::new(|result: MetricResult<()>| result.is_ok());
+        let config_error = Box::new(|result| matches!(result, Err(MetricError::Config(_))));

         let test_cases: Vec<TestCase> = vec![
             TestCase {
diff --git a/opentelemetry-sdk/src/metrics/data/mod.rs b/opentelemetry-sdk/src/metrics/data/mod.rs
index 64a1879d26..b1b198d73c 100644
--- a/opentelemetry-sdk/src/metrics/data/mod.rs
+++ b/opentelemetry-sdk/src/metrics/data/mod.rs
@@ -2,28 +2,26 @@
 use std::{any, borrow::Cow, fmt, time::SystemTime};

-use opentelemetry::KeyValue;
+use opentelemetry::{InstrumentationScope, KeyValue};

-use crate::{instrumentation::Scope, Resource};
+use crate::Resource;

-pub use self::temporality::Temporality;
-
-mod temporality;
+use super::Temporality;

 /// A collection of [ScopeMetrics] and the associated [Resource] that created them.
 #[derive(Debug)]
 pub struct ResourceMetrics {
     /// The entity that collected the metrics.
     pub resource: Resource,
-    /// The collection of metrics with unique [Scope]s.
+    /// The collection of metrics with unique [InstrumentationScope]s.
     pub scope_metrics: Vec<ScopeMetrics>,
 }

 /// A collection of metrics produced by a meter.
 #[derive(Default, Debug)]
 pub struct ScopeMetrics {
-    /// The [Scope] that the meter was created with.
-    pub scope: Scope,
+    /// The [InstrumentationScope] that the meter was created with.
+    pub scope: InstrumentationScope,
     /// The list of aggregations created by the meter.
     pub metrics: Vec<Metric>,
 }
@@ -55,35 +53,40 @@ pub trait Aggregation: fmt::Debug + any::Any + Send + Sync {
     fn as_mut(&mut self) -> &mut dyn any::Any;
 }

-/// A measurement of the current value of an instrument.
-#[derive(Debug)]
-pub struct Gauge<T> {
-    /// Represents individual aggregated measurements with unique attributes.
-    pub data_points: Vec<DataPoint<T>>,
+/// DataPoint is a single data point in a time series.
+#[derive(Debug, PartialEq)]
+pub struct GaugeDataPoint<T> {
+    /// Attributes is the set of key value pairs that uniquely identify the
+    /// time series.
+    pub attributes: Vec<KeyValue>,
+    /// The value of this data point.
+    pub value: T,
+    /// The sampled [Exemplar]s collected during the time series.
+    pub exemplars: Vec<Exemplar<T>>,
 }

-impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for Gauge<T> {
-    fn as_any(&self) -> &dyn any::Any {
-        self
-    }
-    fn as_mut(&mut self) -> &mut dyn any::Any {
-        self
+impl<T: Copy> Clone for GaugeDataPoint<T> {
+    fn clone(&self) -> Self {
+        Self {
+            attributes: self.attributes.clone(),
+            value: self.value,
+            exemplars: self.exemplars.clone(),
+        }
     }
 }

-/// Represents the sum of all measurements of values from an instrument.
+/// A measurement of the current value of an instrument.
 #[derive(Debug)]
-pub struct Sum<T> {
+pub struct Gauge<T> {
     /// Represents individual aggregated measurements with unique attributes.
-    pub data_points: Vec<DataPoint<T>>,
-    /// Describes if the aggregation is reported as the change from the last report
-    /// time, or the cumulative changes since a fixed start time.
-    pub temporality: Temporality,
-    /// Whether this aggregation only increases or decreases.
-    pub is_monotonic: bool,
+    pub data_points: Vec<GaugeDataPoint<T>>,
+    /// The time when the time series was started.
+    pub start_time: Option<SystemTime>,
+    /// The time when the time series was recorded.
+    pub time: SystemTime,
 }

-impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for Sum<T> {
+impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for Gauge<T> {
     fn as_any(&self) -> &dyn any::Any {
         self
     }
@@ -93,38 +96,61 @@ impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for Sum<T> {
 }

 /// DataPoint is a single data point in a time series.
-#[derive(Debug)]
-pub struct DataPoint<T> {
+#[derive(Debug, PartialEq)]
+pub struct SumDataPoint<T> {
     /// Attributes is the set of key value pairs that uniquely identify the
     /// time series.
     pub attributes: Vec<KeyValue>,
-    /// The time when the time series was started.
-    pub start_time: Option<SystemTime>,
-    /// The time when the time series was recorded.
-    pub time: Option<SystemTime>,
     /// The value of this data point.
     pub value: T,
     /// The sampled [Exemplar]s collected during the time series.
     pub exemplars: Vec<Exemplar<T>>,
 }

-impl<T: Copy> Clone for DataPoint<T> {
+impl<T: Copy> Clone for SumDataPoint<T> {
     fn clone(&self) -> Self {
         Self {
             attributes: self.attributes.clone(),
-            start_time: self.start_time,
-            time: self.time,
             value: self.value,
             exemplars: self.exemplars.clone(),
         }
     }
 }

+/// Represents the sum of all measurements of values from an instrument.
+#[derive(Debug)]
+pub struct Sum<T> {
+    /// Represents individual aggregated measurements with unique attributes.
+    pub data_points: Vec<SumDataPoint<T>>,
+    /// The time when the time series was started.
+    pub start_time: SystemTime,
+    /// The time when the time series was recorded.
+    pub time: SystemTime,
+    /// Describes if the aggregation is reported as the change from the last report
+    /// time, or the cumulative changes since a fixed start time.
+    pub temporality: Temporality,
+    /// Whether this aggregation only increases or decreases.
+    pub is_monotonic: bool,
+}
+
+impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for Sum<T> {
+    fn as_any(&self) -> &dyn any::Any {
+        self
+    }
+    fn as_mut(&mut self) -> &mut dyn any::Any {
+        self
+    }
+}
+
 /// Represents the histogram of all measurements of values from an instrument.
 #[derive(Debug)]
 pub struct Histogram<T> {
     /// Individual aggregated measurements with unique attributes.
     pub data_points: Vec<HistogramDataPoint<T>>,
+    /// The time when the time series was started.
+    pub start_time: SystemTime,
+    /// The time when the time series was recorded.
+    pub time: SystemTime,
     /// Describes if the aggregation is reported as the change from the last report
     /// time, or the cumulative changes since a fixed start time.
     pub temporality: Temporality,
@@ -140,15 +166,10 @@ impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for Histogram<T> {
 }

 /// A single histogram data point in a time series.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub struct HistogramDataPoint<T> {
     /// The set of key value pairs that uniquely identify the time series.
    pub attributes: Vec<KeyValue>,
-    /// The time when the time series was started.
-    pub start_time: SystemTime,
-    /// The time when the time series was recorded.
-    pub time: SystemTime,
-
     /// The number of updates this histogram has been calculated with.
     pub count: u64,
     /// The upper bounds of the buckets of the histogram.
@@ -173,8 +194,6 @@ impl<T: Copy> Clone for HistogramDataPoint<T> {
     fn clone(&self) -> Self {
         Self {
             attributes: self.attributes.clone(),
-            start_time: self.start_time,
-            time: self.time,
             count: self.count,
             bounds: self.bounds.clone(),
             bucket_counts: self.bucket_counts.clone(),
@@ -191,7 +210,10 @@ impl<T: Copy> Clone for HistogramDataPoint<T> {
 pub struct ExponentialHistogram<T> {
     /// The individual aggregated measurements with unique attributes.
     pub data_points: Vec<ExponentialHistogramDataPoint<T>>,
-
+    /// When the time series was started.
+    pub start_time: SystemTime,
+    /// The time when the time series was recorded.
+    pub time: SystemTime,
     /// Describes if the aggregation is reported as the change from the last report
     /// time, or the cumulative changes since a fixed start time.
     pub temporality: Temporality,
@@ -207,14 +229,10 @@ impl<T: fmt::Debug + Send + Sync + 'static> Aggregation for ExponentialHistogram<T> {
 }

 /// A single exponential histogram data point in a time series.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub struct ExponentialHistogramDataPoint<T> {
     /// The set of key value pairs that uniquely identify the time series.
     pub attributes: Vec<KeyValue>,
-    /// When the time series was started.
-    pub start_time: SystemTime,
-    /// The time when the time series was recorded.
-    pub time: SystemTime,

     /// The number of updates this histogram has been calculated with.
     pub count: usize,
@@ -255,6 +273,24 @@ pub struct ExponentialHistogramDataPoint<T> {
     pub exemplars: Vec<Exemplar<T>>,
 }

+impl<T: Copy> Clone for ExponentialHistogramDataPoint<T> {
+    fn clone(&self) -> Self {
+        Self {
+            attributes: self.attributes.clone(),
+            count: self.count,
+            min: self.min,
+            max: self.max,
+            sum: self.sum,
+            scale: self.scale,
+            zero_count: self.zero_count,
+            positive_bucket: self.positive_bucket.clone(),
+            negative_bucket: self.negative_bucket.clone(),
+            zero_threshold: self.zero_threshold,
+            exemplars: self.exemplars.clone(),
+        }
+    }
+}
+
 /// A set of bucket counts, encoded in a contiguous array of counts.
 #[derive(Debug, PartialEq)]
 pub struct ExponentialBucket {
@@ -268,8 +304,17 @@ pub struct ExponentialBucket {
     pub counts: Vec<u64>,
 }

+impl Clone for ExponentialBucket {
+    fn clone(&self) -> Self {
+        Self {
+            offset: self.offset,
+            counts: self.counts.clone(),
+        }
+    }
+}
+
 /// A measurement sampled from a time series providing a typical example.
-#[derive(Debug)]
+#[derive(Debug, PartialEq)]
 pub struct Exemplar<T> {
     /// The attributes recorded with the measurement but filtered out of the
     /// time series' aggregated data.
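With the restructuring above, `start_time`/`time` now live once on each aggregation (`Sum`, `Histogram`, `ExponentialHistogram`) instead of on every data point. A reader-side sketch of consuming the new shape (variable and function names are illustrative; the downcast goes through the `as_any` method the `Aggregation` trait in this module provides):

```rust
use opentelemetry_sdk::metrics::data::{ResourceMetrics, Sum};

// Walk exported metrics and read the series-level timestamps once per Sum,
// rather than once per data point as before this change.
fn print_sums(rm: &ResourceMetrics) {
    for scope in &rm.scope_metrics {
        for metric in &scope.metrics {
            if let Some(sum) = metric.data.as_any().downcast_ref::<Sum<u64>>() {
                println!(
                    "{}: {} points, window {:?} -> {:?}",
                    metric.name,
                    sum.data_points.len(),
                    sum.start_time,
                    sum.time
                );
            }
        }
    }
}
```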
@@ -299,3 +344,75 @@ impl<T: Copy> Clone for Exemplar<T> {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+
+    use super::{Exemplar, ExponentialHistogramDataPoint, HistogramDataPoint, SumDataPoint};
+
+    use opentelemetry::KeyValue;
+
+    #[test]
+    fn validate_cloning_data_points() {
+        let data_type = SumDataPoint {
+            attributes: vec![KeyValue::new("key", "value")],
+            value: 0u32,
+            exemplars: vec![Exemplar {
+                filtered_attributes: vec![],
+                time: std::time::SystemTime::now(),
+                value: 0u32,
+                span_id: [0; 8],
+                trace_id: [0; 16],
+            }],
+        };
+        assert_eq!(data_type.clone(), data_type);
+
+        let histogram_data_point = HistogramDataPoint {
+            attributes: vec![KeyValue::new("key", "value")],
+            count: 0,
+            bounds: vec![],
+            bucket_counts: vec![],
+            min: None,
+            max: None,
+            sum: 0u32,
+            exemplars: vec![Exemplar {
+                filtered_attributes: vec![],
+                time: std::time::SystemTime::now(),
+                value: 0u32,
+                span_id: [0; 8],
+                trace_id: [0; 16],
+            }],
+        };
+        assert_eq!(histogram_data_point.clone(), histogram_data_point);
+
+        let exponential_histogram_data_point = ExponentialHistogramDataPoint {
+            attributes: vec![KeyValue::new("key", "value")],
+            count: 0,
+            min: None,
+            max: None,
+            sum: 0u32,
+            scale: 0,
+            zero_count: 0,
+            positive_bucket: super::ExponentialBucket {
+                offset: 0,
+                counts: vec![],
+            },
+            negative_bucket: super::ExponentialBucket {
+                offset: 0,
+                counts: vec![],
+            },
+            zero_threshold: 0.0,
+            exemplars: vec![Exemplar {
+                filtered_attributes: vec![],
+                time: std::time::SystemTime::now(),
+                value: 0u32,
+                span_id: [0; 8],
+                trace_id: [0; 16],
+            }],
+        };
+        assert_eq!(
+            exponential_histogram_data_point.clone(),
+            exponential_histogram_data_point
+        );
+    }
+}
diff --git a/opentelemetry-sdk/src/metrics/data/temporality.rs b/opentelemetry-sdk/src/metrics/data/temporality.rs
deleted file mode 100644
index df64d6d4d7..0000000000
--- a/opentelemetry-sdk/src/metrics/data/temporality.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-/// Defines the window that an aggregation was calculated over.
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-#[non_exhaustive]
-pub enum Temporality {
-    /// A measurement interval that continues to expand forward in time from a
-    /// starting point.
-    ///
-    /// New measurements are added to all previous measurements since a start time.
-    Cumulative,
-
-    /// A measurement interval that resets each cycle.
-    ///
-    /// Measurements from one cycle are recorded independently, measurements from
-    /// other cycles do not affect them.
-    Delta,
-}
diff --git a/opentelemetry-sdk/src/metrics/error.rs b/opentelemetry-sdk/src/metrics/error.rs
new file mode 100644
index 0000000000..cb8afcab0e
--- /dev/null
+++ b/opentelemetry-sdk/src/metrics/error.rs
@@ -0,0 +1,40 @@
+use std::result;
+use std::sync::PoisonError;
+use thiserror::Error;
+
+use crate::export::ExportError;
+
+/// A specialized `Result` type for metric operations.
+pub type MetricResult<T> = result::Result<T, MetricError>;
+
+/// Errors returned by the metrics API.
+#[derive(Error, Debug)]
+#[non_exhaustive]
+pub enum MetricError {
+    /// Other errors not covered by specific cases.
+    #[error("Metrics error: {0}")]
+    Other(String),
+    /// Invalid configuration
+    #[error("Config error {0}")]
+    Config(String),
+    /// Fail to export metrics
+    #[error("Metrics exporter {0} failed with {name}", name = .0.exporter_name())]
+    ExportErr(Box<dyn ExportError>),
+    /// Invalid instrument configuration, such as an invalid instrument name, invalid instrument description, invalid instrument unit, etc.
+    /// See [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#general-characteristics)
+    /// for the full list of requirements.
+    #[error("Invalid instrument configuration: {0}")]
+    InvalidInstrumentConfiguration(&'static str),
+}
+
+impl<T: ExportError> From<T> for MetricError {
+    fn from(err: T) -> Self {
+        MetricError::ExportErr(Box::new(err))
+    }
+}
+
+impl<T> From<PoisonError<T>> for MetricError {
+    fn from(err: PoisonError<T>) -> Self {
+        MetricError::Other(err.to_string())
+    }
+}
diff --git a/opentelemetry-sdk/src/metrics/exporter.rs b/opentelemetry-sdk/src/metrics/exporter.rs
index c49aaa75dd..33c1fcb6be 100644
--- a/opentelemetry-sdk/src/metrics/exporter.rs
+++ b/opentelemetry-sdk/src/metrics/exporter.rs
@@ -1,29 +1,34 @@
 //! Interfaces for exporting metrics
 use async_trait::async_trait;
-use opentelemetry::metrics::Result;
+use crate::metrics::MetricResult;

-use crate::metrics::{data::ResourceMetrics, reader::TemporalitySelector};
+use crate::metrics::data::ResourceMetrics;
+
+use super::Temporality;

 /// Exporter handles the delivery of metric data to external receivers.
 ///
 /// This is the final component in the metric push pipeline.
 #[async_trait]
-pub trait PushMetricsExporter: TemporalitySelector + Send + Sync + 'static {
+pub trait PushMetricExporter: Send + Sync + 'static {
     /// Export serializes and transmits metric data to a receiver.
     ///
     /// All retry logic must be contained in this function. The SDK does not
     /// implement any retry logic. All errors returned by this function are
     /// considered unrecoverable and will be reported to a configured error
     /// Handler.
-    async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()>;
+    async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()>;

     /// Flushes any metric data held by an exporter.
-    async fn force_flush(&self) -> Result<()>;
+    async fn force_flush(&self) -> MetricResult<()>;

     /// Releases any held computational resources.
     ///
     /// After Shutdown is called, calls to Export will perform no operation and
     /// instead will return an error indicating the shutdown state.
-    fn shutdown(&self) -> Result<()>;
+    fn shutdown(&self) -> MetricResult<()>;
+
+    /// Access the [Temporality] of the MetricExporter.
+    fn temporality(&self) -> Temporality;
 }
diff --git a/opentelemetry-sdk/src/metrics/instrument.rs b/opentelemetry-sdk/src/metrics/instrument.rs
index 8f4797f8f8..e1e7e7b45c 100644
--- a/opentelemetry-sdk/src/metrics/instrument.rs
+++ b/opentelemetry-sdk/src/metrics/instrument.rs
@@ -1,14 +1,13 @@
-use std::{any::Any, borrow::Cow, collections::HashSet, hash::Hash, sync::Arc};
+use std::{borrow::Cow, collections::HashSet, sync::Arc};

 use opentelemetry::{
-    metrics::{AsyncInstrument, SyncCounter, SyncGauge, SyncHistogram, SyncUpDownCounter},
-    Key, KeyValue,
+    metrics::{AsyncInstrument, SyncInstrument},
+    InstrumentationScope, Key, KeyValue,
 };

-use crate::{
-    instrumentation::Scope,
-    metrics::{aggregation::Aggregation, internal::Measure},
-};
+use crate::metrics::{aggregation::Aggregation, internal::Measure};
+
+use super::Temporality;

 /// The identifier of a group of instruments that all perform the same function.
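Taken together with the exporter hunk above, a no-op implementation of the reshaped `PushMetricExporter` might look like this (a sketch; `NoopExporter` is illustrative, not part of this change):

```rust
use async_trait::async_trait;
use opentelemetry_sdk::metrics::{
    data::ResourceMetrics, exporter::PushMetricExporter, MetricResult, Temporality,
};

struct NoopExporter;

#[async_trait]
impl PushMetricExporter for NoopExporter {
    async fn export(&self, _metrics: &mut ResourceMetrics) -> MetricResult<()> {
        Ok(()) // a real exporter would serialize and transmit here
    }

    async fn force_flush(&self) -> MetricResult<()> {
        Ok(())
    }

    fn shutdown(&self) -> MetricResult<()> {
        Ok(())
    }

    // Replaces the old TemporalitySelector supertrait: the exporter now
    // states its temporality preference directly.
    fn temporality(&self) -> Temporality {
        Temporality::Delta
    }
}
```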
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] @@ -37,6 +36,35 @@ pub enum InstrumentKind { ObservableGauge, } +impl InstrumentKind { + /// Select the [Temporality] preference based on [InstrumentKind] + /// + /// [exporter-docs]: https://github.com/open-telemetry/opentelemetry-specification/blob/a1c13d59bb7d0fb086df2b3e1eaec9df9efef6cc/specification/metrics/sdk_exporters/otlp.md#additional-configuration + pub(crate) fn temporality_preference(&self, temporality: Temporality) -> Temporality { + match temporality { + Temporality::Cumulative => Temporality::Cumulative, + Temporality::Delta => match self { + Self::Counter + | Self::Histogram + | Self::ObservableCounter + | Self::Gauge + | Self::ObservableGauge => Temporality::Delta, + Self::UpDownCounter | InstrumentKind::ObservableUpDownCounter => { + Temporality::Cumulative + } + }, + Temporality::LowMemory => match self { + Self::Counter | InstrumentKind::Histogram => Temporality::Delta, + Self::ObservableCounter + | Self::Gauge + | Self::ObservableGauge + | Self::UpDownCounter + | Self::ObservableUpDownCounter => Temporality::Cumulative, + }, + } + } +} + /// Describes properties an instrument is created with, also used for filtering /// in [View](crate::metrics::View)s. /// @@ -55,6 +83,7 @@ pub enum InstrumentKind { /// ``` #[derive(Clone, Default, Debug, PartialEq)] #[non_exhaustive] +#[allow(unreachable_pub)] pub struct Instrument { /// The human-readable identifier of the instrument. pub name: Cow<'static, str>, @@ -65,9 +94,10 @@ pub struct Instrument { /// Unit is the unit of measurement recorded by the instrument. pub unit: Cow<'static, str>, /// The instrumentation that created the instrument. - pub scope: Scope, + pub scope: InstrumentationScope, } +#[cfg(feature = "spec_unstable_metrics_views")] impl Instrument { /// Create a new instrument with default values pub fn new() -> Self { @@ -93,7 +123,7 @@ impl Instrument { } /// Set the instrument scope. - pub fn scope(mut self, scope: Scope) -> Self { + pub fn scope(mut self, scope: InstrumentationScope) -> Self { self.scope = scope; self } @@ -104,7 +134,7 @@ impl Instrument { && self.description == "" && self.kind.is_none() && self.unit == "" - && self.scope == Scope::default() + && self.scope == InstrumentationScope::default() } pub(crate) fn matches(&self, other: &Instrument) -> bool { @@ -132,13 +162,11 @@ impl Instrument { } pub(crate) fn matches_scope(&self, other: &Instrument) -> bool { - (self.scope.name.is_empty() || self.scope.name.as_ref() == other.scope.name.as_ref()) - && (self.scope.version.is_none() - || self.scope.version.as_ref().map(AsRef::as_ref) - == other.scope.version.as_ref().map(AsRef::as_ref)) - && (self.scope.schema_url.is_none() - || self.scope.schema_url.as_ref().map(AsRef::as_ref) - == other.scope.schema_url.as_ref().map(AsRef::as_ref)) + (self.scope.name().is_empty() || self.scope.name() == other.scope.name()) + && (self.scope.version().is_none() + || self.scope.version().as_ref() == other.scope.version().as_ref()) + && (self.scope.schema_url().is_none() + || self.scope.schema_url().as_ref() == other.scope.schema_url().as_ref()) } } @@ -159,6 +187,7 @@ impl Instrument { /// ``` #[derive(Default, Debug)] #[non_exhaustive] +#[allow(unreachable_pub)] pub struct Stream { /// The human-readable identifier of the stream. pub name: Cow<'static, str>, @@ -176,6 +205,7 @@ pub struct Stream { pub allowed_attribute_keys: Option>>, } +#[cfg(feature = "spec_unstable_metrics_views")] impl Stream { /// Create a new stream with empty values. 
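The `temporality_preference` mapping above is what makes `Temporality::LowMemory` practical: instruments that must be cumulative (up-down counters, observable gauges) stay cumulative, while high-volume ones (counters, histograms) export deltas. Selecting it when building an exporter might look like this (a sketch; the builder methods follow the `opentelemetry-otlp` crate and are assumed here, so verify them against the version you use):

```rust
use opentelemetry_sdk::metrics::Temporality;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Counters and histograms will export Delta points; up-down counters and
    // observable instruments remain Cumulative, per the match above.
    let exporter = opentelemetry_otlp::MetricExporter::builder()
        .with_tonic()
        .with_temporality(Temporality::LowMemory)
        .build()?;
    let _ = exporter;
    Ok(())
}
```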
pub fn new() -> Self {
@@ -252,32 +282,8 @@ pub(crate) struct ResolvedMeasures<T> {
     pub(crate) measures: Vec<Arc<dyn Measure<T>>>,
 }

-impl<T: Copy + 'static> SyncCounter<T> for ResolvedMeasures<T> {
-    fn add(&self, val: T, attrs: &[KeyValue]) {
-        for measure in &self.measures {
-            measure.call(val, attrs)
-        }
-    }
-}
-
-impl<T: Copy + 'static> SyncUpDownCounter<T> for ResolvedMeasures<T> {
-    fn add(&self, val: T, attrs: &[KeyValue]) {
-        for measure in &self.measures {
-            measure.call(val, attrs)
-        }
-    }
-}
-
-impl<T: Copy + 'static> SyncGauge<T> for ResolvedMeasures<T> {
-    fn record(&self, val: T, attrs: &[KeyValue]) {
-        for measure in &self.measures {
-            measure.call(val, attrs)
-        }
-    }
-}
-
-impl<T: Copy + 'static> SyncHistogram<T> for ResolvedMeasures<T> {
-    fn record(&self, val: T, attrs: &[KeyValue]) {
+impl<T: Copy + 'static> SyncInstrument<T> for ResolvedMeasures<T> {
+    fn measure(&self, val: T, attrs: &[KeyValue]) {
         for measure in &self.measures {
             measure.call(val, attrs)
         }
@@ -301,8 +307,4 @@ impl<T: Number> AsyncInstrument<T> for Observable<T> {
             measure.call(measurement, attrs)
         }
     }
-
-    fn as_any(&self) -> Arc<dyn Any> {
-        Arc::new(self.clone())
-    }
 }
diff --git a/opentelemetry-sdk/src/metrics/internal/aggregate.rs b/opentelemetry-sdk/src/metrics/internal/aggregate.rs
index e004240c60..aaad464747 100644
--- a/opentelemetry-sdk/src/metrics/internal/aggregate.rs
+++ b/opentelemetry-sdk/src/metrics/internal/aggregate.rs
@@ -1,19 +1,25 @@
-use std::{marker, sync::Arc};
+use std::{
+    marker,
+    mem::replace,
+    ops::DerefMut,
+    sync::{Arc, Mutex},
+    time::SystemTime,
+};

 use opentelemetry::KeyValue;

-use crate::metrics::data::{Aggregation, Gauge, Temporality};
+use crate::metrics::{data::Aggregation, Temporality};

 use super::{
     exponential_histogram::ExpoHistogram, histogram::Histogram, last_value::LastValue,
     precomputed_sum::PrecomputedSum, sum::Sum, Number,
 };

-const STREAM_CARDINALITY_LIMIT: u32 = 2000;
+pub(crate) const STREAM_CARDINALITY_LIMIT: usize = 2000;

 /// Checks whether aggregator has hit cardinality limit for metric streams
 pub(crate) fn is_under_cardinality_limit(size: usize) -> bool {
-    size < STREAM_CARDINALITY_LIMIT as usize
+    size < STREAM_CARDINALITY_LIMIT
 }

 /// Receives measurements to be aggregated.
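For context on the constant above: once an aggregator is tracking `STREAM_CARDINALITY_LIMIT` distinct attribute sets, further sets are folded into a single overflow series. A simplified sketch of that gating idea (not the SDK's actual `ValueMap`, just the principle; the string-keyed map is illustrative):

```rust
use std::collections::HashMap;

const STREAM_CARDINALITY_LIMIT: usize = 2000;

fn is_under_cardinality_limit(size: usize) -> bool {
    size < STREAM_CARDINALITY_LIMIT
}

// Simplified: attribute sets are keyed by their rendered form; anything past
// the limit is aggregated under a single overflow key so memory stays bounded.
fn track(streams: &mut HashMap<String, u64>, attrs: String, value: u64) {
    if streams.contains_key(&attrs) || is_under_cardinality_limit(streams.len()) {
        *streams.entry(attrs).or_insert(0) += value;
    } else {
        *streams
            .entry("otel.metric.overflow=true".into())
            .or_insert(0) += value;
    }
}
```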
@@ -53,76 +59,94 @@ where
     }
 }

+pub(crate) struct AggregateTime {
+    pub start: SystemTime,
+    pub current: SystemTime,
+}
+
+/// Initialized [`AggregateTime`] for specific [`Temporality`]
+pub(crate) struct AggregateTimeInitiator(Mutex<SystemTime>);
+
+impl AggregateTimeInitiator {
+    pub(crate) fn delta(&self) -> AggregateTime {
+        let current_time = SystemTime::now();
+        let start_time = self
+            .0
+            .lock()
+            .map(|mut start| replace(start.deref_mut(), current_time))
+            .unwrap_or(current_time);
+        AggregateTime {
+            start: start_time,
+            current: current_time,
+        }
+    }
+
+    pub(crate) fn cumulative(&self) -> AggregateTime {
+        let current_time = SystemTime::now();
+        let start_time = self.0.lock().map(|start| *start).unwrap_or(current_time);
+        AggregateTime {
+            start: start_time,
+            current: current_time,
+        }
+    }
+}
+
+impl Default for AggregateTimeInitiator {
+    fn default() -> Self {
+        Self(Mutex::new(SystemTime::now()))
+    }
+}
+
+type Filter = Arc<dyn Fn(&KeyValue) -> bool + Send + Sync>;
+
+/// Applies filter on provided attribute set
+/// No-op, if filter is not set
+#[derive(Clone)]
+pub(crate) struct AttributeSetFilter {
+    filter: Option<Filter>,
+}
+
+impl AttributeSetFilter {
+    pub(crate) fn new(filter: Option<Filter>) -> Self {
+        Self { filter }
+    }
+
+    pub(crate) fn apply(&self, attrs: &[KeyValue], run: impl FnOnce(&[KeyValue])) {
+        if let Some(filter) = &self.filter {
+            let filtered_attrs: Vec<KeyValue> =
+                attrs.iter().filter(|kv| filter(kv)).cloned().collect();
+            run(&filtered_attrs);
+        } else {
+            run(attrs);
+        };
+    }
+}
+
 /// Builds aggregate functions
 pub(crate) struct AggregateBuilder<T> {
     /// The temporality used for the returned aggregate functions.
-    ///
-    /// If this is not provided, a default of cumulative will be used (except for the
-    /// last-value aggregate function where delta is the only appropriate
-    /// temporality).
-    temporality: Option<Temporality>,
+    temporality: Temporality,

     /// The attribute filter the aggregate function will use on the input of
     /// measurements.
-    filter: Option<Filter>,
+    filter: AttributeSetFilter,

     _marker: marker::PhantomData<T>,
 }

-type Filter = Arc<dyn Fn(&KeyValue) -> bool + Send + Sync>;
-
 impl<T: Number> AggregateBuilder<T> {
-    pub(crate) fn new(temporality: Option<Temporality>, filter: Option<Filter>) -> Self {
+    pub(crate) fn new(temporality: Temporality, filter: Option<Filter>) -> Self {
         AggregateBuilder {
             temporality,
-            filter,
+            filter: AttributeSetFilter::new(filter),
             _marker: marker::PhantomData,
         }
     }

-    /// Wraps the passed in measure with an attribute filtering function.
-    fn filter(&self, f: impl Measure<T>) -> impl Measure<T> {
-        let filter = self.filter.clone();
-        move |n, attrs: &[KeyValue]| {
-            if let Some(filter) = &filter {
-                let filtered_attrs: Vec<KeyValue> =
-                    attrs.iter().filter(|kv| filter(kv)).cloned().collect();
-                f.call(n, &filtered_attrs);
-            } else {
-                f.call(n, attrs);
-            };
-        }
-    }
-
     /// Builds a last-value aggregate function input and output.
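The two methods on `AggregateTimeInitiator` above encode the timestamp rules per temporality: `delta()` swaps the stored start time for "now", so consecutive collection windows tile the timeline with no gaps or overlap, while `cumulative()` leaves it untouched, so every window shares the original start. The same pattern in a standalone sketch (std only; names are illustrative):

```rust
use std::mem::replace;
use std::sync::Mutex;
use std::time::SystemTime;

struct TimeInitiator(Mutex<SystemTime>);

impl TimeInitiator {
    // Delta: each call returns (previous collection time, now) and advances
    // the stored start, so windows are disjoint and contiguous.
    fn delta(&self) -> (SystemTime, SystemTime) {
        let now = SystemTime::now();
        let start = replace(&mut *self.0.lock().unwrap(), now);
        (start, now)
    }

    // Cumulative: every window is (original start, now).
    fn cumulative(&self) -> (SystemTime, SystemTime) {
        (*self.0.lock().unwrap(), SystemTime::now())
    }
}
```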
pub(crate) fn last_value(&self) -> (impl Measure, impl ComputeAggregation) { - let lv_filter = Arc::new(LastValue::new()); - let lv_agg = Arc::clone(&lv_filter); - let t = self.temporality; - - ( - self.filter(move |n, a: &[KeyValue]| lv_filter.measure(n, a)), - move |dest: Option<&mut dyn Aggregation>| { - let g = dest.and_then(|d| d.as_mut().downcast_mut::>()); - let mut new_agg = if g.is_none() { - Some(Gauge { - data_points: vec![], - }) - } else { - None - }; - let g = g.unwrap_or_else(|| new_agg.as_mut().expect("present if g is none")); - - match t { - Some(Temporality::Delta) => { - lv_agg.compute_aggregation_delta(&mut g.data_points) - } - _ => lv_agg.compute_aggregation_cumulative(&mut g.data_points), - } - - (g.data_points.len(), new_agg.map(|a| Box::new(a) as Box<_>)) - }, - ) + let lv = Arc::new(LastValue::new(self.temporality, self.filter.clone())); + (lv.clone(), lv) } /// Builds a precomputed sum aggregate function input and output. @@ -130,32 +154,20 @@ impl AggregateBuilder { &self, monotonic: bool, ) -> (impl Measure, impl ComputeAggregation) { - let s = Arc::new(PrecomputedSum::new(monotonic)); - let agg_sum = Arc::clone(&s); - let t = self.temporality; - - ( - self.filter(move |n, a: &[KeyValue]| s.measure(n, a)), - move |dest: Option<&mut dyn Aggregation>| match t { - Some(Temporality::Delta) => agg_sum.delta(dest), - _ => agg_sum.cumulative(dest), - }, - ) + let s = Arc::new(PrecomputedSum::new( + self.temporality, + self.filter.clone(), + monotonic, + )); + + (s.clone(), s) } /// Builds a sum aggregate function input and output. pub(crate) fn sum(&self, monotonic: bool) -> (impl Measure, impl ComputeAggregation) { - let s = Arc::new(Sum::new(monotonic)); - let agg_sum = Arc::clone(&s); - let t = self.temporality; - - ( - self.filter(move |n, a: &[KeyValue]| s.measure(n, a)), - move |dest: Option<&mut dyn Aggregation>| match t { - Some(Temporality::Delta) => agg_sum.delta(dest), - _ => agg_sum.cumulative(dest), - }, - ) + let s = Arc::new(Sum::new(self.temporality, self.filter.clone(), monotonic)); + + (s.clone(), s) } /// Builds a histogram aggregate function input and output. @@ -165,17 +177,15 @@ impl AggregateBuilder { record_min_max: bool, record_sum: bool, ) -> (impl Measure, impl ComputeAggregation) { - let h = Arc::new(Histogram::new(boundaries, record_min_max, record_sum)); - let agg_h = Arc::clone(&h); - let t = self.temporality; - - ( - self.filter(move |n, a: &[KeyValue]| h.measure(n, a)), - move |dest: Option<&mut dyn Aggregation>| match t { - Some(Temporality::Delta) => agg_h.delta(dest), - _ => agg_h.cumulative(dest), - }, - ) + let h = Arc::new(Histogram::new( + self.temporality, + self.filter.clone(), + boundaries, + record_min_max, + record_sum, + )); + + (h.clone(), h) } /// Builds an exponential histogram aggregate function input and output. 
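A pattern worth noting in the builders above: each aggregator `Arc` is returned twice, once as the measurement input (`impl Measure<T>`) and once as the collection output (`impl ComputeAggregation`), replacing the old closure pairs. Schematically, with stand-in traits rather than the SDK internals:

```rust
use std::sync::{Arc, Mutex};

trait Measure {
    fn call(&self, value: u64);
}
trait ComputeAggregation {
    fn collect(&self) -> u64;
}

struct SumAggregator(Mutex<u64>);

// The same Arc serves as both halves: one allocation, two capabilities.
impl Measure for Arc<SumAggregator> {
    fn call(&self, value: u64) {
        *self.0.lock().unwrap() += value;
    }
}

impl ComputeAggregation for Arc<SumAggregator> {
    fn collect(&self) -> u64 {
        *self.0.lock().unwrap()
    }
}

fn sum() -> (impl Measure, impl ComputeAggregation) {
    let s = Arc::new(SumAggregator(Mutex::new(0)));
    (s.clone(), s)
}
```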
@@ -187,29 +197,23 @@ impl AggregateBuilder { record_sum: bool, ) -> (impl Measure, impl ComputeAggregation) { let h = Arc::new(ExpoHistogram::new( + self.temporality, + self.filter.clone(), max_size, max_scale, record_min_max, record_sum, )); - let agg_h = Arc::clone(&h); - let t = self.temporality; - - ( - self.filter(move |n, a: &[KeyValue]| h.measure(n, a)), - move |dest: Option<&mut dyn Aggregation>| match t { - Some(Temporality::Delta) => agg_h.delta(dest), - _ => agg_h.cumulative(dest), - }, - ) + + (h.clone(), h) } } #[cfg(test)] mod tests { use crate::metrics::data::{ - DataPoint, ExponentialBucket, ExponentialHistogram, ExponentialHistogramDataPoint, - Histogram, HistogramDataPoint, Sum, + ExponentialBucket, ExponentialHistogram, ExponentialHistogramDataPoint, Gauge, + GaugeDataPoint, Histogram, HistogramDataPoint, Sum, SumDataPoint, }; use std::{time::SystemTime, vec}; @@ -217,15 +221,16 @@ mod tests { #[test] fn last_value_aggregation() { - let (measure, agg) = AggregateBuilder::::new(None, None).last_value(); + let (measure, agg) = + AggregateBuilder::::new(Temporality::Cumulative, None).last_value(); let mut a = Gauge { - data_points: vec![DataPoint { + data_points: vec![GaugeDataPoint { attributes: vec![KeyValue::new("a", 1)], - start_time: Some(SystemTime::now()), - time: Some(SystemTime::now()), value: 1u64, exemplars: vec![], }], + start_time: Some(SystemTime::now()), + time: SystemTime::now(), }; let new_attributes = [KeyValue::new("b", 2)]; measure.call(2, &new_attributes[..]); @@ -243,24 +248,22 @@ mod tests { fn precomputed_sum_aggregation() { for temporality in [Temporality::Delta, Temporality::Cumulative] { let (measure, agg) = - AggregateBuilder::::new(Some(temporality), None).precomputed_sum(true); + AggregateBuilder::::new(temporality, None).precomputed_sum(true); let mut a = Sum { data_points: vec![ - DataPoint { + SumDataPoint { attributes: vec![KeyValue::new("a1", 1)], - start_time: Some(SystemTime::now()), - time: Some(SystemTime::now()), value: 1u64, exemplars: vec![], }, - DataPoint { + SumDataPoint { attributes: vec![KeyValue::new("a2", 1)], - start_time: Some(SystemTime::now()), - time: Some(SystemTime::now()), value: 2u64, exemplars: vec![], }, ], + start_time: SystemTime::now(), + time: SystemTime::now(), temporality: if temporality == Temporality::Delta { Temporality::Cumulative } else { @@ -286,24 +289,22 @@ mod tests { #[test] fn sum_aggregation() { for temporality in [Temporality::Delta, Temporality::Cumulative] { - let (measure, agg) = AggregateBuilder::::new(Some(temporality), None).sum(true); + let (measure, agg) = AggregateBuilder::::new(temporality, None).sum(true); let mut a = Sum { data_points: vec![ - DataPoint { + SumDataPoint { attributes: vec![KeyValue::new("a1", 1)], - start_time: Some(SystemTime::now()), - time: Some(SystemTime::now()), value: 1u64, exemplars: vec![], }, - DataPoint { + SumDataPoint { attributes: vec![KeyValue::new("a2", 1)], - start_time: Some(SystemTime::now()), - time: Some(SystemTime::now()), value: 2u64, exemplars: vec![], }, ], + start_time: SystemTime::now(), + time: SystemTime::now(), temporality: if temporality == Temporality::Delta { Temporality::Cumulative } else { @@ -329,13 +330,11 @@ mod tests { #[test] fn explicit_bucket_histogram_aggregation() { for temporality in [Temporality::Delta, Temporality::Cumulative] { - let (measure, agg) = AggregateBuilder::::new(Some(temporality), None) + let (measure, agg) = AggregateBuilder::::new(temporality, None) .explicit_bucket_histogram(vec![1.0], true, true); let mut 
a = Histogram { data_points: vec![HistogramDataPoint { attributes: vec![KeyValue::new("a1", 1)], - start_time: SystemTime::now(), - time: SystemTime::now(), count: 2, bounds: vec![1.0, 2.0], bucket_counts: vec![0, 1, 1], @@ -344,6 +343,8 @@ mod tests { sum: 3u64, exemplars: vec![], }], + start_time: SystemTime::now(), + time: SystemTime::now(), temporality: if temporality == Temporality::Delta { Temporality::Cumulative } else { @@ -372,13 +373,11 @@ mod tests { #[test] fn exponential_histogram_aggregation() { for temporality in [Temporality::Delta, Temporality::Cumulative] { - let (measure, agg) = AggregateBuilder::::new(Some(temporality), None) + let (measure, agg) = AggregateBuilder::::new(temporality, None) .exponential_bucket_histogram(4, 20, true, true); let mut a = ExponentialHistogram { data_points: vec![ExponentialHistogramDataPoint { attributes: vec![KeyValue::new("a1", 1)], - start_time: SystemTime::now(), - time: SystemTime::now(), count: 2, min: None, max: None, @@ -396,6 +395,8 @@ mod tests { zero_threshold: 1.0, exemplars: vec![], }], + start_time: SystemTime::now(), + time: SystemTime::now(), temporality: if temporality == Temporality::Delta { Temporality::Cumulative } else { diff --git a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs index c23b441663..b54d78bace 100644 --- a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs @@ -1,14 +1,22 @@ -use std::{collections::HashMap, f64::consts::LOG2_E, sync::Mutex, time::SystemTime}; +use std::{ + f64::consts::LOG2_E, + mem::replace, + ops::DerefMut, + sync::{Arc, Mutex}, +}; -use once_cell::sync::Lazy; -use opentelemetry::{metrics::MetricsError, KeyValue}; +use opentelemetry::{otel_debug, KeyValue}; +use std::sync::OnceLock; -use crate::{ - metrics::data::{self, Aggregation, Temporality}, - metrics::AttributeSet, +use crate::metrics::{ + data::{self, Aggregation}, + Temporality, }; -use super::Number; +use super::{ + aggregate::{AggregateTimeInitiator, AttributeSetFilter}, + Aggregator, ComputeAggregation, Measure, Number, ValueMap, +}; pub(crate) const EXPO_MAX_SCALE: i8 = 20; pub(crate) const EXPO_MIN_SCALE: i8 = -10; @@ -16,33 +24,26 @@ pub(crate) const EXPO_MIN_SCALE: i8 = -10; /// A single data point in an exponential histogram. 
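For orientation before the data-point definition that follows: at scale `s`, an exponential histogram maps a value `v` to bucket index `floor(log2(v) * 2^s)`, and the `scale_factors()` table below caches `log2(e) * 2^s` so the hot path can use `ln` instead of `log2`. A reference sketch of that formula (the SDK's actual path adds a bit-twiddling fast path and a `- 1` adjustment for exact powers of two):

```rust
use std::f64::consts::LOG2_E;

// Reference implementation of the bucket index at a given scale.
fn bucket_index(value: f64, scale: i8) -> i32 {
    (value.ln() * LOG2_E * 2f64.powi(scale as i32)).floor() as i32
}

fn main() {
    // At scale 0 the buckets are plain powers of two: 4.1 lands in bucket 2,
    // i.e. the bucket covering (4, 8].
    assert_eq!(bucket_index(4.1, 0), 2);
    // Raising the scale doubles the resolution: scale 1 has two buckets per
    // power of two, so the same value lands in bucket 4.
    assert_eq!(bucket_index(4.1, 1), 4);
}
```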
#[derive(Debug, PartialEq)] struct ExpoHistogramDataPoint { + max_size: i32, count: usize, min: T, max: T, sum: T, - - max_size: i32, - record_min_max: bool, - record_sum: bool, - scale: i8, - pos_buckets: ExpoBuckets, neg_buckets: ExpoBuckets, zero_count: u64, } impl ExpoHistogramDataPoint { - fn new(max_size: i32, max_scale: i8, record_min_max: bool, record_sum: bool) -> Self { + fn new(config: &BucketConfig) -> Self { ExpoHistogramDataPoint { + max_size: config.max_size, count: 0, min: T::max(), max: T::min(), sum: T::default(), - max_size, - record_min_max, - record_sum, - scale: max_scale, + scale: config.max_scale, pos_buckets: ExpoBuckets::default(), neg_buckets: ExpoBuckets::default(), zero_count: 0, @@ -57,17 +58,13 @@ impl ExpoHistogramDataPoint { fn record(&mut self, v: T) { self.count += 1; - if self.record_min_max { - if v < self.min { - self.min = v; - } - if v > self.max { - self.max = v; - } + if v < self.min { + self.min = v; } - if self.record_sum { - self.sum += v; + if v > self.max { + self.max = v; } + self.sum += v; let abs_v = v.into_float().abs(); @@ -100,9 +97,18 @@ impl ExpoHistogramDataPoint { if (self.scale - scale_delta as i8) < EXPO_MIN_SCALE { // With a scale of -10 there is only two buckets for the whole range of f64 values. // This can only happen if there is a max size of 1. - opentelemetry::global::handle_error(MetricsError::Other( - "exponential histogram scale underflow".into(), - )); + + // TODO - to check if this should be logged as an error if this is auto-recoverable. + otel_debug!( + name: "ExponentialHistogramDataPoint.Scale.Underflow", + current_scale = self.scale, + scale_delta = scale_delta, + max_size = self.max_size, + min_scale = EXPO_MIN_SCALE, + value = format!("{:?}", v), + message = "The measurement will be dropped due to scale underflow. Check the histogram configuration" + ); + return; } // Downscale @@ -133,7 +139,7 @@ impl ExpoHistogramDataPoint { } return (exp - correction) >> -self.scale; } - (exp << self.scale) + (frac.ln() * SCALE_FACTORS[self.scale as usize]) as i32 - 1 + (exp << self.scale) + (frac.ln() * scale_factors()[self.scale as usize]) as i32 - 1 } } @@ -167,32 +173,38 @@ fn scale_change(max_size: i32, bin: i32, start_bin: i32, length: i32) -> u32 { count } -/// Constants used in calculating the logarithm index. -static SCALE_FACTORS: Lazy<[f64; 21]> = Lazy::new(|| { - [ - LOG2_E * 2f64.powi(0), - LOG2_E * 2f64.powi(1), - LOG2_E * 2f64.powi(2), - LOG2_E * 2f64.powi(3), - LOG2_E * 2f64.powi(4), - LOG2_E * 2f64.powi(5), - LOG2_E * 2f64.powi(6), - LOG2_E * 2f64.powi(7), - LOG2_E * 2f64.powi(8), - LOG2_E * 2f64.powi(9), - LOG2_E * 2f64.powi(10), - LOG2_E * 2f64.powi(11), - LOG2_E * 2f64.powi(12), - LOG2_E * 2f64.powi(13), - LOG2_E * 2f64.powi(14), - LOG2_E * 2f64.powi(15), - LOG2_E * 2f64.powi(16), - LOG2_E * 2f64.powi(17), - LOG2_E * 2f64.powi(18), - LOG2_E * 2f64.powi(19), - LOG2_E * 2f64.powi(20), - ] -}); +// TODO - replace it with LazyLock once it is stable +static SCALE_FACTORS: OnceLock<[f64; 21]> = OnceLock::new(); + +/// returns constants used in calculating the logarithm index. 
+#[inline] +fn scale_factors() -> &'static [f64; 21] { + SCALE_FACTORS.get_or_init(|| { + [ + LOG2_E * 2f64.powi(0), + LOG2_E * 2f64.powi(1), + LOG2_E * 2f64.powi(2), + LOG2_E * 2f64.powi(3), + LOG2_E * 2f64.powi(4), + LOG2_E * 2f64.powi(5), + LOG2_E * 2f64.powi(6), + LOG2_E * 2f64.powi(7), + LOG2_E * 2f64.powi(8), + LOG2_E * 2f64.powi(9), + LOG2_E * 2f64.powi(10), + LOG2_E * 2f64.powi(11), + LOG2_E * 2f64.powi(12), + LOG2_E * 2f64.powi(13), + LOG2_E * 2f64.powi(14), + LOG2_E * 2f64.powi(15), + LOG2_E * 2f64.powi(16), + LOG2_E * 2f64.powi(17), + LOG2_E * 2f64.powi(18), + LOG2_E * 2f64.powi(19), + LOG2_E * 2f64.powi(20), + ] + }) +} /// Breaks the number into a normalized fraction and a base-2 exponent. /// @@ -306,76 +318,85 @@ impl ExpoBuckets { } } +impl Aggregator for Mutex> +where + T: Number, +{ + type InitConfig = BucketConfig; + + type PreComputedValue = T; + + fn create(init: &BucketConfig) -> Self { + Mutex::new(ExpoHistogramDataPoint::new(init)) + } + + fn update(&self, value: T) { + let mut this = match self.lock() { + Ok(guard) => guard, + Err(_) => return, + }; + this.record(value); + } + + fn clone_and_reset(&self, init: &BucketConfig) -> Self { + let mut current = self.lock().unwrap_or_else(|err| err.into_inner()); + let cloned = replace(current.deref_mut(), ExpoHistogramDataPoint::new(init)); + Mutex::new(cloned) + } +} + +#[derive(Debug, Clone, Copy, PartialEq)] +struct BucketConfig { + max_size: i32, + max_scale: i8, +} + /// An aggregator that summarizes a set of measurements as an exponential /// histogram. /// /// Each histogram is scoped by attributes and the aggregation cycle the /// measurements were made in. -pub(crate) struct ExpoHistogram { +pub(crate) struct ExpoHistogram { + value_map: ValueMap>>, + init_time: AggregateTimeInitiator, + temporality: Temporality, + filter: AttributeSetFilter, record_sum: bool, record_min_max: bool, - max_size: i32, - max_scale: i8, - - values: Mutex>>, - - start: Mutex, } impl ExpoHistogram { /// Create a new exponential histogram. pub(crate) fn new( + temporality: Temporality, + filter: AttributeSetFilter, max_size: u32, max_scale: i8, record_min_max: bool, record_sum: bool, ) -> Self { ExpoHistogram { + value_map: ValueMap::new(BucketConfig { + max_size: max_size as i32, + max_scale, + }), + init_time: AggregateTimeInitiator::default(), + temporality, + filter, record_sum, record_min_max, - max_size: max_size as i32, - max_scale, - values: Mutex::new(HashMap::default()), - start: Mutex::new(SystemTime::now()), } } - pub(crate) fn measure(&self, value: T, attrs: &[KeyValue]) { - let f_value = value.into_float(); - // Ignore NaN and infinity. 
- if f_value.is_infinite() || f_value.is_nan() { - return; - } - - let attrs: AttributeSet = attrs.into(); - if let Ok(mut values) = self.values.lock() { - let v = values.entry(attrs).or_insert_with(|| { - ExpoHistogramDataPoint::new( - self.max_size, - self.max_scale, - self.record_min_max, - self.record_sum, - ) - }); - v.record(value) - } - } - - pub(crate) fn delta( - &self, - dest: Option<&mut dyn Aggregation>, - ) -> (usize, Option>) { - let t = SystemTime::now(); - let start = self - .start - .lock() - .map(|s| *s) - .unwrap_or_else(|_| SystemTime::now()); + fn delta(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option>) { + let time = self.init_time.delta(); let h = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if h.is_none() { Some(data::ExponentialHistogram { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Delta, }) } else { @@ -383,76 +404,56 @@ impl ExpoHistogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Delta; - h.data_points.clear(); - - let mut values = match self.values.lock() { - Ok(g) => g, - Err(_) => return (0, None), - }; - - let n = values.len(); - if n > h.data_points.capacity() { - h.data_points.reserve_exact(n - h.data_points.capacity()); - } - - for (a, b) in values.drain() { - h.data_points.push(data::ExponentialHistogramDataPoint { - attributes: a - .iter() - .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) - .collect(), - start_time: start, - time: t, - count: b.count, - min: if self.record_min_max { - Some(b.min) - } else { - None - }, - max: if self.record_min_max { - Some(b.max) - } else { - None - }, - sum: if self.record_sum { b.sum } else { T::default() }, - scale: b.scale, - zero_count: b.zero_count, - positive_bucket: data::ExponentialBucket { - offset: b.pos_buckets.start_bin, - counts: b.pos_buckets.counts.clone(), - }, - negative_bucket: data::ExponentialBucket { - offset: b.neg_buckets.start_bin, - counts: b.neg_buckets.counts.clone(), - }, - zero_threshold: 0.0, - exemplars: vec![], + h.start_time = time.start; + h.time = time.current; + + self.value_map + .collect_and_reset(&mut h.data_points, |attributes, attr| { + let b = attr.into_inner().unwrap_or_else(|err| err.into_inner()); + data::ExponentialHistogramDataPoint { + attributes, + count: b.count, + min: if self.record_min_max { + Some(b.min) + } else { + None + }, + max: if self.record_min_max { + Some(b.max) + } else { + None + }, + sum: if self.record_sum { b.sum } else { T::default() }, + scale: b.scale, + zero_count: b.zero_count, + positive_bucket: data::ExponentialBucket { + offset: b.pos_buckets.start_bin, + counts: b.pos_buckets.counts, + }, + negative_bucket: data::ExponentialBucket { + offset: b.neg_buckets.start_bin, + counts: b.neg_buckets.counts, + }, + zero_threshold: 0.0, + exemplars: vec![], + } }); - } - // The delta collection cycle resets. 
- if let Ok(mut start) = self.start.lock() { - *start = t; - } - - (n, new_agg.map(|a| Box::new(a) as Box<_>)) + (h.data_points.len(), new_agg.map(|a| Box::new(a) as Box<_>)) } - pub(crate) fn cumulative( + fn cumulative( &self, dest: Option<&mut dyn Aggregation>, ) -> (usize, Option>) { - let t = SystemTime::now(); - let start = self - .start - .lock() - .map(|s| *s) - .unwrap_or_else(|_| SystemTime::now()); + let time = self.init_time.cumulative(); let h = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if h.is_none() { Some(data::ExponentialHistogram { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Cumulative, }) } else { @@ -460,66 +461,78 @@ impl ExpoHistogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Cumulative; + h.start_time = time.start; + h.time = time.current; + + self.value_map + .collect_readonly(&mut h.data_points, |attributes, attr| { + let b = attr.lock().unwrap_or_else(|err| err.into_inner()); + data::ExponentialHistogramDataPoint { + attributes, + count: b.count, + min: if self.record_min_max { + Some(b.min) + } else { + None + }, + max: if self.record_min_max { + Some(b.max) + } else { + None + }, + sum: if self.record_sum { b.sum } else { T::default() }, + scale: b.scale, + zero_count: b.zero_count, + positive_bucket: data::ExponentialBucket { + offset: b.pos_buckets.start_bin, + counts: b.pos_buckets.counts.clone(), + }, + negative_bucket: data::ExponentialBucket { + offset: b.neg_buckets.start_bin, + counts: b.neg_buckets.counts.clone(), + }, + zero_threshold: 0.0, + exemplars: vec![], + } + }); - let values = match self.values.lock() { - Ok(g) => g, - Err(_) => return (0, None), - }; - h.data_points.clear(); + (h.data_points.len(), new_agg.map(|a| Box::new(a) as Box<_>)) + } +} - let n = values.len(); - if n > h.data_points.capacity() { - h.data_points.reserve_exact(n - h.data_points.capacity()); +impl Measure for Arc> +where + T: Number, +{ + fn call(&self, measurement: T, attrs: &[KeyValue]) { + let f_value = measurement.into_float(); + // Ignore NaN and infinity. + // Only makes sense if T is f64, maybe this could be no-op for other cases? + if !f_value.is_finite() { + return; } - // TODO: This will use an unbounded amount of memory if there - // are unbounded number of attribute sets being aggregated. Attribute - // sets that become "stale" need to be forgotten so this will not - // overload the system. 
- for (a, b) in values.iter() { - h.data_points.push(data::ExponentialHistogramDataPoint { - attributes: a - .iter() - .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) - .collect(), - start_time: start, - time: t, - count: b.count, - min: if self.record_min_max { - Some(b.min) - } else { - None - }, - max: if self.record_min_max { - Some(b.max) - } else { - None - }, - sum: if self.record_sum { b.sum } else { T::default() }, - scale: b.scale, - zero_count: b.zero_count, - positive_bucket: data::ExponentialBucket { - offset: b.pos_buckets.start_bin, - counts: b.pos_buckets.counts.clone(), - }, - negative_bucket: data::ExponentialBucket { - offset: b.neg_buckets.start_bin, - counts: b.neg_buckets.counts.clone(), - }, - zero_threshold: 0.0, - exemplars: vec![], - }); - } + self.filter.apply(attrs, |filtered| { + self.value_map.measure(measurement, filtered); + }) + } +} - (n, new_agg.map(|a| Box::new(a) as Box<_>)) +impl ComputeAggregation for Arc> +where + T: Number, +{ + fn call(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option>) { + match self.temporality { + Temporality::Delta => self.delta(dest), + _ => self.cumulative(dest), + } } } #[cfg(test)] mod tests { - use std::ops::Neg; - - use opentelemetry::KeyValue; + use std::{ops::Neg, time::SystemTime}; use crate::metrics::internal::{self, AggregateBuilder}; @@ -621,7 +634,10 @@ mod tests { ]; for test in test_cases { - let mut dp = ExpoHistogramDataPoint::::new(test.max_size, 20, true, true); + let mut dp = ExpoHistogramDataPoint::::new(&BucketConfig { + max_size: test.max_size, + max_scale: 20, + }); for v in test.values { dp.record(v); dp.record(-v); @@ -634,7 +650,6 @@ mod tests { } fn run_min_max_sum_f64() { - let alice = &[KeyValue::new("user", "alice")][..]; struct Expected { min: f64, max: f64, @@ -680,13 +695,18 @@ mod tests { ]; for test in test_cases { - let h = ExpoHistogram::new(4, 20, true, true); + let h = Arc::new(ExpoHistogram::new( + Temporality::Cumulative, + AttributeSetFilter::new(None), + 4, + 20, + true, + true, + )); for v in test.values { - h.measure(v, alice); + Measure::call(&h, v, &[]); } - let values = h.values.lock().unwrap(); - let alice: AttributeSet = alice.into(); - let dp = values.get(&alice).unwrap(); + let dp = h.value_map.no_attribute_tracker.lock().unwrap(); assert_eq!(test.expected.max, dp.max); assert_eq!(test.expected.min, dp.min); @@ -696,7 +716,6 @@ mod tests { } fn run_min_max_sum>() { - let alice = &[KeyValue::new("user", "alice")][..]; struct Expected { min: T, max: T, @@ -732,13 +751,18 @@ mod tests { ]; for test in test_cases { - let h = ExpoHistogram::new(4, 20, true, true); + let h = Arc::new(ExpoHistogram::new( + Temporality::Cumulative, + AttributeSetFilter::new(None), + 4, + 20, + true, + true, + )); for v in test.values { - h.measure(v, alice); + Measure::call(&h, v, &[]); } - let values = h.values.lock().unwrap(); - let alice: AttributeSet = alice.into(); - let dp = values.get(&alice).unwrap(); + let dp = h.value_map.no_attribute_tracker.lock().unwrap(); assert_eq!(test.expected.max, dp.max); assert_eq!(test.expected.min, dp.min); @@ -821,7 +845,10 @@ mod tests { }, ]; for test in test_cases { - let mut dp = ExpoHistogramDataPoint::new(test.max_size, 20, true, true); + let mut dp = ExpoHistogramDataPoint::new(&BucketConfig { + max_size: test.max_size, + max_scale: 20, + }); for v in test.values { dp.record(v); dp.record(-v); @@ -838,7 +865,11 @@ mod tests { // These bins are calculated from the following formula: // floor( log2( value) * 2^20 ) using an arbitrary 
precision calculator. - let mut fdp = ExpoHistogramDataPoint::new(4, 20, true, true); + let cfg = BucketConfig { + max_size: 4, + max_scale: 20, + }; + let mut fdp = ExpoHistogramDataPoint::new(&cfg); fdp.record(f64::MAX); assert_eq!( @@ -846,7 +877,7 @@ mod tests { "start bin does not match for large f64 values", ); - let mut fdp = ExpoHistogramDataPoint::new(4, 20, true, true); + let mut fdp = ExpoHistogramDataPoint::new(&cfg); fdp.record(f64::MIN_POSITIVE); assert_eq!( @@ -854,7 +885,7 @@ mod tests { "start bin does not match for small positive values", ); - let mut idp = ExpoHistogramDataPoint::new(4, 20, true, true); + let mut idp = ExpoHistogramDataPoint::new(&cfg); idp.record(i64::MAX); assert_eq!( @@ -1200,12 +1231,13 @@ mod tests { start_bin: 0, counts: vec![], }, - record_min_max: true, - record_sum: true, zero_count: 0, }; - let mut ehdp = ExpoHistogramDataPoint::new(4, 20, true, true); + let mut ehdp = ExpoHistogramDataPoint::new(&BucketConfig { + max_size: 4, + max_scale: 20, + }); ehdp.record(f64::MIN_POSITIVE); ehdp.record(f64::MIN_POSITIVE); ehdp.record(f64::MIN_POSITIVE); @@ -1253,7 +1285,7 @@ mod tests { name: "Delta Single", build: Box::new(move || { box_val( - AggregateBuilder::new(Some(Temporality::Delta), None) + AggregateBuilder::new(Temporality::Delta, None) .exponential_bucket_histogram( max_size, max_scale, @@ -1274,8 +1306,6 @@ mod tests { min: Some(1.into()), max: Some(16.into()), sum: 31.into(), - start_time: SystemTime::now(), - time: SystemTime::now(), scale: -1, positive_bucket: data::ExponentialBucket { offset: -1, @@ -1289,6 +1319,8 @@ mod tests { zero_threshold: 0.0, zero_count: 0, }], + start_time: SystemTime::now(), + time: SystemTime::now(), }, want_count: 1, }, @@ -1296,7 +1328,7 @@ mod tests { name: "Cumulative Single", build: Box::new(move || { box_val( - internal::AggregateBuilder::new(Some(Temporality::Cumulative), None) + internal::AggregateBuilder::new(Temporality::Cumulative, None) .exponential_bucket_histogram( max_size, max_scale, @@ -1322,8 +1354,6 @@ mod tests { offset: -1, counts: vec![1, 4, 1], }, - start_time: SystemTime::now(), - time: SystemTime::now(), negative_bucket: data::ExponentialBucket { offset: 0, counts: vec![], @@ -1332,6 +1362,8 @@ mod tests { zero_threshold: 0.0, zero_count: 0, }], + start_time: SystemTime::now(), + time: SystemTime::now(), }, want_count: 1, }, @@ -1339,7 +1371,7 @@ mod tests { name: "Delta Multiple", build: Box::new(move || { box_val( - internal::AggregateBuilder::new(Some(Temporality::Delta), None) + internal::AggregateBuilder::new(Temporality::Delta, None) .exponential_bucket_histogram( max_size, max_scale, @@ -1368,8 +1400,6 @@ mod tests { offset: -1, counts: vec![1, 4, 1], }, - start_time: SystemTime::now(), - time: SystemTime::now(), negative_bucket: data::ExponentialBucket { offset: 0, counts: vec![], @@ -1378,6 +1408,8 @@ mod tests { zero_threshold: 0.0, zero_count: 0, }], + start_time: SystemTime::now(), + time: SystemTime::now(), }, want_count: 1, }, @@ -1385,7 +1417,7 @@ mod tests { name: "Cumulative Multiple ", build: Box::new(move || { box_val( - internal::AggregateBuilder::new(Some(Temporality::Cumulative), None) + internal::AggregateBuilder::new(Temporality::Cumulative, None) .exponential_bucket_histogram( max_size, max_scale, @@ -1414,8 +1446,6 @@ mod tests { counts: vec![1, 6, 2], }, attributes: vec![], - start_time: SystemTime::now(), - time: SystemTime::now(), negative_bucket: data::ExponentialBucket { offset: 0, counts: vec![], @@ -1424,6 +1454,8 @@ mod tests { zero_threshold: 0.0, 
zero_count: 0, }], + start_time: SystemTime::now(), + time: SystemTime::now(), }, want_count: 1, }, @@ -1434,6 +1466,8 @@ mod tests { let mut got: Box = Box::new(data::ExponentialHistogram:: { data_points: vec![], + start_time: SystemTime::now(), + time: SystemTime::now(), temporality: Temporality::Delta, }); let mut count = 0; @@ -1444,7 +1478,7 @@ mod tests { count = out_fn.call(Some(got.as_mut())).0 } - assert_aggregation_eq::(Box::new(test.want), got, true, test.name); + assert_aggregation_eq::(Box::new(test.want), got, test.name); assert_eq!(test.want_count, count, "{}", test.name); } } @@ -1452,7 +1486,6 @@ mod tests { fn assert_aggregation_eq( a: Box, b: Box, - ignore_timestamp: bool, test_name: &'static str, ) { assert_eq!( @@ -1471,13 +1504,7 @@ mod tests { test_name ); for (a, b) in a.data_points.iter().zip(b.data_points.iter()) { - assert_data_points_eq( - a, - b, - ignore_timestamp, - "mismatching gauge data points", - test_name, - ); + assert_gauge_data_points_eq(a, b, "mismatching gauge data points", test_name); } } else if let Some(a) = a.as_any().downcast_ref::>() { let b = b.as_any().downcast_ref::>().unwrap(); @@ -1498,13 +1525,7 @@ mod tests { test_name ); for (a, b) in a.data_points.iter().zip(b.data_points.iter()) { - assert_data_points_eq( - a, - b, - ignore_timestamp, - "mismatching sum data points", - test_name, - ); + assert_sum_data_points_eq(a, b, "mismatching sum data points", test_name); } } else if let Some(a) = a.as_any().downcast_ref::>() { let b = b.as_any().downcast_ref::>().unwrap(); @@ -1520,13 +1541,7 @@ mod tests { test_name ); for (a, b) in a.data_points.iter().zip(b.data_points.iter()) { - assert_hist_data_points_eq( - a, - b, - ignore_timestamp, - "mismatching hist data points", - test_name, - ); + assert_hist_data_points_eq(a, b, "mismatching hist data points", test_name); } } else if let Some(a) = a.as_any().downcast_ref::>() { let b = b @@ -1548,7 +1563,6 @@ mod tests { assert_exponential_hist_data_points_eq( a, b, - ignore_timestamp, "mismatching hist data points", test_name, ); @@ -1558,10 +1572,9 @@ mod tests { } } - fn assert_data_points_eq( - a: &data::DataPoint, - b: &data::DataPoint, - ignore_timestamp: bool, + fn assert_sum_data_points_eq( + a: &data::SumDataPoint, + b: &data::SumDataPoint, message: &'static str, test_name: &'static str, ) { @@ -1571,21 +1584,25 @@ mod tests { test_name, message ); assert_eq!(a.value, b.value, "{}: {} value", test_name, message); + } - if !ignore_timestamp { - assert_eq!( - a.start_time, b.start_time, - "{}: {} start time", - test_name, message - ); - assert_eq!(a.time, b.time, "{}: {} time", test_name, message); - } + fn assert_gauge_data_points_eq( + a: &data::GaugeDataPoint, + b: &data::GaugeDataPoint, + message: &'static str, + test_name: &'static str, + ) { + assert_eq!( + a.attributes, b.attributes, + "{}: {} attributes", + test_name, message + ); + assert_eq!(a.value, b.value, "{}: {} value", test_name, message); } fn assert_hist_data_points_eq( a: &data::HistogramDataPoint, b: &data::HistogramDataPoint, - ignore_timestamp: bool, message: &'static str, test_name: &'static str, ) { @@ -1604,21 +1621,11 @@ mod tests { assert_eq!(a.min, b.min, "{}: {} min", test_name, message); assert_eq!(a.max, b.max, "{}: {} max", test_name, message); assert_eq!(a.sum, b.sum, "{}: {} sum", test_name, message); - - if !ignore_timestamp { - assert_eq!( - a.start_time, b.start_time, - "{}: {} start time", - test_name, message - ); - assert_eq!(a.time, b.time, "{}: {} time", test_name, message); - } } fn 
assert_exponential_hist_data_points_eq( a: &data::ExponentialHistogramDataPoint, b: &data::ExponentialHistogramDataPoint, - ignore_timestamp: bool, message: &'static str, test_name: &'static str, ) { @@ -1649,14 +1656,5 @@ mod tests { "{}: {} neg", test_name, message ); - - if !ignore_timestamp { - assert_eq!( - a.start_time, b.start_time, - "{}: {} start time", - test_name, message - ); - assert_eq!(a.time, b.time, "{}: {} time", test_name, message); - } } } diff --git a/opentelemetry-sdk/src/metrics/internal/histogram.rs b/opentelemetry-sdk/src/metrics/internal/histogram.rs index 089415ba7c..c79f9e4899 100644 --- a/opentelemetry-sdk/src/metrics/internal/histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/histogram.rs @@ -1,47 +1,49 @@ -use std::collections::HashSet; -use std::sync::atomic::Ordering; +use std::mem::replace; +use std::ops::DerefMut; use std::sync::Arc; -use std::{sync::Mutex, time::SystemTime}; +use std::sync::Mutex; use crate::metrics::data::HistogramDataPoint; -use crate::metrics::data::{self, Aggregation, Temporality}; +use crate::metrics::data::{self, Aggregation}; +use crate::metrics::Temporality; use opentelemetry::KeyValue; -use super::Number; -use super::{AtomicTracker, AtomicallyUpdate, Operation, ValueMap}; - -struct HistogramUpdate; - -impl Operation for HistogramUpdate { - fn update_tracker>(tracker: &AT, value: T, index: usize) { - tracker.update_histogram(index, value); +use super::aggregate::AggregateTimeInitiator; +use super::aggregate::AttributeSetFilter; +use super::ComputeAggregation; +use super::Measure; +use super::ValueMap; +use super::{Aggregator, Number}; + +impl Aggregator for Mutex> +where + T: Number, +{ + type InitConfig = usize; + /// Value and bucket index + type PreComputedValue = (T, usize); + + fn update(&self, (value, index): (T, usize)) { + let mut buckets = self.lock().unwrap_or_else(|err| err.into_inner()); + + buckets.total += value; + buckets.count += 1; + buckets.counts[index] += 1; + if value < buckets.min { + buckets.min = value; + } + if value > buckets.max { + buckets.max = value + } } -} -struct HistogramTracker { - buckets: Mutex>, -} - -impl AtomicTracker for HistogramTracker { - fn update_histogram(&self, index: usize, value: T) { - let mut buckets = match self.buckets.lock() { - Ok(guard) => guard, - Err(_) => return, - }; - - buckets.bin(index, value); - buckets.sum(value); + fn create(count: &usize) -> Self { + Mutex::new(Buckets::::new(*count)) } -} -impl AtomicallyUpdate for HistogramTracker { - type AtomicTracker = HistogramTracker; - - fn new_atomic_tracker(buckets_count: Option) -> Self::AtomicTracker { - let count = buckets_count.unwrap(); - HistogramTracker { - buckets: Mutex::new(Buckets::::new(count)), - } + fn clone_and_reset(&self, count: &usize) -> Self { + let mut current = self.lock().unwrap_or_else(|err| err.into_inner()); + Mutex::new(replace(current.deref_mut(), Buckets::new(*count))) } } @@ -64,88 +66,57 @@ impl Buckets { ..Default::default() } } - - fn sum(&mut self, value: T) { - self.total += value; - } - - fn bin(&mut self, idx: usize, value: T) { - self.counts[idx] += 1; - self.count += 1; - if value < self.min { - self.min = value; - } - if value > self.max { - self.max = value - } - } - - fn reset(&mut self) { - for item in &mut self.counts { - *item = 0; - } - self.count = Default::default(); - self.total = Default::default(); - self.min = T::max(); - self.max = T::min(); - } } /// Summarizes a set of measurements as a histogram with explicitly defined /// buckets. 
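// [Editor's note, illustrative; not part of the diff] With N explicit bounds
// there are N + 1 buckets; e.g. bounds = [1.0, 3.0, 6.0] yields
//     (-inf, 1.0], (1.0, 3.0], (3.0, 6.0], (6.0, +inf)
// which is why `new` below sizes the value map with `bounds.len() + 1`.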
pub(crate) struct Histogram { - value_map: ValueMap, T, HistogramUpdate>, + value_map: ValueMap>>, + init_time: AggregateTimeInitiator, + temporality: Temporality, + filter: AttributeSetFilter, bounds: Vec, record_min_max: bool, record_sum: bool, - start: Mutex, } impl Histogram { - pub(crate) fn new(boundaries: Vec, record_min_max: bool, record_sum: bool) -> Self { - let buckets_count = boundaries.len() + 1; - let mut histogram = Histogram { - value_map: ValueMap::new_with_buckets_count(buckets_count), - bounds: boundaries, + #[allow(unused_mut)] + pub(crate) fn new( + temporality: Temporality, + filter: AttributeSetFilter, + mut bounds: Vec, + record_min_max: bool, + record_sum: bool, + ) -> Self { + #[cfg(feature = "spec_unstable_metrics_views")] + { + // TODO: When views are used, validate this upfront + bounds.retain(|v| !v.is_nan()); + bounds.sort_by(|a, b| a.partial_cmp(b).expect("NaNs filtered out")); + } + + let buckets_count = bounds.len() + 1; + Histogram { + value_map: ValueMap::new(buckets_count), + init_time: AggregateTimeInitiator::default(), + temporality, + filter, + bounds, record_min_max, record_sum, - start: Mutex::new(SystemTime::now()), - }; - - histogram.bounds.retain(|v| !v.is_nan()); - histogram - .bounds - .sort_by(|a, b| a.partial_cmp(b).expect("NaNs filtered out")); - - histogram + } } - pub(crate) fn measure(&self, measurement: T, attrs: &[KeyValue]) { - let f = measurement.into_float(); - - // This search will return an index in the range `[0, bounds.len()]`, where - // it will return `bounds.len()` if value is greater than the last element - // of `bounds`. This aligns with the buckets in that the length of buckets - // is `bounds.len()+1`, with the last bucket representing: - // `(bounds[bounds.len()-1], +∞)`. - let index = self.bounds.partition_point(|&x| x < f); - self.value_map.measure(measurement, attrs, index); - } + fn delta(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option>) { + let time = self.init_time.delta(); - pub(crate) fn delta( - &self, - dest: Option<&mut dyn Aggregation>, - ) -> (usize, Option>) { - let t = SystemTime::now(); - let start = self - .start - .lock() - .map(|s| *s) - .unwrap_or_else(|_| SystemTime::now()); let h = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if h.is_none() { Some(data::Histogram { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Delta, }) } else { @@ -153,28 +124,17 @@ impl Histogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Delta; - h.data_points.clear(); - - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. 
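        // [Editor's sketch; not part of the diff] The bound sanitization done in
        // `new` above (under the `spec_unstable_metrics_views` feature), shown
        // standalone. NaNs must be removed first or the `partial_cmp` sort would
        // panic:
        //
        //     fn sanitize(mut bounds: Vec<f64>) -> Vec<f64> {
        //         bounds.retain(|v| !v.is_nan());
        //         bounds.sort_by(|a, b| a.partial_cmp(b).expect("NaNs filtered out"));
        //         bounds
        //     }
        //     // sanitize(vec![3.0, f64::NAN, 1.0]) == vec![1.0, 3.0]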
- let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > h.data_points.capacity() { - h.data_points.reserve_exact(n - h.data_points.capacity()); - } - - if self - .value_map - .has_no_attribute_value - .swap(false, Ordering::AcqRel) - { - if let Ok(ref mut b) = self.value_map.no_attribute_tracker.buckets.lock() { - h.data_points.push(HistogramDataPoint { - attributes: vec![], - start_time: start, - time: t, + h.start_time = time.start; + h.time = time.current; + + self.value_map + .collect_and_reset(&mut h.data_points, |attributes, aggr| { + let b = aggr.into_inner().unwrap_or_else(|err| err.into_inner()); + HistogramDataPoint { + attributes, count: b.count, bounds: self.bounds.clone(), - bucket_counts: b.counts.clone(), + bucket_counts: b.counts, sum: if self.record_sum { b.total } else { @@ -191,72 +151,23 @@ impl Histogram { None }, exemplars: vec![], - }); - - b.reset(); - } - } - - let mut trackers = match self.value_map.trackers.write() { - Ok(v) => v, - Err(_) => return (0, None), - }; - - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.drain() { - if seen.insert(Arc::as_ptr(&tracker)) { - if let Ok(b) = tracker.buckets.lock() { - h.data_points.push(HistogramDataPoint { - attributes: attrs.clone(), - start_time: start, - time: t, - count: b.count, - bounds: self.bounds.clone(), - bucket_counts: b.counts.clone(), - sum: if self.record_sum { - b.total - } else { - T::default() - }, - min: if self.record_min_max { - Some(b.min) - } else { - None - }, - max: if self.record_min_max { - Some(b.max) - } else { - None - }, - exemplars: vec![], - }); } - } - } - - // The delta collection cycle resets. - if let Ok(mut start) = self.start.lock() { - *start = t; - } - self.value_map.count.store(0, Ordering::SeqCst); + }); (h.data_points.len(), new_agg.map(|a| Box::new(a) as Box<_>)) } - pub(crate) fn cumulative( + fn cumulative( &self, dest: Option<&mut dyn Aggregation>, ) -> (usize, Option>) { - let t = SystemTime::now(); - let start = self - .start - .lock() - .map(|s| *s) - .unwrap_or_else(|_| SystemTime::now()); + let time = self.init_time.cumulative(); let h = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if h.is_none() { Some(data::Histogram { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Cumulative, }) } else { @@ -264,25 +175,14 @@ impl Histogram { }; let h = h.unwrap_or_else(|| new_agg.as_mut().expect("present if h is none")); h.temporality = Temporality::Cumulative; - h.data_points.clear(); - - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. 
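        // [Editor's note, illustrative; not part of the diff] The two collection
        // paths mirror the temporality semantics:
        //
        //     collect_and_reset (Delta)      — drains the trackers, so the next
        //                                      cycle starts from zero
        //     collect_readonly  (Cumulative) — reads the trackers in place, so
        //                                      running totals keep growing
        //
        // This is also why the Delta closure above may consume its aggregator
        // (`aggr.into_inner()`), while the Cumulative path below only locks it.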
- let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > h.data_points.capacity() { - h.data_points.reserve_exact(n - h.data_points.capacity()); - } - - if self - .value_map - .has_no_attribute_value - .load(Ordering::Acquire) - { - if let Ok(b) = &self.value_map.no_attribute_tracker.buckets.lock() { - h.data_points.push(HistogramDataPoint { - attributes: vec![], - start_time: start, - time: t, + h.start_time = time.start; + h.time = time.current; + + self.value_map + .collect_readonly(&mut h.data_points, |attributes, aggr| { + let b = aggr.lock().unwrap_or_else(|err| err.into_inner()); + HistogramDataPoint { + attributes, count: b.count, bounds: self.bounds.clone(), bucket_counts: b.counts.clone(), @@ -302,51 +202,69 @@ impl Histogram { None }, exemplars: vec![], - }); - } - } + } + }); - let trackers = match self.value_map.trackers.write() { - Ok(v) => v, - Err(_) => return (0, None), - }; + (h.data_points.len(), new_agg.map(|a| Box::new(a) as Box<_>)) + } +} - // TODO: This will use an unbounded amount of memory if there - // are unbounded number of attribute sets being aggregated. Attribute - // sets that become "stale" need to be forgotten so this will not - // overload the system. - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.iter() { - if seen.insert(Arc::as_ptr(tracker)) { - if let Ok(b) = tracker.buckets.lock() { - h.data_points.push(HistogramDataPoint { - attributes: attrs.clone(), - start_time: start, - time: t, - count: b.count, - bounds: self.bounds.clone(), - bucket_counts: b.counts.clone(), - sum: if self.record_sum { - b.total - } else { - T::default() - }, - min: if self.record_min_max { - Some(b.min) - } else { - None - }, - max: if self.record_min_max { - Some(b.max) - } else { - None - }, - exemplars: vec![], - }); - } - } +impl Measure for Arc> +where + T: Number, +{ + fn call(&self, measurement: T, attrs: &[KeyValue]) { + let f = measurement.into_float(); + // This search will return an index in the range `[0, bounds.len()]`, where + // it will return `bounds.len()` if value is greater than the last element + // of `bounds`. This aligns with the buckets in that the length of buckets + // is `bounds.len()+1`, with the last bucket representing: + // `(bounds[bounds.len()-1], +∞)`. 
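        // [Editor's worked example; not part of the diff] With the illustrative
        // bounds [1.0, 3.0, 6.0]:
        //     f = 0.5  -> index 0   bucket (-inf, 1.0]
        //     f = 1.0  -> index 0   upper bounds are inclusive (1.0 < 1.0 is false)
        //     f = 3.5  -> index 2   bucket (3.0, 6.0]
        //     f = 10.0 -> index 3   overflow bucket (6.0, +inf)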
+        let index = self.bounds.partition_point(|&x| x < f);
+
+        self.filter.apply(attrs, |filtered| {
+            self.value_map.measure((measurement, index), filtered);
+        })
+    }
+}
+
+impl<T> ComputeAggregation for Arc<Histogram<T>>
+where
+    T: Number,
+{
+    fn call(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option<Box<dyn Aggregation>>) {
+        match self.temporality {
+            Temporality::Delta => self.delta(dest),
+            _ => self.cumulative(dest),
         }
+    }
+}
-    (h.data_points.len(), new_agg.map(|a| Box::new(a) as Box<_>))
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn check_buckets_are_selected_correctly() {
+        let hist = Arc::new(Histogram::<i64>::new(
+            Temporality::Cumulative,
+            AttributeSetFilter::new(None),
+            vec![1.0, 3.0, 6.0],
+            false,
+            false,
+        ));
+        for v in 1..11 {
+            Measure::call(&hist, v, &[]);
+        }
+        let (count, dp) = ComputeAggregation::call(&hist, None);
+        let dp = dp.unwrap();
+        let dp = dp.as_any().downcast_ref::<data::Histogram<i64>>().unwrap();
+        assert_eq!(count, 1);
+        assert_eq!(dp.data_points[0].count, 10);
+        assert_eq!(dp.data_points[0].bucket_counts.len(), 4);
+        assert_eq!(dp.data_points[0].bucket_counts[0], 1); // 1
+        assert_eq!(dp.data_points[0].bucket_counts[1], 2); // 2, 3
+        assert_eq!(dp.data_points[0].bucket_counts[2], 3); // 4, 5, 6
+        assert_eq!(dp.data_points[0].bucket_counts[3], 4); // 7, 8, 9, 10
     }
 }
diff --git a/opentelemetry-sdk/src/metrics/internal/last_value.rs b/opentelemetry-sdk/src/metrics/internal/last_value.rs
index d1eab4fada..8fbd92f44a 100644
--- a/opentelemetry-sdk/src/metrics/internal/last_value.rs
+++ b/opentelemetry-sdk/src/metrics/internal/last_value.rs
@@ -1,127 +1,152 @@
-use std::{
-    collections::HashSet,
-    sync::{atomic::Ordering, Arc, Mutex},
-    time::SystemTime,
-};
+use std::sync::Arc;

-use crate::metrics::data::DataPoint;
+use crate::metrics::{
+    data::{self, Aggregation, GaugeDataPoint},
+    Temporality,
+};
 use opentelemetry::KeyValue;

-use super::{Assign, AtomicTracker, Number, ValueMap};
+use super::{
+    aggregate::{AggregateTimeInitiator, AttributeSetFilter},
+    Aggregator, AtomicTracker, AtomicallyUpdate, ComputeAggregation, Measure, Number, ValueMap,
+};

-/// Summarizes a set of measurements as the last one made.
-pub(crate) struct LastValue<T: Number> {
-    value_map: ValueMap<T, T, Assign>,
-    start: Mutex<SystemTime>,
+/// This is reused by `PrecomputedSum`.
+pub(crate) struct Assign<T>
+where
+    T: AtomicallyUpdate<T>,
+{
+    pub(crate) value: T::AtomicTracker,
 }

-impl<T: Number> LastValue<T> {
-    pub(crate) fn new() -> Self {
-        LastValue {
-            value_map: ValueMap::new(),
-            start: Mutex::new(SystemTime::now()),
+impl<T> Aggregator for Assign<T>
+where
+    T: Number,
+{
+    type InitConfig = ();
+    type PreComputedValue = T;
+
+    fn create(_init: &()) -> Self {
+        Self {
+            value: T::new_atomic_tracker(T::default()),
         }
     }

-    pub(crate) fn measure(&self, measurement: T, attrs: &[KeyValue]) {
-        // The argument index is not applicable to LastValue.
-        self.value_map.measure(measurement, attrs, 0);
+    fn update(&self, value: T) {
+        self.value.store(value)
     }

-    pub(crate) fn compute_aggregation_delta(&self, dest: &mut Vec<DataPoint<T>>) {
-        let t = SystemTime::now();
-        let prev_start = self.start.lock().map(|start| *start).unwrap_or(t);
-        dest.clear();
-
-        // Max number of data points need to account for the special casing
-        // of the no attribute value + overflow attribute.
- let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > dest.capacity() { - dest.reserve_exact(n - dest.capacity()); + fn clone_and_reset(&self, _: &()) -> Self { + Self { + value: T::new_atomic_tracker(self.value.get_and_reset_value()), } + } +} - if self - .value_map - .has_no_attribute_value - .swap(false, Ordering::AcqRel) - { - dest.push(DataPoint { - attributes: vec![], - start_time: Some(prev_start), - time: Some(t), - value: self.value_map.no_attribute_tracker.get_and_reset_value(), - exemplars: vec![], - }); +/// Summarizes a set of measurements as the last one made. +pub(crate) struct LastValue { + value_map: ValueMap>, + init_time: AggregateTimeInitiator, + temporality: Temporality, + filter: AttributeSetFilter, +} + +impl LastValue { + pub(crate) fn new(temporality: Temporality, filter: AttributeSetFilter) -> Self { + LastValue { + value_map: ValueMap::new(()), + init_time: AggregateTimeInitiator::default(), + temporality, + filter, } + } - let mut trackers = match self.value_map.trackers.write() { - Ok(v) => v, - _ => return, + pub(crate) fn delta( + &self, + dest: Option<&mut dyn Aggregation>, + ) -> (usize, Option>) { + let time = self.init_time.delta(); + + let s_data = dest.and_then(|d| d.as_mut().downcast_mut::>()); + let mut new_agg = if s_data.is_none() { + Some(data::Gauge { + data_points: vec![], + start_time: Some(time.start), + time: time.current, + }) + } else { + None }; + let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); + s_data.start_time = Some(time.start); + s_data.time = time.current; + + self.value_map + .collect_and_reset(&mut s_data.data_points, |attributes, aggr| GaugeDataPoint { + attributes, + value: aggr.value.get_value(), + exemplars: vec![], + }); - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.drain() { - if seen.insert(Arc::as_ptr(&tracker)) { - dest.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: tracker.get_value(), - exemplars: vec![], - }); - } - } - - // The delta collection cycle resets. - if let Ok(mut start) = self.start.lock() { - *start = t; - } - self.value_map.count.store(0, Ordering::SeqCst); + ( + s_data.data_points.len(), + new_agg.map(|a| Box::new(a) as Box<_>), + ) } - pub(crate) fn compute_aggregation_cumulative(&self, dest: &mut Vec>) { - let t = SystemTime::now(); - let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); + pub(crate) fn cumulative( + &self, + dest: Option<&mut dyn Aggregation>, + ) -> (usize, Option>) { + let time = self.init_time.cumulative(); + let s_data = dest.and_then(|d| d.as_mut().downcast_mut::>()); + let mut new_agg = if s_data.is_none() { + Some(data::Gauge { + data_points: vec![], + start_time: Some(time.start), + time: time.current, + }) + } else { + None + }; + let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); - dest.clear(); + s_data.start_time = Some(time.start); + s_data.time = time.current; - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. 
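        // [Editor's sketch, toy types; not part of the diff] The `Assign`
        // aggregator above is last-write-wins, and a Delta collect
        // snapshots-and-resets it:
        //
        //     use std::sync::atomic::{AtomicI64, Ordering};
        //     struct ToyAssign(AtomicI64);
        //     impl ToyAssign {
        //         fn update(&self, v: i64) {
        //             self.0.store(v, Ordering::Relaxed) // last value wins
        //         }
        //         fn clone_and_reset(&self) -> Self {
        //             // move the value out, leave the default behind
        //             ToyAssign(AtomicI64::new(self.0.swap(0, Ordering::Relaxed)))
        //         }
        //     }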
- let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > dest.capacity() { - dest.reserve_exact(n - dest.capacity()); - } - - if self - .value_map - .has_no_attribute_value - .load(Ordering::Acquire) - { - dest.push(DataPoint { - attributes: vec![], - start_time: Some(prev_start), - time: Some(t), - value: self.value_map.no_attribute_tracker.get_value(), + self.value_map + .collect_readonly(&mut s_data.data_points, |attributes, aggr| GaugeDataPoint { + attributes, + value: aggr.value.get_value(), exemplars: vec![], }); - } - let trackers = match self.value_map.trackers.write() { - Ok(v) => v, - _ => return, - }; + ( + s_data.data_points.len(), + new_agg.map(|a| Box::new(a) as Box<_>), + ) + } +} + +impl Measure for Arc> +where + T: Number, +{ + fn call(&self, measurement: T, attrs: &[KeyValue]) { + self.filter.apply(attrs, |filtered| { + self.value_map.measure(measurement, filtered); + }) + } +} - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.iter() { - if seen.insert(Arc::as_ptr(tracker)) { - dest.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: tracker.get_value(), - exemplars: vec![], - }); - } +impl ComputeAggregation for Arc> +where + T: Number, +{ + fn call(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option>) { + match self.temporality { + Temporality::Delta => self.delta(dest), + _ => self.cumulative(dest), } } } diff --git a/opentelemetry-sdk/src/metrics/internal/mod.rs b/opentelemetry-sdk/src/metrics/internal/mod.rs index abc691b2fc..1b5a6a4de5 100644 --- a/opentelemetry-sdk/src/metrics/internal/mod.rs +++ b/opentelemetry-sdk/src/metrics/internal/mod.rs @@ -6,97 +6,95 @@ mod precomputed_sum; mod sum; use core::fmt; -use std::collections::HashMap; -use std::marker::PhantomData; -use std::ops::{Add, AddAssign, Sub}; +use std::collections::{HashMap, HashSet}; +use std::mem::swap; +use std::ops::{Add, AddAssign, DerefMut, Sub}; use std::sync::atomic::{AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering}; -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, OnceLock, RwLock}; -use aggregate::is_under_cardinality_limit; +use aggregate::{is_under_cardinality_limit, STREAM_CARDINALITY_LIMIT}; pub(crate) use aggregate::{AggregateBuilder, ComputeAggregation, Measure}; pub(crate) use exponential_histogram::{EXPO_MAX_SCALE, EXPO_MIN_SCALE}; -use once_cell::sync::Lazy; -use opentelemetry::metrics::MetricsError; -use opentelemetry::{global, KeyValue}; +use opentelemetry::{otel_warn, KeyValue}; -use crate::metrics::AttributeSet; +// TODO Replace it with LazyLock once it is stable +pub(crate) static STREAM_OVERFLOW_ATTRIBUTES: OnceLock> = OnceLock::new(); -pub(crate) static STREAM_OVERFLOW_ATTRIBUTES: Lazy> = - Lazy::new(|| vec![KeyValue::new("otel.metric.overflow", "true")]); - -/// Abstracts the update operation for a measurement. -pub(crate) trait Operation { - fn update_tracker>(tracker: &AT, value: T, index: usize); +#[inline] +fn stream_overflow_attributes() -> &'static Vec { + STREAM_OVERFLOW_ATTRIBUTES.get_or_init(|| vec![KeyValue::new("otel.metric.overflow", "true")]) } -struct Increment; +pub(crate) trait Aggregator { + /// A static configuration that is needed in order to initialize aggregator. + /// E.g. bucket_size at creation time . + type InitConfig; -impl Operation for Increment { - fn update_tracker>(tracker: &AT, value: T, _: usize) { - tracker.add(value); - } -} + /// Some aggregators can do some computations before updating aggregator. 
+ /// This helps to reduce contention for aggregators because it makes + /// [`Aggregator::update`] as short as possible. + type PreComputedValue; -struct Assign; + /// Called everytime a new attribute-set is stored. + fn create(init: &Self::InitConfig) -> Self; -impl Operation for Assign { - fn update_tracker>(tracker: &AT, value: T, _: usize) { - tracker.store(value); - } + /// Called for each measurement. + fn update(&self, value: Self::PreComputedValue); + + /// Return current value and reset this instance + fn clone_and_reset(&self, init: &Self::InitConfig) -> Self; } /// The storage for sums. /// /// This structure is parametrized by an `Operation` that indicates how /// updates to the underlying value trackers should be performed. -pub(crate) struct ValueMap, T: Number, O> { +pub(crate) struct ValueMap +where + A: Aggregator, +{ /// Trackers store the values associated with different attribute sets. - trackers: RwLock, Arc>>, + trackers: RwLock, Arc>>, + + /// Used ONLY by Delta collect. The data type must match the one used in + /// `trackers` to allow mem::swap. Wrapping the type in `OnceLock` to + /// avoid this allocation for Cumulative aggregation. + trackers_for_collect: OnceLock, Arc>>>, + /// Number of different attribute set stored in the `trackers` map. count: AtomicUsize, /// Indicates whether a value with no attributes has been stored. has_no_attribute_value: AtomicBool, /// Tracker for values with no attributes attached. - no_attribute_tracker: AU::AtomicTracker, - /// Buckets Count is only used by Histogram. - buckets_count: Option, - phantom: PhantomData, -} - -impl, T: Number, O> Default for ValueMap { - fn default() -> Self { - ValueMap::new() - } + no_attribute_tracker: A, + /// Configuration for an Aggregator + config: A::InitConfig, } -impl, T: Number, O> ValueMap { - fn new() -> Self { +impl ValueMap +where + A: Aggregator, +{ + fn new(config: A::InitConfig) -> Self { ValueMap { - trackers: RwLock::new(HashMap::new()), + trackers: RwLock::new(HashMap::with_capacity(1 + STREAM_CARDINALITY_LIMIT)), + trackers_for_collect: OnceLock::new(), has_no_attribute_value: AtomicBool::new(false), - no_attribute_tracker: AU::new_atomic_tracker(None), + no_attribute_tracker: A::create(&config), count: AtomicUsize::new(0), - buckets_count: None, - phantom: PhantomData, + config, } } - fn new_with_buckets_count(buckets_count: usize) -> Self { - ValueMap { - trackers: RwLock::new(HashMap::new()), - has_no_attribute_value: AtomicBool::new(false), - no_attribute_tracker: AU::new_atomic_tracker(Some(buckets_count)), - count: AtomicUsize::new(0), - buckets_count: Some(buckets_count), - phantom: PhantomData, - } + #[inline] + fn trackers_for_collect(&self) -> &RwLock, Arc>> { + self.trackers_for_collect + .get_or_init(|| RwLock::new(HashMap::with_capacity(1 + STREAM_CARDINALITY_LIMIT))) } -} -impl, T: Number, O: Operation> ValueMap { - fn measure(&self, measurement: T, attributes: &[KeyValue], index: usize) { + fn measure(&self, value: A::PreComputedValue, attributes: &[KeyValue]) { if attributes.is_empty() { - O::update_tracker(&self.no_attribute_tracker, measurement, index); + self.no_attribute_tracker.update(value); self.has_no_attribute_value.store(true, Ordering::Release); return; } @@ -107,14 +105,14 @@ impl, T: Number, O: Operation> ValueMap { // Try to retrieve and update the tracker with the attributes in the provided order first if let Some(tracker) = trackers.get(attributes) { - O::update_tracker(&**tracker, measurement, index); + tracker.update(value); return; } // Try to 
retrieve and update the tracker with the attributes sorted. - let sorted_attrs = AttributeSet::from(attributes).into_vec(); + let sorted_attrs = sort_and_dedup(attributes); if let Some(tracker) = trackers.get(sorted_attrs.as_slice()) { - O::update_tracker(&**tracker, measurement, index); + tracker.update(value); return; } @@ -128,47 +126,120 @@ impl, T: Number, O: Operation> ValueMap { // Recheck both the provided and sorted orders after acquiring the write lock // in case another thread has pushed an update in the meantime. if let Some(tracker) = trackers.get(attributes) { - O::update_tracker(&**tracker, measurement, index); + tracker.update(value); } else if let Some(tracker) = trackers.get(sorted_attrs.as_slice()) { - O::update_tracker(&**tracker, measurement, index); + tracker.update(value); } else if is_under_cardinality_limit(self.count.load(Ordering::SeqCst)) { - let new_tracker = Arc::new(AU::new_atomic_tracker(self.buckets_count)); - O::update_tracker(&*new_tracker, measurement, index); + let new_tracker = Arc::new(A::create(&self.config)); + new_tracker.update(value); // Insert tracker with the attributes in the provided and sorted orders trackers.insert(attributes.to_vec(), new_tracker.clone()); trackers.insert(sorted_attrs, new_tracker); self.count.fetch_add(1, Ordering::SeqCst); - } else if let Some(overflow_value) = trackers.get(STREAM_OVERFLOW_ATTRIBUTES.as_slice()) { - O::update_tracker(&**overflow_value, measurement, index); + } else if let Some(overflow_value) = trackers.get(stream_overflow_attributes().as_slice()) { + overflow_value.update(value); + } else { + let new_tracker = A::create(&self.config); + new_tracker.update(value); + trackers.insert(stream_overflow_attributes().clone(), Arc::new(new_tracker)); + otel_warn!( name: "ValueMap.measure", + message = "Maximum data points for metric stream exceeded. Entry added to overflow. Subsequent overflows to same metric until next collect will not be logged." + ); + } + } + + /// Iterate through all attribute sets and populate `DataPoints` in readonly mode. + /// This is used in Cumulative temporality mode, where [`ValueMap`] is not cleared. + pub(crate) fn collect_readonly(&self, dest: &mut Vec, mut map_fn: MapFn) + where + MapFn: FnMut(Vec, &A) -> Res, + { + prepare_data(dest, self.count.load(Ordering::SeqCst)); + if self.has_no_attribute_value.load(Ordering::Acquire) { + dest.push(map_fn(vec![], &self.no_attribute_tracker)); + } + + let Ok(trackers) = self.trackers.read() else { + return; + }; + + let mut seen = HashSet::new(); + for (attrs, tracker) in trackers.iter() { + if seen.insert(Arc::as_ptr(tracker)) { + dest.push(map_fn(attrs.clone(), tracker)); + } + } + } + + /// Iterate through all attribute sets, populate `DataPoints` and reset. + /// This is used in Delta temporality mode, where [`ValueMap`] is reset after collection. + pub(crate) fn collect_and_reset(&self, dest: &mut Vec, mut map_fn: MapFn) + where + MapFn: FnMut(Vec, A) -> Res, + { + prepare_data(dest, self.count.load(Ordering::SeqCst)); + if self.has_no_attribute_value.swap(false, Ordering::AcqRel) { + dest.push(map_fn( + vec![], + self.no_attribute_tracker.clone_and_reset(&self.config), + )); + } + + if let Ok(mut trackers_collect) = self.trackers_for_collect().write() { + if let Ok(mut trackers_current) = self.trackers.write() { + swap(trackers_collect.deref_mut(), trackers_current.deref_mut()); + self.count.store(0, Ordering::SeqCst); + } else { + otel_warn!(name: "MeterProvider.InternalError", message = "Metric collection failed. 
Report this issue in OpenTelemetry repo.", details ="ValueMap trackers lock poisoned"); + return; + } + + let mut seen = HashSet::new(); + for (attrs, tracker) in trackers_collect.drain() { + if seen.insert(Arc::as_ptr(&tracker)) { + dest.push(map_fn(attrs, tracker.clone_and_reset(&self.config))); + } + } } else { - let new_tracker = AU::new_atomic_tracker(self.buckets_count); - O::update_tracker(&new_tracker, measurement, index); - trackers.insert(STREAM_OVERFLOW_ATTRIBUTES.clone(), Arc::new(new_tracker)); - global::handle_error(MetricsError::Other("Warning: Maximum data points for metric stream exceeded. Entry added to overflow. Subsequent overflows to same metric until next collect will not be logged.".into())); + otel_warn!(name: "MeterProvider.InternalError", message = "Metric collection failed. Report this issue in OpenTelemetry repo.", details ="ValueMap trackers for collect lock poisoned"); } } } +/// Clear and allocate exactly required amount of space for all attribute-sets +fn prepare_data(data: &mut Vec, list_len: usize) { + data.clear(); + let total_len = list_len + 2; // to account for no_attributes case + overflow state + if total_len > data.capacity() { + data.reserve_exact(total_len - data.capacity()); + } +} + +fn sort_and_dedup(attributes: &[KeyValue]) -> Vec { + // Use newly allocated vec here as incoming attributes are immutable so + // cannot sort/de-dup in-place. TODO: This allocation can be avoided by + // leveraging a ThreadLocal vec. + let mut sorted = attributes.to_vec(); + sorted.sort_unstable_by(|a, b| a.key.cmp(&b.key)); + sorted.dedup_by(|a, b| a.key == b.key); + sorted +} + /// Marks a type that can have a value added and retrieved atomically. Required since /// different types have different backing atomic mechanisms -pub(crate) trait AtomicTracker: Sync + Send + 'static { - fn store(&self, _value: T) {} - fn add(&self, _value: T) {} - fn get_value(&self) -> T { - T::default() - } - fn get_and_reset_value(&self) -> T { - T::default() - } - fn update_histogram(&self, _index: usize, _value: T) {} +pub(crate) trait AtomicTracker: Sync + Send + 'static { + fn store(&self, _value: T); + fn add(&self, _value: T); + fn get_value(&self) -> T; + fn get_and_reset_value(&self) -> T; } /// Marks a type that can have an atomic tracker generated for it -pub(crate) trait AtomicallyUpdate { +pub(crate) trait AtomicallyUpdate { type AtomicTracker: AtomicTracker; - fn new_atomic_tracker(buckets_count: Option) -> Self::AtomicTracker; + fn new_atomic_tracker(init: T) -> Self::AtomicTracker; } pub(crate) trait Number: @@ -255,8 +326,8 @@ impl AtomicTracker for AtomicU64 { impl AtomicallyUpdate for u64 { type AtomicTracker = AtomicU64; - fn new_atomic_tracker(_: Option) -> Self::AtomicTracker { - AtomicU64::new(0) + fn new_atomic_tracker(init: u64) -> Self::AtomicTracker { + AtomicU64::new(init) } } @@ -281,8 +352,8 @@ impl AtomicTracker for AtomicI64 { impl AtomicallyUpdate for i64 { type AtomicTracker = AtomicI64; - fn new_atomic_tracker(_: Option) -> Self::AtomicTracker { - AtomicI64::new(0) + fn new_atomic_tracker(init: i64) -> Self::AtomicTracker { + AtomicI64::new(init) } } @@ -291,10 +362,10 @@ pub(crate) struct F64AtomicTracker { } impl F64AtomicTracker { - fn new() -> Self { - let zero_as_u64 = 0.0_f64.to_bits(); + fn new(init: f64) -> Self { + let value_as_u64 = init.to_bits(); F64AtomicTracker { - inner: AtomicU64::new(zero_as_u64), + inner: AtomicU64::new(value_as_u64), } } } @@ -343,8 +414,8 @@ impl AtomicTracker for F64AtomicTracker { impl AtomicallyUpdate for f64 { 
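    // [Editor's note, illustrative; not part of the diff] `F64AtomicTracker`
    // above can reuse `AtomicU64` because the f64 <-> u64 bit cast is lossless:
    //
    //     let bits = 3.5_f64.to_bits();           // store as raw bits
    //     assert_eq!(f64::from_bits(bits), 3.5);  // load round-trips exactly
    //
    // Additive updates consequently need a compare-exchange loop, since std
    // has no floating-point `fetch_add`.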
type AtomicTracker = F64AtomicTracker; - fn new_atomic_tracker(_: Option) -> Self::AtomicTracker { - F64AtomicTracker::new() + fn new_atomic_tracker(init: f64) -> Self::AtomicTracker { + F64AtomicTracker::new(init) } } @@ -354,7 +425,7 @@ mod tests { #[test] fn can_store_u64_atomic_value() { - let atomic = u64::new_atomic_tracker(None); + let atomic = u64::new_atomic_tracker(0); let atomic_tracker = &atomic as &dyn AtomicTracker; let value = atomic.get_value(); @@ -367,7 +438,7 @@ mod tests { #[test] fn can_add_and_get_u64_atomic_value() { - let atomic = u64::new_atomic_tracker(None); + let atomic = u64::new_atomic_tracker(0); atomic.add(15); atomic.add(10); @@ -377,7 +448,7 @@ mod tests { #[test] fn can_reset_u64_atomic_value() { - let atomic = u64::new_atomic_tracker(None); + let atomic = u64::new_atomic_tracker(0); atomic.add(15); let value = atomic.get_and_reset_value(); @@ -389,7 +460,7 @@ mod tests { #[test] fn can_store_i64_atomic_value() { - let atomic = i64::new_atomic_tracker(None); + let atomic = i64::new_atomic_tracker(0); let atomic_tracker = &atomic as &dyn AtomicTracker; let value = atomic.get_value(); @@ -406,7 +477,7 @@ mod tests { #[test] fn can_add_and_get_i64_atomic_value() { - let atomic = i64::new_atomic_tracker(None); + let atomic = i64::new_atomic_tracker(0); atomic.add(15); atomic.add(-10); @@ -416,7 +487,7 @@ mod tests { #[test] fn can_reset_i64_atomic_value() { - let atomic = i64::new_atomic_tracker(None); + let atomic = i64::new_atomic_tracker(0); atomic.add(15); let value = atomic.get_and_reset_value(); @@ -428,7 +499,7 @@ mod tests { #[test] fn can_store_f64_atomic_value() { - let atomic = f64::new_atomic_tracker(None); + let atomic = f64::new_atomic_tracker(0.0); let atomic_tracker = &atomic as &dyn AtomicTracker; let value = atomic.get_value(); @@ -445,7 +516,7 @@ mod tests { #[test] fn can_add_and_get_f64_atomic_value() { - let atomic = f64::new_atomic_tracker(None); + let atomic = f64::new_atomic_tracker(0.0); atomic.add(15.3); atomic.add(10.4); @@ -456,7 +527,7 @@ mod tests { #[test] fn can_reset_f64_atomic_value() { - let atomic = f64::new_atomic_tracker(None); + let atomic = f64::new_atomic_tracker(0.0); atomic.add(15.5); let value = atomic.get_and_reset_value(); diff --git a/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs b/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs index 060c7baaa6..6421d85f94 100644 --- a/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/precomputed_sum.rs @@ -1,48 +1,52 @@ use opentelemetry::KeyValue; -use crate::metrics::data::{self, Aggregation, DataPoint, Temporality}; +use crate::metrics::data::{self, Aggregation, SumDataPoint}; +use crate::metrics::Temporality; -use super::{Assign, AtomicTracker, Number, ValueMap}; -use std::{ - collections::{HashMap, HashSet}, - sync::{atomic::Ordering, Arc, Mutex}, - time::SystemTime, -}; +use super::aggregate::{AggregateTimeInitiator, AttributeSetFilter}; +use super::{last_value::Assign, AtomicTracker, Number, ValueMap}; +use super::{ComputeAggregation, Measure}; +use std::sync::Arc; +use std::{collections::HashMap, sync::Mutex}; /// Summarizes a set of pre-computed sums as their arithmetic sum. 
pub(crate) struct PrecomputedSum { - value_map: ValueMap, + value_map: ValueMap>, + init_time: AggregateTimeInitiator, + temporality: Temporality, + filter: AttributeSetFilter, monotonic: bool, - start: Mutex, reported: Mutex, T>>, } impl PrecomputedSum { - pub(crate) fn new(monotonic: bool) -> Self { + pub(crate) fn new( + temporality: Temporality, + filter: AttributeSetFilter, + monotonic: bool, + ) -> Self { PrecomputedSum { - value_map: ValueMap::new(), + value_map: ValueMap::new(()), + init_time: AggregateTimeInitiator::default(), + temporality, + filter, monotonic, - start: Mutex::new(SystemTime::now()), reported: Mutex::new(Default::default()), } } - pub(crate) fn measure(&self, measurement: T, attrs: &[KeyValue]) { - // The argument index is not applicable to PrecomputedSum. - self.value_map.measure(measurement, attrs, 0); - } - pub(crate) fn delta( &self, dest: Option<&mut dyn Aggregation>, ) -> (usize, Option>) { - let t = SystemTime::now(); - let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); + let time = self.init_time.delta(); let s_data = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if s_data.is_none() { Some(data::Sum { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Delta, is_monotonic: self.monotonic, }) @@ -50,68 +54,28 @@ impl PrecomputedSum { None }; let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); - s_data.data_points.clear(); + s_data.start_time = time.start; + s_data.time = time.current; s_data.temporality = Temporality::Delta; s_data.is_monotonic = self.monotonic; - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. - let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > s_data.data_points.capacity() { - s_data - .data_points - .reserve_exact(n - s_data.data_points.capacity()); - } - let mut new_reported = HashMap::with_capacity(n); let mut reported = match self.reported.lock() { Ok(r) => r, Err(_) => return (0, None), }; - - if self - .value_map - .has_no_attribute_value - .swap(false, Ordering::AcqRel) - { - let value = self.value_map.no_attribute_tracker.get_value(); - let delta = value - *reported.get(&vec![]).unwrap_or(&T::default()); - new_reported.insert(vec![], value); - - s_data.data_points.push(DataPoint { - attributes: vec![], - start_time: Some(prev_start), - time: Some(t), - value: delta, - exemplars: vec![], - }); - } - - let mut trackers = match self.value_map.trackers.write() { - Ok(v) => v, - Err(_) => return (0, None), - }; - - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.drain() { - if seen.insert(Arc::as_ptr(&tracker)) { - let value = tracker.get_value(); - let delta = value - *reported.get(&attrs).unwrap_or(&T::default()); - new_reported.insert(attrs.clone(), value); - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), + let mut new_reported = HashMap::with_capacity(reported.len()); + + self.value_map + .collect_and_reset(&mut s_data.data_points, |attributes, aggr| { + let value = aggr.value.get_value(); + new_reported.insert(attributes.clone(), value); + let delta = value - *reported.get(&attributes).unwrap_or(&T::default()); + SumDataPoint { + attributes, value: delta, exemplars: vec![], - }); - } - } - - // The delta collection cycle resets. 
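        // [Editor's worked example; not part of the diff] For an observable
        // (pre-computed) sum in Delta mode, each collect reports the change
        // since the previous collect, using the `reported` map above:
        //
        //     observed totals:   10   25   25   40
        //     emitted deltas:    10   15    0   15
        //
        // `new_reported` stores the latest observation so the next cycle can
        // subtract it.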
- if let Ok(mut start) = self.start.lock() { - *start = t; - } - self.value_map.count.store(0, Ordering::SeqCst); + } + }); *reported = new_reported; drop(reported); // drop before values guard is dropped @@ -126,13 +90,14 @@ impl PrecomputedSum { &self, dest: Option<&mut dyn Aggregation>, ) -> (usize, Option>) { - let t = SystemTime::now(); - let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); + let time = self.init_time.cumulative(); let s_data = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if s_data.is_none() { Some(data::Sum { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Cumulative, is_monotonic: self.monotonic, }) @@ -140,50 +105,17 @@ impl PrecomputedSum { None }; let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); - s_data.data_points.clear(); + s_data.start_time = time.start; + s_data.time = time.current; s_data.temporality = Temporality::Cumulative; s_data.is_monotonic = self.monotonic; - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. - let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > s_data.data_points.capacity() { - s_data - .data_points - .reserve_exact(n - s_data.data_points.capacity()); - } - - if self - .value_map - .has_no_attribute_value - .load(Ordering::Acquire) - { - s_data.data_points.push(DataPoint { - attributes: vec![], - start_time: Some(prev_start), - time: Some(t), - value: self.value_map.no_attribute_tracker.get_value(), + self.value_map + .collect_readonly(&mut s_data.data_points, |attributes, aggr| SumDataPoint { + attributes, + value: aggr.value.get_value(), exemplars: vec![], }); - } - - let trackers = match self.value_map.trackers.write() { - Ok(v) => v, - Err(_) => return (0, None), - }; - - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.iter() { - if seen.insert(Arc::as_ptr(tracker)) { - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: tracker.get_value(), - exemplars: vec![], - }); - } - } ( s_data.data_points.len(), @@ -191,3 +123,26 @@ impl PrecomputedSum { ) } } + +impl Measure for Arc> +where + T: Number, +{ + fn call(&self, measurement: T, attrs: &[KeyValue]) { + self.filter.apply(attrs, |filtered| { + self.value_map.measure(measurement, filtered); + }) + } +} + +impl ComputeAggregation for Arc> +where + T: Number, +{ + fn call(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option>) { + match self.temporality { + Temporality::Delta => self.delta(dest), + _ => self.cumulative(dest), + } + } +} diff --git a/opentelemetry-sdk/src/metrics/internal/sum.rs b/opentelemetry-sdk/src/metrics/internal/sum.rs index 66af75734d..7de2f7d2b5 100644 --- a/opentelemetry-sdk/src/metrics/internal/sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/sum.rs @@ -1,20 +1,52 @@ -use std::collections::HashSet; -use std::sync::atomic::Ordering; use std::sync::Arc; use std::vec; -use std::{sync::Mutex, time::SystemTime}; -use crate::metrics::data::{self, Aggregation, DataPoint, Temporality}; +use crate::metrics::data::{self, Aggregation, SumDataPoint}; +use crate::metrics::Temporality; use opentelemetry::KeyValue; -use super::{AtomicTracker, Number}; -use super::{Increment, ValueMap}; +use super::aggregate::{AggregateTimeInitiator, AttributeSetFilter}; +use super::{Aggregator, AtomicTracker, ComputeAggregation, Measure, Number}; +use super::{AtomicallyUpdate, 
ValueMap}; + +struct Increment +where + T: AtomicallyUpdate, +{ + value: T::AtomicTracker, +} + +impl Aggregator for Increment +where + T: Number, +{ + type InitConfig = (); + type PreComputedValue = T; + + fn create(_init: &()) -> Self { + Self { + value: T::new_atomic_tracker(T::default()), + } + } + + fn update(&self, value: T) { + self.value.add(value) + } + + fn clone_and_reset(&self, _: &()) -> Self { + Self { + value: T::new_atomic_tracker(self.value.get_and_reset_value()), + } + } +} /// Summarizes a set of measurements made as their arithmetic sum. pub(crate) struct Sum { - value_map: ValueMap, + value_map: ValueMap>, + init_time: AggregateTimeInitiator, + temporality: Temporality, + filter: AttributeSetFilter, monotonic: bool, - start: Mutex, } impl Sum { @@ -23,29 +55,31 @@ impl Sum { /// /// Each sum is scoped by attributes and the aggregation cycle the measurements /// were made in. - pub(crate) fn new(monotonic: bool) -> Self { + pub(crate) fn new( + temporality: Temporality, + filter: AttributeSetFilter, + monotonic: bool, + ) -> Self { Sum { - value_map: ValueMap::new(), + value_map: ValueMap::new(()), + init_time: AggregateTimeInitiator::default(), + temporality, + filter, monotonic, - start: Mutex::new(SystemTime::now()), } } - pub(crate) fn measure(&self, measurement: T, attrs: &[KeyValue]) { - // The argument index is not applicable to Sum. - self.value_map.measure(measurement, attrs, 0); - } - pub(crate) fn delta( &self, dest: Option<&mut dyn Aggregation>, ) -> (usize, Option>) { - let t = SystemTime::now(); - + let time = self.init_time.delta(); let s_data = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if s_data.is_none() { Some(data::Sum { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Delta, is_monotonic: self.monotonic, }) @@ -53,57 +87,17 @@ impl Sum { None }; let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); + s_data.start_time = time.start; + s_data.time = time.current; s_data.temporality = Temporality::Delta; s_data.is_monotonic = self.monotonic; - s_data.data_points.clear(); - - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. - let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > s_data.data_points.capacity() { - s_data - .data_points - .reserve_exact(n - s_data.data_points.capacity()); - } - let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); - if self - .value_map - .has_no_attribute_value - .swap(false, Ordering::AcqRel) - { - s_data.data_points.push(DataPoint { - attributes: vec![], - start_time: Some(prev_start), - time: Some(t), - value: self.value_map.no_attribute_tracker.get_and_reset_value(), + self.value_map + .collect_and_reset(&mut s_data.data_points, |attributes, aggr| SumDataPoint { + attributes, + value: aggr.value.get_value(), exemplars: vec![], }); - } - - let mut trackers = match self.value_map.trackers.write() { - Ok(v) => v, - Err(_) => return (0, None), - }; - - let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.drain() { - if seen.insert(Arc::as_ptr(&tracker)) { - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: tracker.get_value(), - exemplars: vec![], - }); - } - } - - // The delta collection cycle resets. 
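        // [Editor's sketch, toy types; not part of the diff] Unlike `Assign`,
        // the `Increment` aggregator above accumulates, and Delta collection
        // moves the running total out while zeroing the tracker:
        //
        //     use std::sync::atomic::{AtomicU64, Ordering};
        //     struct ToyIncrement(AtomicU64);
        //     impl ToyIncrement {
        //         fn update(&self, v: u64) {
        //             self.0.fetch_add(v, Ordering::Relaxed);
        //         }
        //         fn clone_and_reset(&self) -> Self {
        //             ToyIncrement(AtomicU64::new(self.0.swap(0, Ordering::Relaxed)))
        //         }
        //     }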
- if let Ok(mut start) = self.start.lock() { - *start = t; - } - self.value_map.count.store(0, Ordering::SeqCst); ( s_data.data_points.len(), @@ -115,12 +109,13 @@ impl Sum { &self, dest: Option<&mut dyn Aggregation>, ) -> (usize, Option>) { - let t = SystemTime::now(); - + let time = self.init_time.cumulative(); let s_data = dest.and_then(|d| d.as_mut().downcast_mut::>()); let mut new_agg = if s_data.is_none() { Some(data::Sum { data_points: vec![], + start_time: time.start, + time: time.current, temporality: Temporality::Cumulative, is_monotonic: self.monotonic, }) @@ -128,56 +123,18 @@ impl Sum { None }; let s_data = s_data.unwrap_or_else(|| new_agg.as_mut().expect("present if s_data is none")); + + s_data.start_time = time.start; + s_data.time = time.current; s_data.temporality = Temporality::Cumulative; s_data.is_monotonic = self.monotonic; - s_data.data_points.clear(); - - // Max number of data points need to account for the special casing - // of the no attribute value + overflow attribute. - let n = self.value_map.count.load(Ordering::SeqCst) + 2; - if n > s_data.data_points.capacity() { - s_data - .data_points - .reserve_exact(n - s_data.data_points.capacity()); - } - let prev_start = self.start.lock().map(|start| *start).unwrap_or(t); - - if self - .value_map - .has_no_attribute_value - .load(Ordering::Acquire) - { - s_data.data_points.push(DataPoint { - attributes: vec![], - start_time: Some(prev_start), - time: Some(t), - value: self.value_map.no_attribute_tracker.get_value(), + self.value_map + .collect_readonly(&mut s_data.data_points, |attributes, aggr| SumDataPoint { + attributes, + value: aggr.value.get_value(), exemplars: vec![], }); - } - - let trackers = match self.value_map.trackers.write() { - Ok(v) => v, - Err(_) => return (0, None), - }; - - // TODO: This will use an unbounded amount of memory if there - // are unbounded number of attribute sets being aggregated. Attribute - // sets that become "stale" need to be forgotten so this will not - // overload the system. 
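        // [Editor's note, illustrative; not part of the diff] The same tracker
        // `Arc` is stored under two keys (caller attribute order and sorted
        // order), so every collection path dedups by pointer identity:
        //
        //     let mut seen = HashSet::new();
        //     if seen.insert(Arc::as_ptr(&tracker)) { /* first alias only */ }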
- let mut seen = HashSet::new(); - for (attrs, tracker) in trackers.iter() { - if seen.insert(Arc::as_ptr(tracker)) { - s_data.data_points.push(DataPoint { - attributes: attrs.clone(), - start_time: Some(prev_start), - time: Some(t), - value: tracker.get_value(), - exemplars: vec![], - }); - } - } ( s_data.data_points.len(), @@ -185,3 +142,26 @@ impl Sum { ) } } + +impl Measure for Arc> +where + T: Number, +{ + fn call(&self, measurement: T, attrs: &[KeyValue]) { + self.filter.apply(attrs, |filtered| { + self.value_map.measure(measurement, filtered); + }) + } +} + +impl ComputeAggregation for Arc> +where + T: Number, +{ + fn call(&self, dest: Option<&mut dyn Aggregation>) -> (usize, Option>) { + match self.temporality { + Temporality::Delta => self.delta(dest), + _ => self.cumulative(dest), + } + } +} diff --git a/opentelemetry-sdk/src/metrics/manual_reader.rs b/opentelemetry-sdk/src/metrics/manual_reader.rs index c3e7860e64..652bf19a35 100644 --- a/opentelemetry-sdk/src/metrics/manual_reader.rs +++ b/opentelemetry-sdk/src/metrics/manual_reader.rs @@ -3,16 +3,14 @@ use std::{ sync::{Mutex, Weak}, }; -use opentelemetry::{ - global, - metrics::{MetricsError, Result}, -}; +use opentelemetry::otel_debug; + +use crate::metrics::{MetricError, MetricResult, Temporality}; use super::{ - data::{ResourceMetrics, Temporality}, - instrument::InstrumentKind, + data::ResourceMetrics, pipeline::Pipeline, - reader::{DefaultTemporalitySelector, MetricReader, SdkProducer, TemporalitySelector}, + reader::{MetricReader, SdkProducer}, }; /// A simple [MetricReader] that allows an application to read metrics on demand. @@ -29,8 +27,8 @@ use super::{ /// # drop(reader) /// ``` pub struct ManualReader { - inner: Box>, - temporality_selector: Box, + inner: Mutex, + temporality: Temporality, } impl Default for ManualReader { @@ -58,23 +56,17 @@ impl ManualReader { } /// A [MetricReader] which is directly called to collect metrics. - pub(crate) fn new(temporality_selector: Box) -> Self { + pub(crate) fn new(temporality: Temporality) -> Self { ManualReader { - inner: Box::new(Mutex::new(ManualReaderInner { + inner: Mutex::new(ManualReaderInner { sdk_producer: None, is_shutdown: false, - })), - temporality_selector, + }), + temporality, } } } -impl TemporalitySelector for ManualReader { - fn temporality(&self, kind: InstrumentKind) -> Temporality { - self.temporality_selector.temporality(kind) - } -} - impl MetricReader for ManualReader { /// Register a pipeline which enables the caller to read metrics from the SDK /// on demand. @@ -84,9 +76,9 @@ impl MetricReader for ManualReader { if inner.sdk_producer.is_none() { inner.sdk_producer = Some(pipeline); } else { - global::handle_error(MetricsError::Config( - "duplicate reader registration, did not register manual reader".into(), - )) + otel_debug!( + name: "ManualReader.DuplicateRegistration", + message = "The pipeline is already registered to the Reader. Registering pipeline multiple times is not allowed."); } }); } @@ -95,12 +87,12 @@ impl MetricReader for ManualReader { /// callbacks necessary and returning the results. /// /// Returns an error if called after shutdown. 
- fn collect(&self, rm: &mut ResourceMetrics) -> Result<()> { + fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()> { let inner = self.inner.lock()?; match &inner.sdk_producer.as_ref().and_then(|w| w.upgrade()) { Some(producer) => producer.produce(rm)?, None => { - return Err(MetricsError::Other( + return Err(MetricError::Other( "reader is shut down or not registered".into(), )) } @@ -110,12 +102,12 @@ impl MetricReader for ManualReader { } /// ForceFlush is a no-op, it always returns nil. - fn force_flush(&self) -> Result<()> { + fn force_flush(&self) -> MetricResult<()> { Ok(()) } /// Closes any connections and frees any resources used by the reader. - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { let mut inner = self.inner.lock()?; // Any future call to collect will now return an error. @@ -124,11 +116,16 @@ impl MetricReader for ManualReader { Ok(()) } + + fn temporality(&self, kind: super::InstrumentKind) -> Temporality { + kind.temporality_preference(self.temporality) + } } /// Configuration for a [ManualReader] +#[derive(Default)] pub struct ManualReaderBuilder { - temporality_selector: Box<dyn TemporalitySelector>, + temporality: Temporality, } impl fmt::Debug for ManualReaderBuilder { @@ -137,33 +134,20 @@ impl fmt::Debug for ManualReaderBuilder { } } -impl Default for ManualReaderBuilder { - fn default() -> Self { - ManualReaderBuilder { - temporality_selector: Box::new(DefaultTemporalitySelector { _private: () }), - } - } -} - impl ManualReaderBuilder { /// New manual builder configuration pub fn new() -> Self { Default::default() } - /// Sets the [TemporalitySelector] a reader will use to determine the [Temporality] of - /// an instrument based on its kind. If this option is not used, the reader will use - /// the default temporality selector. - pub fn with_temporality_selector( - mut self, - temporality_selector: impl TemporalitySelector + 'static, - ) -> Self { - self.temporality_selector = Box::new(temporality_selector); + /// Set the [Temporality] of the reader. + pub fn with_temporality(mut self, temporality: Temporality) -> Self { + self.temporality = temporality; self }
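A minimal sketch of the resulting builder surface, using only names visible in this diff; it assumes `ManualReader::builder()` stays exposed as before:

```rust
use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider, Temporality};

fn main() {
    // The plain Temporality enum replaces the boxed TemporalitySelector
    // trait object that the builder used to carry.
    let reader = ManualReader::builder()
        .with_temporality(Temporality::Delta)
        .build();
    // Registering the reader wires it to the provider's pipeline; collection
    // then happens on demand through the MetricReader::collect path above.
    let provider = SdkMeterProvider::builder().with_reader(reader).build();
    drop(provider);
}
```

 /// Create a new [ManualReader] from this configuration.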
pub fn build(self) -> ManualReader { - ManualReader::new(self.temporality_selector) + ManualReader::new(self.temporality) } } diff --git a/opentelemetry-sdk/src/metrics/meter.rs b/opentelemetry-sdk/src/metrics/meter.rs index 7c2df90e5b..e644623e34 100644 --- a/opentelemetry-sdk/src/metrics/meter.rs +++ b/opentelemetry-sdk/src/metrics/meter.rs @@ -2,21 +2,23 @@ use core::fmt; use std::{borrow::Cow, sync::Arc}; use opentelemetry::{ - global, metrics::{ - noop::NoopAsyncInstrument, Callback, Counter, Gauge, Histogram, InstrumentProvider, - MetricsError, ObservableCounter, ObservableGauge, ObservableUpDownCounter, Result, + AsyncInstrumentBuilder, Counter, Gauge, Histogram, HistogramBuilder, InstrumentBuilder, + InstrumentProvider, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, }, + otel_error, InstrumentationScope, }; -use crate::instrumentation::Scope; use crate::metrics::{ instrument::{Instrument, InstrumentKind, Observable, ResolvedMeasures}, internal::{self, Number}, pipeline::{Pipelines, Resolver}, + MetricError, MetricResult, }; +use super::noop::NoopSyncInstrument; + // maximum length of instrument name const INSTRUMENT_NAME_MAX_LENGTH: usize = 255; // maximum length of instrument unit name @@ -43,16 +45,15 @@ const INSTRUMENT_UNIT_INVALID_CHAR: &str = "characters in instrument unit must b /// /// [Meter API]: opentelemetry::metrics::Meter pub(crate) struct SdkMeter { - scope: Scope, + scope: InstrumentationScope, pipes: Arc, u64_resolver: Resolver, i64_resolver: Resolver, f64_resolver: Resolver, - validation_policy: InstrumentValidationPolicy, } impl SdkMeter { - pub(crate) fn new(scope: Scope, pipes: Arc) -> Self { + pub(crate) fn new(scope: InstrumentationScope, pipes: Arc) -> Self { let view_cache = Default::default(); SdkMeter { @@ -61,405 +62,551 @@ impl SdkMeter { u64_resolver: Resolver::new(Arc::clone(&pipes), Arc::clone(&view_cache)), i64_resolver: Resolver::new(Arc::clone(&pipes), Arc::clone(&view_cache)), f64_resolver: Resolver::new(pipes, view_cache), - validation_policy: InstrumentValidationPolicy::HandleGlobalAndIgnore, } } - #[cfg(test)] - fn with_validation_policy(self, validation_policy: InstrumentValidationPolicy) -> Self { - Self { - validation_policy, - ..self + fn create_counter( + &self, + builder: InstrumentBuilder<'_, Counter>, + resolver: &InstrumentResolver<'_, T>, + ) -> Counter + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this Counter will be ignored.", + reason = format!("{}", err) + ); + return Counter::new(Arc::new(NoopSyncInstrument::new())); } - } -} -#[doc(hidden)] -impl InstrumentProvider for SdkMeter { - fn u64_counter( - &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.u64_resolver); - p.lookup(InstrumentKind::Counter, name, description, unit) + match resolver + .lookup( + InstrumentKind::Counter, + builder.name.clone(), + builder.description, + builder.unit, + None, + ) .map(|i| Counter::new(Arc::new(i))) + { + Ok(counter) => counter, + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = 
"Measurements from this Counter will be ignored.", + reason = format!("{}", err) + ); + Counter::new(Arc::new(NoopSyncInstrument::new())) + } + } } - fn f64_counter( + fn create_observable_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup(InstrumentKind::Counter, name, description, unit) - .map(|i| Counter::new(Arc::new(i))) - } + builder: AsyncInstrumentBuilder<'_, ObservableCounter, T>, + resolver: &InstrumentResolver<'_, T>, + ) -> ObservableCounter + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableCounter will not be invoked.", + reason = format!("{}", err)); + return ObservableCounter::new(); + } - fn u64_observable_counter( - &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.u64_resolver); - let ms = p.measures( + match resolver.measures( InstrumentKind::ObservableCounter, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableCounter::new(Arc::new(NoopAsyncInstrument::new()))); - } + builder.name.clone(), + builder.description, + builder.unit, + None, + ) { + Ok(ms) => { + if ms.is_empty() { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableCounter will not be invoked. Check View Configuration." 
+ ); + return ObservableCounter::new(); + } - let observable = Arc::new(Observable::new(ms)); + let observable = Arc::new(Observable::new(ms)); - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); - } + for callback in builder.callbacks { + let cb_inst = Arc::clone(&observable); + self.pipes + .register_callback(move || callback(cb_inst.as_ref())); + } - Ok(ObservableCounter::new(observable)) + ObservableCounter::new() + } + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableCounter will not be invoked.", + reason = format!("{}", err)); + ObservableCounter::new() + } + } } - fn f64_observable_counter( + fn create_observable_updown_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - let ms = p.measures( - InstrumentKind::ObservableCounter, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableCounter::new(Arc::new(NoopAsyncInstrument::new()))); + builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, T>, + resolver: &InstrumentResolver<'_, T>, + ) -> ObservableUpDownCounter + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableUpDownCounter will not be invoked.", + reason = format!("{}", err)); + return ObservableUpDownCounter::new(); } - let observable = Arc::new(Observable::new(ms)); - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); - } + match resolver.measures( + InstrumentKind::ObservableUpDownCounter, + builder.name.clone(), + builder.description, + builder.unit, + None, + ) { + Ok(ms) => { + if ms.is_empty() { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableUpDownCounter will not be invoked. Check View Configuration." 
+ ); + return ObservableUpDownCounter::new(); + } + + let observable = Arc::new(Observable::new(ms)); + + for callback in builder.callbacks { + let cb_inst = Arc::clone(&observable); + self.pipes + .register_callback(move || callback(cb_inst.as_ref())); + } - Ok(ObservableCounter::new(observable)) + ObservableUpDownCounter::new() + } + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableUpDownCounter will not be invoked.", + reason = format!("{}", err)); + ObservableUpDownCounter::new() + } + } } - fn i64_up_down_counter( + fn create_observable_gauge( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.i64_resolver); - p.lookup(InstrumentKind::UpDownCounter, name, description, unit) - .map(|i| UpDownCounter::new(Arc::new(i))) + builder: AsyncInstrumentBuilder<'_, ObservableGauge, T>, + resolver: &InstrumentResolver<'_, T>, + ) -> ObservableGauge + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableGauge will not be invoked.", + reason = format!("{}", err)); + return ObservableGauge::new(); + } + + match resolver.measures( + InstrumentKind::ObservableGauge, + builder.name.clone(), + builder.description, + builder.unit, + None, + ) { + Ok(ms) => { + if ms.is_empty() { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableGauge will not be invoked. Check View Configuration." 
+ ); + return ObservableGauge::new(); + } + + let observable = Arc::new(Observable::new(ms)); + + for callback in builder.callbacks { + let cb_inst = Arc::clone(&observable); + self.pipes + .register_callback(move || callback(cb_inst.as_ref())); + } + + ObservableGauge::new() + } + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Callbacks for this ObservableGauge will not be invoked.", + reason = format!("{}", err)); + ObservableGauge::new() + } + } } - fn f64_up_down_counter( + fn create_updown_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup(InstrumentKind::UpDownCounter, name, description, unit) + builder: InstrumentBuilder<'_, UpDownCounter>, + resolver: &InstrumentResolver<'_, T>, + ) -> UpDownCounter + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this UpDownCounter will be ignored.", + reason = format!("{}", err) + ); + return UpDownCounter::new(Arc::new(NoopSyncInstrument::new())); + } + + match resolver + .lookup( + InstrumentKind::UpDownCounter, + builder.name.clone(), + builder.description, + builder.unit, + None, + ) .map(|i| UpDownCounter::new(Arc::new(i))) + { + Ok(updown_counter) => updown_counter, + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this UpDownCounter will be ignored.", + reason = format!("{}", err) + ); + UpDownCounter::new(Arc::new(NoopSyncInstrument::new())) + } + } } - fn i64_observable_up_down_counter( + fn create_gauge( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.i64_resolver); - let ms = p.measures( - InstrumentKind::ObservableUpDownCounter, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableUpDownCounter::new(Arc::new( - NoopAsyncInstrument::new(), - ))); + builder: InstrumentBuilder<'_, Gauge>, + resolver: &InstrumentResolver<'_, T>, + ) -> Gauge + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this Gauge will be ignored.", + reason = format!("{}", err) + ); + return Gauge::new(Arc::new(NoopSyncInstrument::new())); } - let observable = Arc::new(Observable::new(ms)); - - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); + match resolver + .lookup( + InstrumentKind::Gauge, + builder.name.clone(), + builder.description, + builder.unit, + None, + ) + .map(|i| Gauge::new(Arc::new(i))) + { + Ok(gauge) => gauge, + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + 
meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this Gauge will be ignored.", + reason = format!("{}", err) + ); + Gauge::new(Arc::new(NoopSyncInstrument::new())) + } } - - Ok(ObservableUpDownCounter::new(observable)) } - fn f64_observable_up_down_counter( + fn create_histogram( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - let ms = p.measures( - InstrumentKind::ObservableUpDownCounter, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableUpDownCounter::new(Arc::new( - NoopAsyncInstrument::new(), - ))); + builder: HistogramBuilder<'_, Histogram>, + resolver: &InstrumentResolver<'_, T>, + ) -> Histogram + where + T: Number, + { + let validation_result = validate_instrument_config(builder.name.as_ref(), &builder.unit); + if let Err(err) = validation_result { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this Histogram will be ignored.", + reason = format!("{}", err) + ); + return Histogram::new(Arc::new(NoopSyncInstrument::new())); } - let observable = Arc::new(Observable::new(ms)); + if let Some(ref boundaries) = builder.boundaries { + let validation_result = validate_bucket_boundaries(boundaries); + if let Err(err) = validation_result { + // TODO: Include the buckets too in the error message. + // TODO: This validation is not done when Views are used to + // provide boundaries, and that should be fixed. + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this Histogram will be ignored.", + reason = format!("{}", err) + ); + return Histogram::new(Arc::new(NoopSyncInstrument::new())); + } + } - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); + match resolver + .lookup( + InstrumentKind::Histogram, + builder.name.clone(), + builder.description, + builder.unit, + builder.boundaries, + ) + .map(|i| Histogram::new(Arc::new(i))) + { + Ok(histogram) => histogram, + Err(err) => { + otel_error!( + name: "InstrumentCreationFailed", + meter_name = self.scope.name(), + instrument_name = builder.name.as_ref(), + message = "Measurements from this Histogram will be ignored.", + reason = format!("{}", err) + ); + Histogram::new(Arc::new(NoopSyncInstrument::new())) + } } + } +} - Ok(ObservableUpDownCounter::new(observable)) +#[doc(hidden)] +impl InstrumentProvider for SdkMeter { + fn u64_counter(&self, builder: InstrumentBuilder<'_, Counter>) -> Counter { + let resolver = InstrumentResolver::new(self, &self.u64_resolver); + self.create_counter(builder, &resolver) } - fn u64_gauge( - &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.u64_resolver); - p.lookup(InstrumentKind::Gauge, name, description, unit) - .map(|i| Gauge::new(Arc::new(i))) + fn f64_counter(&self, builder: InstrumentBuilder<'_, Counter>) -> Counter { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_counter(builder, &resolver) } - fn 
f64_gauge( + fn u64_observable_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup(InstrumentKind::Gauge, name, description, unit) - .map(|i| Gauge::new(Arc::new(i))) + builder: AsyncInstrumentBuilder<'_, ObservableCounter, u64>, + ) -> ObservableCounter { + let resolver = InstrumentResolver::new(self, &self.u64_resolver); + self.create_observable_counter(builder, &resolver) } - fn i64_gauge( + fn f64_observable_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.i64_resolver); - p.lookup(InstrumentKind::Gauge, name, description, unit) - .map(|i| Gauge::new(Arc::new(i))) + builder: AsyncInstrumentBuilder<'_, ObservableCounter, f64>, + ) -> ObservableCounter { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_observable_counter(builder, &resolver) } - fn u64_observable_gauge( + fn i64_up_down_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.u64_resolver); - let ms = p.measures( - InstrumentKind::ObservableGauge, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableGauge::new(Arc::new(NoopAsyncInstrument::new()))); - } - - let observable = Arc::new(Observable::new(ms)); + builder: InstrumentBuilder<'_, UpDownCounter>, + ) -> UpDownCounter { + let resolver = InstrumentResolver::new(self, &self.i64_resolver); + self.create_updown_counter(builder, &resolver) + } - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); - } + fn f64_up_down_counter( + &self, + builder: InstrumentBuilder<'_, UpDownCounter>, + ) -> UpDownCounter { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_updown_counter(builder, &resolver) + } - Ok(ObservableGauge::new(observable)) + fn i64_observable_up_down_counter( + &self, + builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, i64>, + ) -> ObservableUpDownCounter { + let resolver = InstrumentResolver::new(self, &self.i64_resolver); + self.create_observable_updown_counter(builder, &resolver) } - fn i64_observable_gauge( + fn f64_observable_up_down_counter( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.i64_resolver); - let ms = p.measures( - InstrumentKind::ObservableGauge, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableGauge::new(Arc::new(NoopAsyncInstrument::new()))); - } + builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, f64>, + ) -> ObservableUpDownCounter { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_observable_updown_counter(builder, &resolver) + } - let observable = Arc::new(Observable::new(ms)); + fn u64_gauge(&self, builder: InstrumentBuilder<'_, Gauge>) -> Gauge { + let resolver = InstrumentResolver::new(self, 
&self.u64_resolver); + self.create_gauge(builder, &resolver) + } - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); - } + fn f64_gauge(&self, builder: InstrumentBuilder<'_, Gauge>) -> Gauge { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_gauge(builder, &resolver) + } - Ok(ObservableGauge::new(observable)) + fn i64_gauge(&self, builder: InstrumentBuilder<'_, Gauge>) -> Gauge { + let resolver = InstrumentResolver::new(self, &self.i64_resolver); + self.create_gauge(builder, &resolver) } - fn f64_observable_gauge( + fn u64_observable_gauge( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - callbacks: Vec>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - let ms = p.measures( - InstrumentKind::ObservableGauge, - name.clone(), - description.clone(), - unit.clone(), - )?; - if ms.is_empty() { - return Ok(ObservableGauge::new(Arc::new(NoopAsyncInstrument::new()))); - } - - let observable = Arc::new(Observable::new(ms)); - - for callback in callbacks { - let cb_inst = Arc::clone(&observable); - self.pipes - .register_callback(move || callback(cb_inst.as_ref())); - } - - Ok(ObservableGauge::new(observable)) + builder: AsyncInstrumentBuilder<'_, ObservableGauge, u64>, + ) -> ObservableGauge { + let resolver = InstrumentResolver::new(self, &self.u64_resolver); + self.create_observable_gauge(builder, &resolver) } - fn f64_histogram( + fn i64_observable_gauge( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.f64_resolver); - p.lookup(InstrumentKind::Histogram, name, description, unit) - .map(|i| Histogram::new(Arc::new(i))) + builder: AsyncInstrumentBuilder<'_, ObservableGauge, i64>, + ) -> ObservableGauge { + let resolver = InstrumentResolver::new(self, &self.i64_resolver); + self.create_observable_gauge(builder, &resolver) } - fn u64_histogram( + fn f64_observable_gauge( &self, - name: Cow<'static, str>, - description: Option>, - unit: Option>, - ) -> Result> { - validate_instrument_config(name.as_ref(), &unit, self.validation_policy)?; - let p = InstrumentResolver::new(self, &self.u64_resolver); - p.lookup(InstrumentKind::Histogram, name, description, unit) - .map(|i| Histogram::new(Arc::new(i))) + builder: AsyncInstrumentBuilder<'_, ObservableGauge, f64>, + ) -> ObservableGauge { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_observable_gauge(builder, &resolver) + } + + fn f64_histogram(&self, builder: HistogramBuilder<'_, Histogram>) -> Histogram { + let resolver = InstrumentResolver::new(self, &self.f64_resolver); + self.create_histogram(builder, &resolver) + } + + fn u64_histogram(&self, builder: HistogramBuilder<'_, Histogram>) -> Histogram { + let resolver = InstrumentResolver::new(self, &self.u64_resolver); + self.create_histogram(builder, &resolver) } } -/// Validation policy for instrument -#[derive(Clone, Copy)] -enum InstrumentValidationPolicy { - HandleGlobalAndIgnore, - /// Currently only for test - #[cfg(test)] - Strict, +fn validate_instrument_config(name: &str, unit: &Option>) -> MetricResult<()> { + validate_instrument_name(name).and_then(|_| validate_instrument_unit(unit)) } -fn validate_instrument_config( - name: &str, - unit: 
&Option<Cow<'static, str>>, - policy: InstrumentValidationPolicy, -) -> Result<()> { - match validate_instrument_name(name).and_then(|_| validate_instrument_unit(unit)) { - Ok(_) => Ok(()), - Err(err) => match policy { - InstrumentValidationPolicy::HandleGlobalAndIgnore => { - global::handle_error(err); - Ok(()) - } - #[cfg(test)] - InstrumentValidationPolicy::Strict => Err(err), - }, +fn validate_bucket_boundaries(boundaries: &[f64]) -> MetricResult<()> { + // Validate boundaries do not contain f64::NAN, f64::INFINITY, or f64::NEG_INFINITY + for boundary in boundaries { + if boundary.is_nan() || boundary.is_infinite() { + return Err(MetricError::InvalidInstrumentConfiguration( + "Bucket boundaries must not contain NaN, +Inf, or -Inf", + )); + } + } + + // validate that buckets are sorted and non-duplicate + for i in 1..boundaries.len() { + if boundaries[i] <= boundaries[i - 1] { + return Err(MetricError::InvalidInstrumentConfiguration( + "Bucket boundaries must be sorted and non-duplicate", + )); + } } + + Ok(()) } -fn validate_instrument_name(name: &str) -> Result<()> { +fn validate_instrument_name(name: &str) -> MetricResult<()> { if name.is_empty() { - return Err(MetricsError::InvalidInstrumentConfiguration( + return Err(MetricError::InvalidInstrumentConfiguration( INSTRUMENT_NAME_EMPTY, )); } if name.len() > INSTRUMENT_NAME_MAX_LENGTH { - return Err(MetricsError::InvalidInstrumentConfiguration( + return Err(MetricError::InvalidInstrumentConfiguration( INSTRUMENT_NAME_LENGTH, )); } if name.starts_with(|c: char| !c.is_ascii_alphabetic()) { - return Err(MetricsError::InvalidInstrumentConfiguration( + return Err(MetricError::InvalidInstrumentConfiguration( INSTRUMENT_NAME_FIRST_ALPHABETIC, )); } if name.contains(|c: char| { !c.is_ascii_alphanumeric() && !INSTRUMENT_NAME_ALLOWED_NON_ALPHANUMERIC_CHARS.contains(&c) }) { - return Err(MetricsError::InvalidInstrumentConfiguration( + return Err(MetricError::InvalidInstrumentConfiguration( INSTRUMENT_NAME_INVALID_CHAR, )); } Ok(()) } -fn validate_instrument_unit(unit: &Option<Cow<'static, str>>) -> Result<()> { +fn validate_instrument_unit(unit: &Option<Cow<'static, str>>) -> MetricResult<()> { if let Some(unit) = unit { if unit.len() > INSTRUMENT_UNIT_NAME_MAX_LENGTH { - return Err(MetricsError::InvalidInstrumentConfiguration( + return Err(MetricError::InvalidInstrumentConfiguration( INSTRUMENT_UNIT_LENGTH, )); } if unit.contains(|c: char| !c.is_ascii()) { - return Err(MetricsError::InvalidInstrumentConfiguration( + return Err(MetricError::InvalidInstrumentConfiguration( INSTRUMENT_UNIT_INVALID_CHAR, )); } @@ -494,8 +641,9 @@ where name: Cow<'static, str>, description: Option<Cow<'static, str>>, unit: Option<Cow<'static, str>>, - ) -> Result<ResolvedMeasures<T>> { - let aggregators = self.measures(kind, name, description, unit)?; + boundaries: Option<Vec<f64>>, + ) -> MetricResult<ResolvedMeasures<T>> { + let aggregators = self.measures(kind, name, description, unit, boundaries)?; Ok(ResolvedMeasures { measures: aggregators, }) @@ -507,7 +655,8 @@ where name: Cow<'static, str>, description: Option<Cow<'static, str>>, unit: Option<Cow<'static, str>>, - ) -> Result<Vec<Arc<dyn internal::Measure<T>>>> { + boundaries: Option<Vec<f64>>, + ) -> MetricResult<Vec<Arc<dyn internal::Measure<T>>>> { let inst = Instrument { name, description: description.unwrap_or_default(), @@ -516,45 +665,24 @@ where scope: self.meter.scope.clone(), }; - self.resolve.measures(inst) + self.resolve.measures(inst, boundaries) } } #[cfg(test)] mod tests { - use std::sync::Arc; + use std::borrow::Cow; - use opentelemetry::metrics::{InstrumentProvider, MeterProvider, MetricsError}; + use crate::metrics::MetricError; use super::{ - InstrumentValidationPolicy, SdkMeter, INSTRUMENT_NAME_FIRST_ALPHABETIC, +
validate_instrument_name, validate_instrument_unit, INSTRUMENT_NAME_FIRST_ALPHABETIC, INSTRUMENT_NAME_INVALID_CHAR, INSTRUMENT_NAME_LENGTH, INSTRUMENT_UNIT_INVALID_CHAR, INSTRUMENT_UNIT_LENGTH, }; - use crate::{ - metrics::{pipeline::Pipelines, SdkMeterProvider}, - Resource, Scope, - }; - - #[test] - #[ignore = "See issue https://github.com/open-telemetry/opentelemetry-rust/issues/1699"] - fn test_instrument_creation() { - let provider = SdkMeterProvider::builder().build(); - let meter = provider.meter("test"); - assert!(meter.u64_counter("test").try_init().is_ok()); - let result = meter.u64_counter("test with invalid name").try_init(); - // this assert fails, as result is always ok variant. - assert!(result.is_err()); - } #[test] - fn test_instrument_config_validation() { - // scope and pipelines are not related to test - let meter = SdkMeter::new( - Scope::default(), - Arc::new(Pipelines::new(Resource::default(), Vec::new(), Vec::new())), - ) - .with_validation_policy(InstrumentValidationPolicy::Strict); + fn instrument_name_validation() { // (name, expected error) let instrument_name_test_cases = vec![ ("validateName", ""), @@ -568,71 +696,23 @@ mod tests { ("allow.dots.ok", ""), ]; for (name, expected_error) in instrument_name_test_cases { - let assert = |result: Result<_, MetricsError>| { + let assert = |result: Result<_, MetricError>| { if expected_error.is_empty() { assert!(result.is_ok()); } else { assert!(matches!( result.unwrap_err(), - MetricsError::InvalidInstrumentConfiguration(msg) if msg == expected_error + MetricError::InvalidInstrumentConfiguration(msg) if msg == expected_error )); } }; - assert(meter.u64_counter(name.into(), None, None).map(|_| ())); - assert(meter.f64_counter(name.into(), None, None).map(|_| ())); - assert( - meter - .u64_observable_counter(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_observable_counter(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert( - meter - .i64_up_down_counter(name.into(), None, None) - .map(|_| ()), - ); - assert( - meter - .f64_up_down_counter(name.into(), None, None) - .map(|_| ()), - ); - assert( - meter - .i64_observable_up_down_counter(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_observable_up_down_counter(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert(meter.u64_gauge(name.into(), None, None).map(|_| ())); - assert(meter.f64_gauge(name.into(), None, None).map(|_| ())); - assert(meter.i64_gauge(name.into(), None, None).map(|_| ())); - assert( - meter - .u64_observable_gauge(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert( - meter - .i64_observable_gauge(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_observable_gauge(name.into(), None, None, Vec::new()) - .map(|_| ()), - ); - assert(meter.f64_histogram(name.into(), None, None).map(|_| ())); - assert(meter.u64_histogram(name.into(), None, None).map(|_| ())); + assert(validate_instrument_name(name).map(|_| ())); } + } + #[test] + fn instrument_unit_validation() { // (unit, expected error) let instrument_unit_test_cases = vec![ ( @@ -647,82 +727,19 @@ mod tests { ]; for (unit, expected_error) in instrument_unit_test_cases { - let assert = |result: Result<_, MetricsError>| { + let assert = |result: Result<_, MetricError>| { if expected_error.is_empty() { assert!(result.is_ok()); } else { assert!(matches!( result.unwrap_err(), - MetricsError::InvalidInstrumentConfiguration(msg) if msg == expected_error + 
MetricError::InvalidInstrumentConfiguration(msg) if msg == expected_error )); } }; - let unit = Some(unit.into()); - assert( - meter - .u64_counter("test".into(), None, unit.clone()) - .map(|_| ()), - ); - assert( - meter - .f64_counter("test".into(), None, unit.clone()) - .map(|_| ()), - ); - assert( - meter - .u64_observable_counter("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_observable_counter("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .i64_up_down_counter("test".into(), None, unit.clone()) - .map(|_| ()), - ); - assert( - meter - .f64_up_down_counter("test".into(), None, unit.clone()) - .map(|_| ()), - ); - assert( - meter - .i64_observable_up_down_counter("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_observable_up_down_counter("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .u64_observable_gauge("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .i64_observable_gauge("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_observable_gauge("test".into(), None, unit.clone(), Vec::new()) - .map(|_| ()), - ); - assert( - meter - .f64_histogram("test".into(), None, unit.clone()) - .map(|_| ()), - ); - assert( - meter - .u64_histogram("test".into(), None, unit.clone()) - .map(|_| ()), - ); + let unit: Option> = Some(unit.into()); + + assert(validate_instrument_unit(&unit).map(|_| ())); } } } diff --git a/opentelemetry-sdk/src/metrics/meter_provider.rs b/opentelemetry-sdk/src/metrics/meter_provider.rs index c693b2aa56..011de1f41c 100644 --- a/opentelemetry-sdk/src/metrics/meter_provider.rs +++ b/opentelemetry-sdk/src/metrics/meter_provider.rs @@ -1,6 +1,5 @@ use core::fmt; use std::{ - borrow::Cow, collections::HashMap, sync::{ atomic::{AtomicBool, Ordering}, @@ -9,32 +8,37 @@ use std::{ }; use opentelemetry::{ - global, - metrics::{noop::NoopMeterCore, Meter, MeterProvider, MetricsError, Result}, - KeyValue, + metrics::{Meter, MeterProvider}, + otel_debug, otel_error, otel_info, InstrumentationScope, }; -use crate::{instrumentation::Scope, Resource}; +use crate::metrics::{MetricError, MetricResult}; +use crate::Resource; -use super::{meter::SdkMeter, pipeline::Pipelines, reader::MetricReader, view::View}; +use super::{ + meter::SdkMeter, noop::NoopMeter, pipeline::Pipelines, reader::MetricReader, view::View, +}; /// Handles the creation and coordination of [Meter]s. /// /// All `Meter`s created by a `MeterProvider` will be associated with the same /// [Resource], have the same [View]s applied to them, and have their produced -/// metric telemetry passed to the configured [MetricReader]s. -/// +/// metric telemetry passed to the configured [MetricReader]s. This is a +/// clonable handle to the MeterProvider implementation itself, and cloning it +/// will create a new reference, not a new instance of a MeterProvider. Dropping +/// the last reference to it will trigger shutdown of the provider. Shutdown can +/// also be triggered manually by calling the `shutdown` method. 
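A short sketch of the handle semantics described in the doc comment above (hypothetical usage; `Default` comes from the retained `impl Default for SdkMeterProvider`):

```rust
use opentelemetry_sdk::metrics::SdkMeterProvider;

fn main() {
    let provider = SdkMeterProvider::default();
    // Clones are cheap references to one shared provider, not new providers.
    let handle = provider.clone();
    drop(handle); // not the last reference, so this does not trigger shutdown
    // Explicit shutdown; a second call would return Err, and the final drop
    // sees shutdown_invoked and skips shutting down again.
    provider.shutdown().expect("first shutdown should succeed");
}
```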
/// [Meter]: opentelemetry::metrics::Meter #[derive(Clone, Debug)] pub struct SdkMeterProvider { inner: Arc<SdkMeterProviderInner>, } -#[derive(Clone, Debug)] +#[derive(Debug)] struct SdkMeterProviderInner { pipes: Arc<Pipelines>, - meters: Arc<Mutex<HashMap<Scope, Arc<SdkMeter>>>>, - is_shutdown: Arc<AtomicBool>, + meters: Mutex<HashMap<InstrumentationScope, Arc<SdkMeter>>>, + shutdown_invoked: AtomicBool, } impl Default for SdkMeterProvider { @@ -88,7 +92,7 @@ impl SdkMeterProvider { /// Ok(()) /// } /// ``` - pub fn force_flush(&self) -> Result<()> { + pub fn force_flush(&self) -> MetricResult<()> { self.inner.force_flush() } @@ -104,74 +108,114 @@ impl SdkMeterProvider { /// /// There is no guaranteed that all telemetry be flushed or all resources have /// been released on error. - pub fn shutdown(&self) -> Result<()> { + pub fn shutdown(&self) -> MetricResult<()> { + otel_info!( + name: "MeterProvider.Shutdown", + message = "User initiated shutdown of MeterProvider." + ); self.inner.shutdown() } } impl SdkMeterProviderInner { - fn force_flush(&self) -> Result<()> { - self.pipes.force_flush() + fn force_flush(&self) -> MetricResult<()> { + if self + .shutdown_invoked + .load(std::sync::atomic::Ordering::Relaxed) + { + Err(MetricError::Other( + "Cannot perform flush as MeterProvider shutdown already invoked.".into(), + )) + } else { + self.pipes.force_flush() + } } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { if self - .is_shutdown - .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) - .is_ok() + .shutdown_invoked + .swap(true, std::sync::atomic::Ordering::SeqCst) { - self.pipes.shutdown() - } else { - Err(MetricsError::Other( - "metrics provider already shut down".into(), + // If the previous value was true, shutdown was already invoked. + Err(MetricError::Other( + "MeterProvider shutdown already invoked.".into(), )) + } else { + self.pipes.shutdown() } } } impl Drop for SdkMeterProviderInner { fn drop(&mut self) { - if let Err(err) = self.shutdown() { - global::handle_error(err); + // If the user has already shut down the provider manually by calling + // shutdown(), we don't need to call shutdown again. + if self.shutdown_invoked.load(Ordering::Relaxed) { + otel_debug!( + name: "MeterProvider.Drop.AlreadyShutdown", + message = "MeterProvider was already shut down; drop will not attempt shutdown again." + ); + } else { + otel_info!( + name: "MeterProvider.Drop", + message = "Last reference of MeterProvider dropped, initiating shutdown."
+ ); + if let Err(err) = self.shutdown() { + otel_error!( + name: "MeterProvider.Drop.ShutdownFailed", + message = "Shutdown attempt failed during drop of MeterProvider.", + reason = format!("{}", err) + ); + } else { + otel_info!( + name: "MeterProvider.Drop.ShutdownCompleted", + ); + } } } } -impl MeterProvider for SdkMeterProvider { - fn versioned_meter( - &self, - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, - ) -> Meter { - if self.inner.is_shutdown.load(Ordering::Relaxed) { - return Meter::new(Arc::new(NoopMeterCore::new())); - } - let mut builder = Scope::builder(name); +impl MeterProvider for SdkMeterProvider { + fn meter(&self, name: &'static str) -> Meter { + let scope = InstrumentationScope::builder(name).build(); + self.meter_with_scope(scope) + } - if let Some(v) = version { - builder = builder.with_version(v); - } - if let Some(s) = schema_url { - builder = builder.with_schema_url(s); - } - if let Some(a) = attributes { - builder = builder.with_attributes(a); + fn meter_with_scope(&self, scope: InstrumentationScope) -> Meter { + if self.inner.shutdown_invoked.load(Ordering::Relaxed) { + otel_debug!( + name: "MeterProvider.NoOpMeterReturned", + meter_name = scope.name(), + ); + return Meter::new(Arc::new(NoopMeter::new())); } - let scope = builder.build(); + if scope.name().is_empty() { + otel_info!(name: "MeterNameEmpty", message = "Meter name is empty; consider providing a meaningful name. Meter will function normally and the provided name will be used as-is."); + }; if let Ok(mut meters) = self.inner.meters.lock() { - let meter = meters - .entry(scope) - .or_insert_with_key(|scope| { - Arc::new(SdkMeter::new(scope.clone(), self.inner.pipes.clone())) - }) - .clone(); - Meter::new(meter) + if let Some(existing_meter) = meters.get(&scope) { + otel_debug!( + name: "MeterProvider.ExistingMeterReturned", + meter_name = scope.name(), + ); + Meter::new(existing_meter.clone()) + } else { + let new_meter = Arc::new(SdkMeter::new(scope.clone(), self.inner.pipes.clone())); + meters.insert(scope.clone(), new_meter.clone()); + otel_debug!( + name: "MeterProvider.NewMeterCreated", + meter_name = scope.name(), + ); + Meter::new(new_meter) + } } else { - Meter::new(Arc::new(NoopMeterCore::new())) + otel_debug!( + name: "MeterProvider.NoOpMeterReturned", + meter_name = scope.name(), + ); + Meter::new(Arc::new(NoopMeter::new())) } } } @@ -207,6 +251,7 @@ impl MeterProviderBuilder { self } + #[cfg(feature = "spec_unstable_metrics_views")] /// Associates a [View] with a [MeterProvider]. /// /// [View]s are appended to existing ones in a [MeterProvider] if this option is @@ -220,19 +265,28 @@ impl MeterProviderBuilder { } /// Construct a new [MeterProvider] with this configuration. 
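Before the `build()` change below, a sketch of the scope-based meter creation this file now exposes; `meter_with_scope`, `InstrumentationScope::builder`, and the instrument `build()` calls are all taken from this diff, the rest is assumed usage:

```rust
use opentelemetry::{metrics::MeterProvider as _, InstrumentationScope, KeyValue};
use opentelemetry_sdk::metrics::SdkMeterProvider;

fn main() {
    let provider = SdkMeterProvider::builder().build();
    let scope = InstrumentationScope::builder("my-library")
        .with_version("1.0.0")
        .build();
    // Identical scopes return the same cached SdkMeter; after shutdown (or a
    // poisoned meters lock) a no-op meter is handed out instead.
    let meter = provider.meter_with_scope(scope);
    let counter = meter.u64_counter("requests").build();
    counter.add(1, &[KeyValue::new("path", "/")]);
}
```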
- pub fn build(self) -> SdkMeterProvider { - SdkMeterProvider { + otel_debug!( + name: "MeterProvider.Building", + builder = format!("{:?}", &self), + ); + + let meter_provider = SdkMeterProvider { inner: Arc::new(SdkMeterProviderInner { pipes: Arc::new(Pipelines::new( - self.resource.unwrap_or_default(), + self.resource.unwrap_or(Resource::builder().build()), self.readers, self.views, )), meters: Default::default(), - is_shutdown: Arc::new(AtomicBool::new(false)), + shutdown_invoked: AtomicBool::new(false), }), - } + }; + + otel_info!( + name: "MeterProvider.Built", + ); + meter_provider } } @@ -245,15 +299,15 @@ impl fmt::Debug for MeterProviderBuilder { .finish() } } -#[cfg(test)] +#[cfg(all(test, feature = "testing"))] mod tests { use crate::resource::{ SERVICE_NAME, TELEMETRY_SDK_LANGUAGE, TELEMETRY_SDK_NAME, TELEMETRY_SDK_VERSION, }; use crate::testing::metrics::metric_reader::TestMetricReader; use crate::Resource; - use opentelemetry::global; use opentelemetry::metrics::MeterProvider; + use opentelemetry::{global, InstrumentationScope}; use opentelemetry::{Key, KeyValue, Value}; use std::env; @@ -309,10 +363,11 @@ mod tests { let reader2 = TestMetricReader::new(); let custom_meter_provider = super::SdkMeterProvider::builder() .with_reader(reader2) - .with_resource(Resource::new(vec![KeyValue::new( - SERVICE_NAME, - "test_service", - )])) + .with_resource( + Resource::builder_empty() + .with_service_name("test_service") + .build(), + ) .build(); assert_resource(&custom_meter_provider, SERVICE_NAME, Some("test_service")); assert_eq!(custom_meter_provider.inner.pipes.0[0].resource.len(), 1); @@ -346,10 +401,14 @@ mod tests { let reader4 = TestMetricReader::new(); let user_provided_resource_config_provider = super::SdkMeterProvider::builder() .with_reader(reader4) - .with_resource(Resource::default().merge(&mut Resource::new(vec![ - KeyValue::new("my-custom-key", "my-custom-value"), - KeyValue::new("my-custom-key2", "my-custom-value2"), - ]))) + .with_resource( + Resource::builder() + .with_attributes([ + KeyValue::new("my-custom-key", "my-custom-value"), + KeyValue::new("my-custom-key2", "my-custom-value2"), + ]) + .build(), + ) .build(); assert_resource( &user_provided_resource_config_provider, @@ -401,7 +460,7 @@ mod tests { assert!(!reader.is_shutdown()); // create a meter and an instrument let meter = global::meter("test"); - let counter = meter.u64_counter("test_counter").init(); + let counter = meter.u64_counter("test_counter").build(); // no need to drop a meter for meter_provider shutdown let shutdown_res = provider.shutdown(); assert!(shutdown_res.is_ok()); @@ -446,21 +505,29 @@ mod tests { let _meter1 = provider.meter("test"); let _meter2 = provider.meter("test"); assert_eq!(provider.inner.meters.lock().unwrap().len(), 1); - let _meter3 = - provider.versioned_meter("test", Some("1.0.0"), Some("http://example.com"), None); - let _meter4 = - provider.versioned_meter("test", Some("1.0.0"), Some("http://example.com"), None); - let _meter5 = - provider.versioned_meter("test", Some("1.0.0"), Some("http://example.com"), None); + + let scope = InstrumentationScope::builder("test") + .with_version("1.0.0") + .with_schema_url("http://example.com") + .build(); + + let _meter3 = provider.meter_with_scope(scope.clone()); + let _meter4 = provider.meter_with_scope(scope.clone()); + let _meter5 = provider.meter_with_scope(scope); assert_eq!(provider.inner.meters.lock().unwrap().len(), 2); - // the below are different meters, as meter names are case sensitive - let _meter6 = - 
provider.versioned_meter("ABC", Some("1.0.0"), Some("http://example.com"), None); - let _meter7 = - provider.versioned_meter("Abc", Some("1.0.0"), Some("http://example.com"), None); - let _meter8 = - provider.versioned_meter("abc", Some("1.0.0"), Some("http://example.com"), None); + // these are different meters because meter names are case sensitive + let make_scope = |name| { + InstrumentationScope::builder(name) + .with_version("1.0.0") + .with_schema_url("http://example.com") + .build() + }; + + let _meter6 = provider.meter_with_scope(make_scope("ABC")); + let _meter7 = provider.meter_with_scope(make_scope("Abc")); + let _meter8 = provider.meter_with_scope(make_scope("abc")); + assert_eq!(provider.inner.meters.lock().unwrap().len(), 5); } } diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index cf6e3fb928..5faeba724a 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -14,7 +14,7 @@ //! use opentelemetry_sdk::{metrics::SdkMeterProvider, Resource}; //! //! // Generate SDK configuration, resource, views, etc -//! let resource = Resource::default(); // default attributes about the current process +//! let resource = Resource::builder().build(); // default attributes about the current process //! //! // Create a meter provider with the desired config //! let meter_provider = SdkMeterProvider::builder().with_resource(resource).build(); @@ -27,7 +27,7 @@ //! let counter = meter //! .u64_counter("power_consumption") //! .with_unit("kWh") -//! .init(); +//! .build(); //! //! // use instruments to record measurements //! counter.add(10, &[KeyValue::new("rate", "standard")]); @@ -41,102 +41,78 @@ pub(crate) mod aggregation; pub mod data; +mod error; pub mod exporter; pub(crate) mod instrument; pub(crate) mod internal; pub(crate) mod manual_reader; pub(crate) mod meter; mod meter_provider; +pub(crate) mod noop; pub(crate) mod periodic_reader; +#[cfg(feature = "experimental_metrics_periodicreader_with_async_runtime")] +/// Module for periodic reader with async runtime. +pub mod periodic_reader_with_async_runtime; pub(crate) mod pipeline; pub mod reader; pub(crate) mod view; pub use aggregation::*; -pub use instrument::*; +pub use error::{MetricError, MetricResult}; pub use manual_reader::*; pub use meter_provider::*; pub use periodic_reader::*; pub use pipeline::Pipeline; -pub use view::*; - -use std::collections::hash_map::DefaultHasher; -use std::collections::HashSet; -use std::hash::{Hash, Hasher}; -use opentelemetry::{Key, KeyValue, Value}; +pub use instrument::InstrumentKind; -/// A unique set of attributes that can be used as instrument identifiers. -/// -/// This must implement [Hash], [PartialEq], and [Eq] so it may be used as -/// HashMap keys and other de-duplication methods. 
-#[derive(Clone, Default, Debug, PartialEq, Eq)] -pub(crate) struct AttributeSet(Vec, u64); - -impl From<&[KeyValue]> for AttributeSet { - fn from(values: &[KeyValue]) -> Self { - let mut seen_keys = HashSet::with_capacity(values.len()); - let vec = values - .iter() - .rev() - .filter_map(|kv| { - if seen_keys.insert(kv.key.clone()) { - Some(kv.clone()) - } else { - None - } - }) - .collect::>(); - - AttributeSet::new(vec) - } -} - -fn calculate_hash(values: &[KeyValue]) -> u64 { - let mut hasher = DefaultHasher::new(); - values.iter().fold(&mut hasher, |mut hasher, item| { - item.hash(&mut hasher); - hasher - }); - hasher.finish() -} - -impl AttributeSet { - fn new(mut values: Vec) -> Self { - values.sort_unstable(); - let hash = calculate_hash(&values); - AttributeSet(values, hash) - } - - /// Iterate over key value pairs in the set - pub(crate) fn iter(&self) -> impl Iterator { - self.0.iter().map(|kv| (&kv.key, &kv.value)) - } - - /// Returns the underlying Vec of KeyValue pairs - pub(crate) fn into_vec(self) -> Vec { - self.0 - } -} +#[cfg(feature = "spec_unstable_metrics_views")] +pub use instrument::*; +// #[cfg(not(feature = "spec_unstable_metrics_views"))] +// pub(crate) use instrument::*; -impl Hash for AttributeSet { - fn hash(&self, state: &mut H) { - state.write_u64(self.1) - } +#[cfg(feature = "spec_unstable_metrics_views")] +pub use view::*; +// #[cfg(not(feature = "spec_unstable_metrics_views"))] +// pub(crate) use view::*; + +use std::hash::Hash; + +/// Defines the window that an aggregation was calculated over. +#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub enum Temporality { + /// A measurement interval that continues to expand forward in time from a + /// starting point. + /// + /// New measurements are added to all previous measurements since a start time. + #[default] + Cumulative, + + /// A measurement interval that resets each cycle. + /// + /// Measurements from one cycle are recorded independently, measurements from + /// other cycles do not affect them. + Delta, + + /// Configures Synchronous Counter and Histogram instruments to use + /// Delta aggregation temporality, which allows them to shed memory + /// following a cardinality explosion, thus use less memory. + LowMemory, } #[cfg(all(test, feature = "testing"))] mod tests { - use self::data::{DataPoint, HistogramDataPoint, ScopeMetrics}; + use self::data::{HistogramDataPoint, ScopeMetrics, SumDataPoint}; use super::*; - use crate::metrics::data::{ResourceMetrics, Temporality}; - use crate::metrics::reader::TemporalitySelector; - use crate::testing::metrics::InMemoryMetricsExporterBuilder; - use crate::{runtime, testing::metrics::InMemoryMetricsExporter}; + use crate::metrics::data::ResourceMetrics; + use crate::testing::metrics::InMemoryMetricExporter; + use crate::testing::metrics::InMemoryMetricExporterBuilder; + use data::GaugeDataPoint; use opentelemetry::metrics::{Counter, Meter, UpDownCounter}; + use opentelemetry::InstrumentationScope; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; use rand::{rngs, Rng, SeedableRng}; - use std::borrow::Cow; use std::cmp::{max, min}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; @@ -149,6 +125,84 @@ mod tests { // "multi_thread" tokio flavor must be used else flush won't // be able to make progress! + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn invalid_instrument_config_noops() { + // Run this test with stdout enabled to see output. 
+ // cargo test invalid_instrument_config_noops --features=testing -- --nocapture + let invalid_instrument_names = vec![ + "_startWithNoneAlphabet", + "utf8char锈", + "a".repeat(256).leak(), + "invalid name", + ]; + for name in invalid_instrument_names { + let test_context = TestContext::new(Temporality::Cumulative); + let counter = test_context.meter().u64_counter(name).build(); + counter.add(1, &[]); + + let up_down_counter = test_context.meter().i64_up_down_counter(name).build(); + up_down_counter.add(1, &[]); + + let gauge = test_context.meter().f64_gauge(name).build(); + gauge.record(1.9, &[]); + + let histogram = test_context.meter().f64_histogram(name).build(); + histogram.record(1.0, &[]); + + let _observable_counter = test_context + .meter() + .u64_observable_counter(name) + .with_callback(move |observer| { + observer.observe(1, &[]); + }) + .build(); + + let _observable_gauge = test_context + .meter() + .f64_observable_gauge(name) + .with_callback(move |observer| { + observer.observe(1.0, &[]); + }) + .build(); + + let _observable_up_down_counter = test_context + .meter() + .i64_observable_up_down_counter(name) + .with_callback(move |observer| { + observer.observe(1, &[]); + }) + .build(); + + test_context.flush_metrics(); + + // As the instrument name is invalid, no metrics should be exported + test_context.check_no_metrics(); + } + + let invalid_bucket_boundaries = vec![ + vec![1.0, 1.0], // duplicate boundaries + vec![1.0, 2.0, 3.0, 2.0], // duplicate non-consecutive boundaries + vec![1.0, 2.0, 3.0, 4.0, 2.5], // unsorted boundaries + vec![1.0, 2.0, 3.0, f64::INFINITY, 4.0], // boundaries with positive infinity + vec![1.0, 2.0, 3.0, f64::NAN], // boundaries with NaNs + vec![f64::NEG_INFINITY, 2.0, 3.0], // boundaries with negative infinity + ]; + for bucket_boundaries in invalid_bucket_boundaries { + let test_context = TestContext::new(Temporality::Cumulative); + let histogram = test_context + .meter() + .f64_histogram("test") + .with_boundaries(bucket_boundaries) + .build(); + histogram.record(1.9, &[]); + test_context.flush_metrics(); + + // As the bucket boundaries provided via advisory params are invalid, no + // metrics should be exported + test_context.check_no_metrics(); + } + } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn counter_aggregation_delta() { // Run this test with stdout enabled to see output. @@ -259,6 +313,14 @@ mod tests { histogram_aggregation_helper(Temporality::Delta); } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn histogram_aggregation_with_custom_bounds() { + // Run this test with stdout enabled to see output. + // cargo test histogram_aggregation_with_custom_bounds --features=testing -- --nocapture + histogram_aggregation_with_custom_bounds_helper(Temporality::Delta); + histogram_aggregation_with_custom_bounds_helper(Temporality::Cumulative); + } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn updown_counter_aggregation_cumulative() { // Run this test with stdout enabled to see output.
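The user-visible consequence of the tests above, sketched from the diff's own behavior (creation no longer returns a `Result`; failures are reported through `otel_error!` and a no-op instrument); the instrument name here is a made-up example:

```rust
use opentelemetry::metrics::MeterProvider as _;
use opentelemetry_sdk::metrics::SdkMeterProvider;

fn main() {
    let provider = SdkMeterProvider::builder().build();
    let meter = provider.meter("demo");
    // An invalid name (leading digit) logs an InstrumentCreationFailed error
    // and yields a no-op counter, so this measurement is silently dropped.
    let bad_counter = meter.u64_counter("0_starts_with_digit").build();
    bad_counter.add(1, &[]);
}
```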
@@ -382,7 +444,7 @@ mod tests { *index += 1; } }) - .init(); + .build(); for (iter, v) in values_clone.iter().enumerate() { test_context.flush_metrics(); @@ -403,7 +465,7 @@ mod tests { let data_point = if is_empty_attributes { &sum.data_points[0] } else { - find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected") }; @@ -424,11 +486,49 @@ mod tests { } } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn empty_meter_name_retained() { + async fn meter_name_retained_helper( + meter: Meter, + provider: SdkMeterProvider, + exporter: InMemoryMetricExporter, + ) { + // Act + let counter = meter.u64_counter("my_counter").build(); + + counter.add(10, &[]); + provider.force_flush().unwrap(); + + // Assert + let resource_metrics = exporter + .get_finished_metrics() + .expect("metrics are expected to be exported."); + assert!( + resource_metrics[0].scope_metrics[0].metrics.len() == 1, + "There should be a single metric" + ); + let meter_name = resource_metrics[0].scope_metrics[0].scope.name(); + assert_eq!(meter_name, ""); + } + + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); + let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); + + // Test Meter creation in 2 ways, both with empty string as meter name + let meter1 = meter_provider.meter(""); + meter_name_retained_helper(meter1, meter_provider.clone(), exporter.clone()).await; + + let meter_scope = InstrumentationScope::builder("").build(); + let meter2 = meter_provider.meter_with_scope(meter_scope); + meter_name_retained_helper(meter2, meter_provider, exporter).await; + } + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn counter_duplicate_instrument_merge() { // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); // Act @@ -437,13 +537,13 @@ mod tests { .u64_counter("my_counter") .with_unit("my_unit") .with_description("my_description") - .init(); + .build(); let counter_duplicated = meter .u64_counter("my_counter") .with_unit("my_unit") .with_description("my_description") - .init(); + .build(); let attribute = vec![KeyValue::new("key1", "value1")]; counter.add(10, &attribute); @@ -478,8 +578,8 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn counter_duplicate_instrument_different_meter_no_merge() { // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); // Act @@ -489,13 +589,13 @@ mod tests { .u64_counter("my_counter") .with_unit("my_unit") .with_description("my_description") - .init(); + .build(); let counter2 = meter2 .u64_counter("my_counter") .with_unit("my_unit") .with_description("my_description") - .init(); + .build(); let attribute = vec![KeyValue::new("key1", "value1")]; counter1.add(10, &attribute); @@ -567,36 +667,37 @@ mod tests { #[tokio::test(flavor = "multi_thread", 
worker_threads = 1)] async fn instrumentation_scope_identity_test() { // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); // Act // Meters are identical except for scope attributes, but scope attributes are not an identifying property. // Hence there should be a single metric stream output for this test. - let meter1 = meter_provider.versioned_meter( - "test.meter", - Some("v0.1.0"), - Some("schema_url"), - Some(vec![KeyValue::new("key", "value1")]), - ); - let meter2 = meter_provider.versioned_meter( - "test.meter", - Some("v0.1.0"), - Some("schema_url"), - Some(vec![KeyValue::new("key", "value2")]), - ); + let make_scope = |attributes| { + InstrumentationScope::builder("test.meter") + .with_version("v0.1.0") + .with_schema_url("http://example.com") + .with_attributes(attributes) + .build() + }; + + let meter1 = + meter_provider.meter_with_scope(make_scope(vec![KeyValue::new("key", "value1")])); + let meter2 = + meter_provider.meter_with_scope(make_scope(vec![KeyValue::new("key", "value2")])); + let counter1 = meter1 .u64_counter("my_counter") .with_unit("my_unit") .with_description("my_description") - .init(); + .build(); let counter2 = meter2 .u64_counter("my_counter") .with_unit("my_unit") .with_description("my_description") - .init(); + .build(); let attribute = vec![KeyValue::new("key1", "value1")]; counter1.add(10, &attribute); @@ -619,13 +720,13 @@ mod tests { ); let scope = &resource_metrics[0].scope_metrics[0].scope; - assert_eq!(scope.name, "test.meter"); - assert_eq!(scope.version, Some(Cow::Borrowed("v0.1.0"))); - assert_eq!(scope.schema_url, Some(Cow::Borrowed("schema_url"))); + assert_eq!(scope.name(), "test.meter"); + assert_eq!(scope.version(), Some("v0.1.0")); + assert_eq!(scope.schema_url(), Some("http://example.com")); // This is validating current behavior, but it is not guaranteed to be the case in the future, // as this is a user error and SDK reserves right to change this behavior. 
- assert_eq!(scope.attributes, vec![KeyValue::new("key", "value1")]); + assert!(scope.attributes().eq(&[KeyValue::new("key", "value1")])); let metric = &resource_metrics[0].scope_metrics[0].metrics[0]; assert_eq!(metric.name, "my_counter"); @@ -650,8 +751,8 @@ mod tests { // cargo test histogram_aggregation_with_invalid_aggregation_should_proceed_as_if_view_not_exist --features=testing -- --nocapture // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let criteria = Instrument::new().name("test_histogram"); let stream_invalid_aggregation = Stream::new() .aggregation(Aggregation::ExplicitBucketHistogram { @@ -673,7 +774,7 @@ mod tests { let histogram = meter .f64_histogram("test_histogram") .with_unit("test_unit") - .init(); + .build(); histogram.record(1.5, &[KeyValue::new("key1", "value1")]); meter_provider.force_flush().unwrap(); @@ -700,8 +801,8 @@ mod tests { // cargo test metrics::tests::spatial_aggregation_when_view_drops_attributes_observable_counter --features=testing // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let criteria = Instrument::new().name("my_observable_counter"); // View drops all attributes. let stream_invalid_aggregation = Stream::new().allowed_attribute_keys(vec![]); @@ -742,7 +843,7 @@ mod tests { ], ); }) - .init(); + .build(); meter_provider.force_flush().unwrap(); @@ -775,8 +876,8 @@ mod tests { // cargo test spatial_aggregation_when_view_drops_attributes_counter --features=testing // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let criteria = Instrument::new().name("my_counter"); // View drops all attributes. let stream_invalid_aggregation = Stream::new().allowed_attribute_keys(vec![]); @@ -790,7 +891,7 @@ mod tests { // Act let meter = meter_provider.meter("test"); - let counter = meter.u64_counter("my_counter").init(); + let counter = meter.u64_counter("my_counter").build(); // Normally, this would generate 3 time-series, but since the view // drops all attributes, we expect only 1 time-series. 
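The spatial-aggregation tests above install a view whose `Stream` drops every attribute, collapsing what would be several time-series into one. A condensed sketch of that wiring, assuming the views API (`new_view`, `with_view`) behind the `spec_unstable_metrics_views` feature; the instrument name mirrors the test criteria:

```rust
use opentelemetry_sdk::metrics::{new_view, Instrument, PeriodicReader, SdkMeterProvider, Stream};
use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter;

fn build_provider() -> SdkMeterProvider {
    let exporter = InMemoryMetricExporter::default();
    let reader = PeriodicReader::builder(exporter).build();

    // Match "my_counter" and allow no attribute keys, so every measurement
    // is spatially aggregated into a single zero-attribute data point.
    let view = new_view(
        Instrument::new().name("my_counter"),
        Stream::new().allowed_attribute_keys(vec![]),
    )
    .expect("view criteria and mask should be valid");

    SdkMeterProvider::builder()
        .with_reader(reader)
        .with_view(view)
        .build()
}
```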
@@ -870,7 +971,7 @@ mod tests { } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - async fn no_attr_delta_up_down_counter() { + async fn no_attr_up_down_counter_always_cumulative() { let mut test_context = TestContext::new(Temporality::Delta); let counter = test_context.i64_up_down_counter("test", "my_counter", Some("my_unit")); @@ -881,7 +982,11 @@ mod tests { assert_eq!(sum.data_points.len(), 1, "Expected only one data point"); assert!(!sum.is_monotonic, "Should not produce monotonic."); - assert_eq!(sum.temporality, Temporality::Delta, "Should produce Delta"); + assert_eq!( + sum.temporality, + Temporality::Cumulative, + "Should produce Cumulative due to UpDownCounter temporality_preference" + ); let data_point = &sum.data_points[0]; assert!(data_point.attributes.is_empty(), "Non-empty attribute set"); @@ -965,7 +1070,6 @@ mod tests { } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - #[ignore = "Known bug: https://github.com/open-telemetry/opentelemetry-rust/issues/1598"] async fn delta_memory_efficiency_test() { // Run this test with stdout enabled to see output. // cargo test delta_memory_efficiency_test --features=testing -- --nocapture @@ -992,12 +1096,12 @@ mod tests { assert_eq!(sum.data_points.len(), 2); // find and validate key1=value1 datapoint - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 5); // find and validate key1=value2 datapoint - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 3); @@ -1069,7 +1173,7 @@ mod tests { // Create instrument and emit measurements match instrument_name { "counter" => { - let counter = test_context.meter().u64_counter("test_counter").init(); + let counter = test_context.meter().u64_counter("test_counter").build(); counter.add(5, &[]); counter.add(10, attributes); } @@ -1077,17 +1181,17 @@ mod tests { let updown_counter = test_context .meter() .i64_up_down_counter("test_updowncounter") - .init(); + .build(); updown_counter.add(15, &[]); updown_counter.add(20, attributes); } "histogram" => { - let histogram = test_context.meter().u64_histogram("test_histogram").init(); + let histogram = test_context.meter().u64_histogram("test_histogram").build(); histogram.record(25, &[]); histogram.record(30, attributes); } "gauge" => { - let gauge = test_context.meter().u64_gauge("test_gauge").init(); + let gauge = test_context.meter().u64_gauge("test_gauge").build(); gauge.record(35, &[]); gauge.record(40, attributes); } @@ -1114,12 +1218,15 @@ mod tests { test_context.get_aggregation::>("test_counter", None); assert_eq!(counter_data.data_points.len(), 2); let zero_attribute_datapoint = - find_datapoint_with_no_attributes(&counter_data.data_points) + find_sum_datapoint_with_no_attributes(&counter_data.data_points) .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 5); - let data_point1 = - find_datapoint_with_key_value(&counter_data.data_points, "key1", "value1") - .expect("datapoint with key1=value1 expected"); + let data_point1 = find_sum_datapoint_with_key_value( + &counter_data.data_points, + "key1", + "value1", + ) + .expect("datapoint with key1=value1 expected"); 
assert_eq!(data_point1.value, 10); } "updown_counter" => { @@ -1127,10 +1234,10 @@ mod tests { test_context.get_aggregation::>("test_updowncounter", None); assert_eq!(updown_counter_data.data_points.len(), 2); let zero_attribute_datapoint = - find_datapoint_with_no_attributes(&updown_counter_data.data_points) + find_sum_datapoint_with_no_attributes(&updown_counter_data.data_points) .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 15); - let data_point1 = find_datapoint_with_key_value( + let data_point1 = find_sum_datapoint_with_key_value( &updown_counter_data.data_points, "key1", "value1", @@ -1165,12 +1272,15 @@ mod tests { test_context.get_aggregation::>("test_gauge", None); assert_eq!(gauge_data.data_points.len(), 2); let zero_attribute_datapoint = - find_datapoint_with_no_attributes(&gauge_data.data_points) + find_gauge_datapoint_with_no_attributes(&gauge_data.data_points) .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 35); - let data_point1 = - find_datapoint_with_key_value(&gauge_data.data_points, "key1", "value1") - .expect("datapoint with key1=value1 expected"); + let data_point1 = find_gauge_datapoint_with_key_value( + &gauge_data.data_points, + "key1", + "value1", + ) + .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 40); } _ => panic!("Incorrect instrument kind provided"), @@ -1208,7 +1318,7 @@ mod tests { has_run.store(true, Ordering::SeqCst); } }) - .init(); + .build(); } "updown_counter" => { let has_run = AtomicBool::new(false); @@ -1222,7 +1332,7 @@ mod tests { has_run.store(true, Ordering::SeqCst); } }) - .init(); + .build(); } "gauge" => { let has_run = AtomicBool::new(false); @@ -1236,7 +1346,7 @@ mod tests { has_run.store(true, Ordering::SeqCst); } }) - .init(); + .build(); } _ => panic!("Incorrect instrument kind provided"), }; @@ -1262,12 +1372,15 @@ mod tests { assert_eq!(counter_data.data_points.len(), 2); assert!(counter_data.is_monotonic); let zero_attribute_datapoint = - find_datapoint_with_no_attributes(&counter_data.data_points) + find_sum_datapoint_with_no_attributes(&counter_data.data_points) .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 5); - let data_point1 = - find_datapoint_with_key_value(&counter_data.data_points, "key1", "value1") - .expect("datapoint with key1=value1 expected"); + let data_point1 = find_sum_datapoint_with_key_value( + &counter_data.data_points, + "key1", + "value1", + ) + .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 10); } "updown_counter" => { @@ -1276,10 +1389,10 @@ mod tests { assert_eq!(updown_counter_data.data_points.len(), 2); assert!(!updown_counter_data.is_monotonic); let zero_attribute_datapoint = - find_datapoint_with_no_attributes(&updown_counter_data.data_points) + find_sum_datapoint_with_no_attributes(&updown_counter_data.data_points) .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 15); - let data_point1 = find_datapoint_with_key_value( + let data_point1 = find_sum_datapoint_with_key_value( &updown_counter_data.data_points, "key1", "value1", @@ -1292,12 +1405,15 @@ mod tests { test_context.get_aggregation::>("test_gauge", None); assert_eq!(gauge_data.data_points.len(), 2); let zero_attribute_datapoint = - find_datapoint_with_no_attributes(&gauge_data.data_points) + find_gauge_datapoint_with_no_attributes(&gauge_data.data_points) .expect("datapoint with no attributes expected"); 
                assert_eq!(zero_attribute_datapoint.value, 25);
-                let data_point1 =
-                    find_datapoint_with_key_value(&gauge_data.data_points, "key1", "value1")
-                        .expect("datapoint with key1=value1 expected");
+                let data_point1 = find_gauge_datapoint_with_key_value(
+                    &gauge_data.data_points,
+                    "key1",
+                    "value1",
+                )
+                .expect("datapoint with key1=value1 expected");
                 assert_eq!(data_point1.value, 30);
             }
             _ => panic!("Incorrect instrument kind provided"),
         }
@@ -1361,7 +1477,7 @@ mod tests {
     fn counter_f64_multithreaded_aggregation_helper(temporality: Temporality) {
         // Arrange
         let mut test_context = TestContext::new(temporality);
-        let counter = Arc::new(test_context.meter().f64_counter("test_counter").init());
+        let counter = Arc::new(test_context.meter().f64_counter("test_counter").build());
 
         for i in 0..10 {
             thread::scope(|s| {
@@ -1414,7 +1530,7 @@ mod tests {
     fn histogram_multithreaded_aggregation_helper(temporality: Temporality) {
         // Arrange
         let mut test_context = TestContext::new(temporality);
-        let histogram = Arc::new(test_context.meter().u64_histogram("test_histogram").init());
+        let histogram = Arc::new(test_context.meter().u64_histogram("test_histogram").build());
 
         for i in 0..10 {
             thread::scope(|s| {
@@ -1551,7 +1667,7 @@ mod tests {
     fn histogram_f64_multithreaded_aggregation_helper(temporality: Temporality) {
         // Arrange
         let mut test_context = TestContext::new(temporality);
-        let histogram = Arc::new(test_context.meter().f64_histogram("test_histogram").init());
+        let histogram = Arc::new(test_context.meter().f64_histogram("test_histogram").build());
 
         for i in 0..10 {
             thread::scope(|s| {
@@ -1688,7 +1804,7 @@ mod tests {
     fn histogram_aggregation_helper(temporality: Temporality) {
         // Arrange
         let mut test_context = TestContext::new(temporality);
-        let histogram = test_context.meter().u64_histogram("my_histogram").init();
+        let histogram = test_context.meter().u64_histogram("my_histogram").build();
 
         // Act
         let mut rand = rngs::SmallRng::from_entropy();
@@ -1790,10 +1906,61 @@ mod tests {
         }
     }
 
+    fn histogram_aggregation_with_custom_bounds_helper(temporality: Temporality) {
+        let mut test_context = TestContext::new(temporality);
+        let histogram = test_context
+            .meter()
+            .u64_histogram("test_histogram")
+            .with_boundaries(vec![1.0, 2.5, 5.5])
+            .build();
+        histogram.record(1, &[KeyValue::new("key1", "value1")]);
+        histogram.record(2, &[KeyValue::new("key1", "value1")]);
+        histogram.record(3, &[KeyValue::new("key1", "value1")]);
+        histogram.record(4, &[KeyValue::new("key1", "value1")]);
+        histogram.record(5, &[KeyValue::new("key1", "value1")]);
+
+        test_context.flush_metrics();
+
+        // Assert
+        let histogram_data =
+            test_context.get_aggregation::<data::Histogram<u64>>("test_histogram", None);
+        // Expecting 1 time-series.
+ assert_eq!(histogram_data.data_points.len(), 1); + if let Temporality::Cumulative = temporality { + assert_eq!( + histogram_data.temporality, + Temporality::Cumulative, + "Should produce cumulative" + ); + } else { + assert_eq!( + histogram_data.temporality, + Temporality::Delta, + "Should produce delta" + ); + } + + // find and validate key1=value1 datapoint + let data_point = + find_histogram_datapoint_with_key_value(&histogram_data.data_points, "key1", "value1") + .expect("datapoint with key1=value1 expected"); + + assert_eq!(data_point.count, 5); + assert_eq!(data_point.sum, 15); + + // Check the bucket counts + // -∞ to 1.0: 1 + // 1.0 to 2.5: 1 + // 2.5 to 5.5: 3 + // 5.5 to +∞: 0 + + assert_eq!(vec![1.0, 2.5, 5.5], data_point.bounds); + assert_eq!(vec![1, 1, 3, 0], data_point.bucket_counts); + } fn gauge_aggregation_helper(temporality: Temporality) { // Arrange let mut test_context = TestContext::new(temporality); - let gauge = test_context.meter().i64_gauge("my_gauge").init(); + let gauge = test_context.meter().i64_gauge("my_gauge").build(); // Act gauge.record(1, &[KeyValue::new("key1", "value1")]); @@ -1815,12 +1982,12 @@ mod tests { // find and validate key1=value2 datapoint let data_point1 = - find_datapoint_with_key_value(&gauge_data_point.data_points, "key1", "value1") + find_gauge_datapoint_with_key_value(&gauge_data_point.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 4); let data_point1 = - find_datapoint_with_key_value(&gauge_data_point.data_points, "key1", "value2") + find_gauge_datapoint_with_key_value(&gauge_data_point.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 6); @@ -1840,11 +2007,11 @@ mod tests { let gauge = test_context.get_aggregation::>("my_gauge", None); assert_eq!(gauge.data_points.len(), 2); - let data_point1 = find_datapoint_with_key_value(&gauge.data_points, "key1", "value1") + let data_point1 = find_gauge_datapoint_with_key_value(&gauge.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 41); - let data_point1 = find_datapoint_with_key_value(&gauge.data_points, "key1", "value2") + let data_point1 = find_gauge_datapoint_with_key_value(&gauge.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 54); } @@ -1862,7 +2029,7 @@ mod tests { observer.observe(4, &[KeyValue::new("key1", "value1")]); observer.observe(5, &[KeyValue::new("key2", "value2")]); }) - .init(); + .build(); test_context.flush_metrics(); @@ -1874,18 +2041,19 @@ mod tests { if use_empty_attributes { // find and validate zero attribute datapoint - let zero_attribute_datapoint = find_datapoint_with_no_attributes(&gauge.data_points) - .expect("datapoint with no attributes expected"); + let zero_attribute_datapoint = + find_gauge_datapoint_with_no_attributes(&gauge.data_points) + .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 1); } // find and validate key1=value1 datapoint - let data_point1 = find_datapoint_with_key_value(&gauge.data_points, "key1", "value1") + let data_point1 = find_gauge_datapoint_with_key_value(&gauge.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 4); // find and validate key2=value2 datapoint - let data_point2 = find_datapoint_with_key_value(&gauge.data_points, "key2", "value2") + let data_point2 = 
find_gauge_datapoint_with_key_value(&gauge.data_points, "key2", "value2") .expect("datapoint with key2=value2 expected"); assert_eq!(data_point2.value, 5); @@ -1898,16 +2066,17 @@ mod tests { assert_eq!(gauge.data_points.len(), expected_time_series_count); if use_empty_attributes { - let zero_attribute_datapoint = find_datapoint_with_no_attributes(&gauge.data_points) - .expect("datapoint with no attributes expected"); + let zero_attribute_datapoint = + find_gauge_datapoint_with_no_attributes(&gauge.data_points) + .expect("datapoint with no attributes expected"); assert_eq!(zero_attribute_datapoint.value, 1); } - let data_point1 = find_datapoint_with_key_value(&gauge.data_points, "key1", "value1") + let data_point1 = find_gauge_datapoint_with_key_value(&gauge.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 4); - let data_point2 = find_datapoint_with_key_value(&gauge.data_points, "key2", "value2") + let data_point2 = find_gauge_datapoint_with_key_value(&gauge.data_points, "key2", "value2") .expect("datapoint with key2=value2 expected"); assert_eq!(data_point2.value, 5); } @@ -1946,11 +2115,11 @@ mod tests { } // find and validate key1=value2 datapoint - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 5); - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 3); @@ -1970,7 +2139,7 @@ mod tests { let sum = test_context.get_aggregation::>("my_counter", None); assert_eq!(sum.data_points.len(), 2); - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); if temporality == Temporality::Cumulative { assert_eq!(data_point1.value, 10); @@ -1978,7 +2147,7 @@ mod tests { assert_eq!(data_point1.value, 5); } - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); if temporality == Temporality::Cumulative { assert_eq!(data_point1.value, 6); @@ -2014,12 +2183,12 @@ mod tests { assert_eq!(sum.data_points.len(), 2002); let data_point = - find_datapoint_with_key_value(&sum.data_points, "otel.metric.overflow", "true") + find_sum_datapoint_with_key_value(&sum.data_points, "otel.metric.overflow", "true") .expect("overflow point expected"); assert_eq!(data_point.value, 300); // let empty_attrs_data_point = &sum.data_points[0]; - let empty_attrs_data_point = find_datapoint_with_no_attributes(&sum.data_points) + let empty_attrs_data_point = find_sum_datapoint_with_no_attributes(&sum.data_points) .expect("Empty attributes point expected"); assert!( empty_attrs_data_point.attributes.is_empty(), @@ -2138,22 +2307,18 @@ mod tests { !sum.is_monotonic, "UpDownCounter should produce non-monotonic." 
); - if let Temporality::Cumulative = temporality { - assert_eq!( - sum.temporality, - Temporality::Cumulative, - "Should produce cumulative" - ); - } else { - assert_eq!(sum.temporality, Temporality::Delta, "Should produce delta"); - } + assert_eq!( + sum.temporality, + Temporality::Cumulative, + "Should produce Cumulative for UpDownCounter" + ); // find and validate key1=value2 datapoint - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); assert_eq!(data_point1.value, 5); - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); assert_eq!(data_point1.value, 7); @@ -2173,28 +2338,20 @@ mod tests { let sum = test_context.get_aggregation::>("my_updown_counter", None); assert_eq!(sum.data_points.len(), 2); - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value1") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value1") .expect("datapoint with key1=value1 expected"); - if temporality == Temporality::Cumulative { - assert_eq!(data_point1.value, 10); - } else { - assert_eq!(data_point1.value, 5); - } + assert_eq!(data_point1.value, 10); - let data_point1 = find_datapoint_with_key_value(&sum.data_points, "key1", "value2") + let data_point1 = find_sum_datapoint_with_key_value(&sum.data_points, "key1", "value2") .expect("datapoint with key1=value2 expected"); - if temporality == Temporality::Cumulative { - assert_eq!(data_point1.value, 14); - } else { - assert_eq!(data_point1.value, 7); - } + assert_eq!(data_point1.value, 14); } - fn find_datapoint_with_key_value<'a, T>( - data_points: &'a [DataPoint], + fn find_sum_datapoint_with_key_value<'a, T>( + data_points: &'a [SumDataPoint], key: &str, value: &str, - ) -> Option<&'a DataPoint> { + ) -> Option<&'a SumDataPoint> { data_points.iter().find(|&datapoint| { datapoint .attributes @@ -2203,7 +2360,30 @@ mod tests { }) } - fn find_datapoint_with_no_attributes(data_points: &[DataPoint]) -> Option<&DataPoint> { + fn find_gauge_datapoint_with_key_value<'a, T>( + data_points: &'a [GaugeDataPoint], + key: &str, + value: &str, + ) -> Option<&'a GaugeDataPoint> { + data_points.iter().find(|&datapoint| { + datapoint + .attributes + .iter() + .any(|kv| kv.key.as_str() == key && kv.value.as_str() == value) + }) + } + + fn find_sum_datapoint_with_no_attributes( + data_points: &[SumDataPoint], + ) -> Option<&SumDataPoint> { + data_points + .iter() + .find(|&datapoint| datapoint.attributes.is_empty()) + } + + fn find_gauge_datapoint_with_no_attributes( + data_points: &[GaugeDataPoint], + ) -> Option<&GaugeDataPoint> { data_points .iter() .find(|&datapoint| datapoint.attributes.is_empty()) @@ -2236,11 +2416,11 @@ mod tests { ) -> Option<&'a ScopeMetrics> { metrics .iter() - .find(|&scope_metric| scope_metric.scope.name == name) + .find(|&scope_metric| scope_metric.scope.name() == name) } struct TestContext { - exporter: InMemoryMetricsExporter, + exporter: InMemoryMetricExporter, meter_provider: SdkMeterProvider, // Saving this on the test context for lifetime simplicity @@ -2249,18 +2429,10 @@ mod tests { impl TestContext { fn new(temporality: Temporality) -> Self { - struct TestTemporalitySelector(Temporality); - impl TemporalitySelector for TestTemporalitySelector { - 
fn temporality(&self, _kind: InstrumentKind) -> Temporality { - self.0 - } - } - - let mut exporter = InMemoryMetricsExporterBuilder::new(); - exporter = exporter.with_temporality_selector(TestTemporalitySelector(temporality)); + let exporter = InMemoryMetricExporterBuilder::new().with_temporality(temporality); let exporter = exporter.build(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); + let reader = PeriodicReader::builder(exporter.clone()).build(); let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); TestContext { @@ -2281,7 +2453,7 @@ mod tests { if let Some(unit_name) = unit { counter_builder = counter_builder.with_unit(unit_name); } - counter_builder.init() + counter_builder.build() } fn i64_up_down_counter( @@ -2295,7 +2467,7 @@ mod tests { if let Some(unit_name) = unit { updown_counter_builder = updown_counter_builder.with_unit(unit_name); } - updown_counter_builder.init() + updown_counter_builder.build() } fn meter(&self) -> Meter { @@ -2310,6 +2482,15 @@ mod tests { self.exporter.reset(); } + fn check_no_metrics(&self) { + let resource_metrics = self + .exporter + .get_finished_metrics() + .expect("metrics expected to be exported"); // TODO: Need to fix InMemoryMetricExporter to return None. + + assert!(resource_metrics.is_empty(), "no metrics should be exported"); + } + fn get_aggregation( &mut self, counter_name: &str, diff --git a/opentelemetry-sdk/src/metrics/noop.rs b/opentelemetry-sdk/src/metrics/noop.rs new file mode 100644 index 0000000000..1a490698ad --- /dev/null +++ b/opentelemetry-sdk/src/metrics/noop.rs @@ -0,0 +1,38 @@ +use opentelemetry::{ + metrics::{InstrumentProvider, SyncInstrument}, + KeyValue, +}; + +/// A no-op instance of a `Meter` +#[derive(Debug, Default)] +pub(crate) struct NoopMeter { + _private: (), +} + +impl NoopMeter { + /// Create a new no-op meter core. 
+ pub(crate) fn new() -> Self { + NoopMeter { _private: () } + } +} + +impl InstrumentProvider for NoopMeter {} + +/// A no-op sync instrument +#[derive(Debug, Default)] +pub(crate) struct NoopSyncInstrument { + _private: (), +} + +impl NoopSyncInstrument { + /// Create a new no-op sync instrument + pub(crate) fn new() -> Self { + NoopSyncInstrument { _private: () } + } +} + +impl SyncInstrument for NoopSyncInstrument { + fn measure(&self, _value: T, _attributes: &[KeyValue]) { + // Ignored + } +} diff --git a/opentelemetry-sdk/src/metrics/periodic_reader.rs b/opentelemetry-sdk/src/metrics/periodic_reader.rs index 7821786675..43bfd0912e 100644 --- a/opentelemetry-sdk/src/metrics/periodic_reader.rs +++ b/opentelemetry-sdk/src/metrics/periodic_reader.rs @@ -1,32 +1,22 @@ use std::{ - env, fmt, mem, - sync::{Arc, Mutex, Weak}, - time::Duration, + env, fmt, + sync::{ + mpsc::{self, Receiver, Sender}, + Arc, Mutex, Weak, + }, + thread, + time::{Duration, Instant}, }; -use futures_channel::{mpsc, oneshot}; -use futures_util::{ - future::{self, Either}, - pin_mut, - stream::{self, FusedStream}, - StreamExt, -}; -use opentelemetry::{ - global, - metrics::{MetricsError, Result}, -}; +use opentelemetry::{otel_debug, otel_error, otel_info, otel_warn}; -use crate::runtime::Runtime; use crate::{ - metrics::{exporter::PushMetricsExporter, reader::SdkProducer}, + metrics::{exporter::PushMetricExporter, reader::SdkProducer, MetricError, MetricResult}, Resource, }; use super::{ - data::{ResourceMetrics, Temporality}, - instrument::InstrumentKind, - reader::{MetricReader, TemporalitySelector}, - Pipeline, + data::ResourceMetrics, instrument::InstrumentKind, reader::MetricReader, Pipeline, Temporality, }; const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30); @@ -41,8 +31,9 @@ const METRIC_EXPORT_TIMEOUT_NAME: &str = "OTEL_METRIC_EXPORT_TIMEOUT"; /// to the exporter at a defined interval. /// /// By default, the returned [MetricReader] will collect and export data every -/// 60 seconds, and will cancel export attempts that exceed 30 seconds. The -/// export time is not counted towards the interval between attempts. +/// 60 seconds. The export time is not counted towards the interval between +/// attempts. PeriodicReader itself does not enforce timeout. Instead timeout +/// is passed on to the exporter for each export attempt. /// /// The [collect] method of the returned [MetricReader] continues to gather and /// return metric data to the user. It will not automatically send that data to @@ -50,19 +41,17 @@ const METRIC_EXPORT_TIMEOUT_NAME: &str = "OTEL_METRIC_EXPORT_TIMEOUT"; /// /// [collect]: MetricReader::collect #[derive(Debug)] -pub struct PeriodicReaderBuilder { +pub struct PeriodicReaderBuilder { interval: Duration, timeout: Duration, exporter: E, - runtime: RT, } -impl PeriodicReaderBuilder +impl PeriodicReaderBuilder where - E: PushMetricsExporter, - RT: Runtime, + E: PushMetricExporter, { - fn new(exporter: E, runtime: RT) -> Self { + fn new(exporter: E) -> Self { let interval = env::var(METRIC_EXPORT_INTERVAL_NAME) .ok() .and_then(|v| v.parse().map(Duration::from_millis).ok()) @@ -76,7 +65,6 @@ where interval, timeout, exporter, - runtime, } } @@ -94,8 +82,9 @@ where self } - /// Configures the time a [PeriodicReader] waits for an export to complete - /// before canceling it. + /// Configures the timeout for an export to complete. PeriodicReader itself + /// does not enforce timeout. Instead timeout is passed on to the exporter + /// for each export attempt. 
/// /// This option overrides any value set for the `OTEL_METRIC_EXPORT_TIMEOUT` /// environment variable. @@ -111,94 +100,191 @@ where /// Create a [PeriodicReader] with the given config. pub fn build(self) -> PeriodicReader { - let (message_sender, message_receiver) = mpsc::channel(256); - - let worker = move |reader: &PeriodicReader| { - let ticker = self - .runtime - .interval(self.interval) - .skip(1) // The ticker is fired immediately, so we should skip the first one to align with the interval. - .map(|_| Message::Export); - - let messages = Box::pin(stream::select(message_receiver, ticker)); - - let runtime = self.runtime.clone(); - self.runtime.spawn(Box::pin( - PeriodicReaderWorker { - reader: reader.clone(), - timeout: self.timeout, - runtime, - rm: ResourceMetrics { - resource: Resource::empty(), - scope_metrics: Vec::new(), - }, - } - .run(messages), - )); - }; - - PeriodicReader { - exporter: Arc::new(self.exporter), - inner: Arc::new(Mutex::new(PeriodicReaderInner { - message_sender, - is_shutdown: false, - sdk_producer_or_worker: ProducerOrWorker::Worker(Box::new(worker)), - })), - } + PeriodicReader::new(self.exporter, self.interval, self.timeout) } } /// A [MetricReader] that continuously collects and exports metric data at a set /// interval. /// -/// By default it will collect and export data every 60 seconds, and will cancel -/// export attempts that exceed 30 seconds. The export time is not counted -/// towards the interval between attempts. +/// By default, PeriodicReader will collect and export data every +/// 60 seconds. The export time is not counted towards the interval between +/// attempts. PeriodicReader itself does not enforce timeout. +/// Instead timeout is passed on to the exporter for each export attempt. /// /// The [collect] method of the returned continues to gather and /// return metric data to the user. It will not automatically send that data to /// the exporter outside of the predefined interval. /// -/// The [runtime] can be selected based on feature flags set for this crate. -/// -/// The exporter can be any exporter that implements [PushMetricsExporter] such -/// as [opentelemetry-otlp]. /// /// [collect]: MetricReader::collect -/// [runtime]: crate::runtime -/// [opentelemetry-otlp]: https://docs.rs/opentelemetry-otlp/latest/opentelemetry_otlp/ /// /// # Example /// /// ```no_run /// use opentelemetry_sdk::metrics::PeriodicReader; -/// # fn example(get_exporter: impl Fn() -> E, get_runtime: impl Fn() -> R) +/// # fn example(get_exporter: impl Fn() -> E) /// # where -/// # E: opentelemetry_sdk::metrics::exporter::PushMetricsExporter, -/// # R: opentelemetry_sdk::runtime::Runtime, +/// # E: opentelemetry_sdk::metrics::exporter::PushMetricExporter, /// # { /// -/// let exporter = get_exporter(); // set up a push exporter like OTLP -/// let runtime = get_runtime(); // select runtime: e.g. 
opentelemetry_sdk:runtime::Tokio +/// let exporter = get_exporter(); // set up a push exporter /// -/// let reader = PeriodicReader::builder(exporter, runtime).build(); +/// let reader = PeriodicReader::builder(exporter).build(); /// # drop(reader); /// # } /// ``` #[derive(Clone)] pub struct PeriodicReader { - exporter: Arc, - inner: Arc>, + inner: Arc, } impl PeriodicReader { - /// Configuration options for a periodic reader - pub fn builder(exporter: E, runtime: RT) -> PeriodicReaderBuilder + /// Configuration options for a periodic reader with own thread + pub fn builder(exporter: E) -> PeriodicReaderBuilder where - E: PushMetricsExporter, - RT: Runtime, + E: PushMetricExporter, { - PeriodicReaderBuilder::new(exporter, runtime) + PeriodicReaderBuilder::new(exporter) + } + + fn new(exporter: E, interval: Duration, timeout: Duration) -> Self + where + E: PushMetricExporter, + { + let (message_sender, message_receiver): (Sender, Receiver) = + mpsc::channel(); + let reader = PeriodicReader { + inner: Arc::new(PeriodicReaderInner { + message_sender: Arc::new(message_sender), + producer: Mutex::new(None), + exporter: Arc::new(exporter), + }), + }; + let cloned_reader = reader.clone(); + + let result_thread_creation = thread::Builder::new() + .name("OpenTelemetry.Metrics.PeriodicReader".to_string()) + .spawn(move || { + let mut interval_start = Instant::now(); + let mut remaining_interval = interval; + otel_info!( + name: "PeriodReaderThreadStarted", + interval_in_millisecs = interval.as_millis(), + timeout_in_millisecs = timeout.as_millis() + ); + loop { + otel_debug!( + name: "PeriodReaderThreadLoopAlive", message = "Next export will happen after interval, unless flush or shutdown is triggered.", interval_in_millisecs = remaining_interval.as_millis() + ); + match message_receiver.recv_timeout(remaining_interval) { + Ok(Message::Flush(response_sender)) => { + otel_debug!( + name: "PeriodReaderThreadExportingDueToFlush" + ); + if let Err(_e) = cloned_reader.collect_and_export(timeout) { + response_sender.send(false).unwrap(); + } else { + response_sender.send(true).unwrap(); + } + + // Adjust the remaining interval after the flush + let elapsed = interval_start.elapsed(); + if elapsed < interval { + remaining_interval = interval - elapsed; + otel_debug!( + name: "PeriodReaderThreadAdjustingRemainingIntervalAfterFlush", + remaining_interval = remaining_interval.as_secs() + ); + } else { + otel_debug!( + name: "PeriodReaderThreadAdjustingExportAfterFlush", + ); + // Reset the interval if the flush finishes after the expected export time + // effectively missing the normal export. + // Should we attempt to do the missed export immediately? + // Or do the next export at the next interval? + // Currently this attempts the next export immediately. + // i.e calling Flush can affect the regularity. 
+ interval_start = Instant::now(); + remaining_interval = Duration::ZERO; + } + } + Ok(Message::Shutdown(response_sender)) => { + // Perform final export and break out of loop and exit the thread + otel_debug!(name: "PeriodReaderThreadExportingDueToShutdown"); + if let Err(_e) = cloned_reader.collect_and_export(timeout) { + response_sender.send(false).unwrap(); + } else { + response_sender.send(true).unwrap(); + } + + otel_debug!( + name: "PeriodReaderThreadExiting", + reason = "ShutdownRequested" + ); + break; + } + Err(mpsc::RecvTimeoutError::Timeout) => { + let export_start = Instant::now(); + otel_debug!( + name: "PeriodReaderThreadExportingDueToTimer" + ); + + if let Err(_e) = cloned_reader.collect_and_export(timeout) { + otel_debug!( + name: "PeriodReaderThreadExportingDueToTimerFailed" + ); + } + + let time_taken_for_export = export_start.elapsed(); + if time_taken_for_export > interval { + otel_debug!( + name: "PeriodReaderThreadExportTookLongerThanInterval" + ); + // if export took longer than interval, do the + // next export immediately. + // Alternatively, we could skip the next export + // and wait for the next interval. + // Or enforce that export timeout is less than interval. + // What is the desired behavior? + interval_start = Instant::now(); + remaining_interval = Duration::ZERO; + } else { + remaining_interval = interval - time_taken_for_export; + interval_start = Instant::now(); + } + } + Err(mpsc::RecvTimeoutError::Disconnected) => { + // Channel disconnected, only thing to do is break + // out (i.e exit the thread) + otel_debug!( + name: "PeriodReaderThreadExiting", + reason = "MessageReceiverDisconnected" + ); + break; + } + } + } + otel_info!( + name: "PeriodReaderThreadStopped" + ); + }); + + // TODO: Should we fail-fast here and bubble up the error to user? + #[allow(unused_variables)] + if let Err(e) = result_thread_creation { + otel_error!( + name: "PeriodReaderThreadStartError", + message = "Failed to start PeriodicReader thread. Metrics will not be exported.", + error = format!("{:?}", e) + ); + } + reader + } + + fn collect_and_export(&self, timeout: Duration) -> MetricResult<()> { + self.inner.collect_and_export(timeout) } } @@ -209,187 +295,246 @@ impl fmt::Debug for PeriodicReader { } struct PeriodicReaderInner { - message_sender: mpsc::Sender, - is_shutdown: bool, - sdk_producer_or_worker: ProducerOrWorker, + exporter: Arc, + message_sender: Arc>, + producer: Mutex>>, } -#[derive(Debug)] -enum Message { - Export, - Flush(oneshot::Sender>), - Shutdown(oneshot::Sender>), -} +impl PeriodicReaderInner { + fn register_pipeline(&self, producer: Weak) { + let mut inner = self.producer.lock().expect("lock poisoned"); + *inner = Some(producer); + } -enum ProducerOrWorker { - Producer(Weak), - Worker(Box), -} + fn temporality(&self, _kind: InstrumentKind) -> Temporality { + self.exporter.temporality() + } -struct PeriodicReaderWorker { - reader: PeriodicReader, - timeout: Duration, - runtime: RT, - rm: ResourceMetrics, -} + fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()> { + let producer = self.producer.lock().expect("lock poisoned"); + if let Some(p) = producer.as_ref() { + p.upgrade() + .ok_or_else(|| MetricError::Other("pipeline is dropped".into()))? 
+ .produce(rm)?; + Ok(()) + } else { + Err(MetricError::Other("pipeline is not registered".into())) + } + } -impl PeriodicReaderWorker { - async fn collect_and_export(&mut self) -> Result<()> { - self.reader.collect(&mut self.rm)?; + fn collect_and_export(&self, _timeout: Duration) -> MetricResult<()> { + // TODO: Reuse the internal vectors. Or refactor to avoid needing any + // owned data structures to be passed to exporters. + let mut rm = ResourceMetrics { + resource: Resource::empty(), + scope_metrics: Vec::new(), + }; - let export = self.reader.exporter.export(&mut self.rm); - let timeout = self.runtime.delay(self.timeout); - pin_mut!(export); - pin_mut!(timeout); + let collect_result = self.collect(&mut rm); + #[allow(clippy::question_mark)] + if let Err(e) = collect_result { + otel_warn!( + name: "PeriodReaderCollectError", + error = format!("{:?}", e) + ); + return Err(e); + } - match future::select(export, timeout).await { - Either::Left((res, _)) => res, // return the status of export. - Either::Right(_) => Err(MetricsError::Other("export timed out".into())), + if rm.scope_metrics.is_empty() { + otel_debug!(name: "NoMetricsCollected"); + return Ok(()); } + + let metrics_count = rm.scope_metrics.iter().fold(0, |count, scope_metrics| { + count + scope_metrics.metrics.len() + }); + otel_debug!(name: "PeriodicReaderMetricsCollected", count = metrics_count); + + // TODO: substract the time taken for collect from the timeout. collect + // involves observable callbacks too, which are user defined and can + // take arbitrary time. + // + // Relying on futures executor to execute async call. + // TODO: Add timeout and pass it to exporter or consider alternative + // design to enforce timeout here. + let exporter_result = futures_executor::block_on(self.exporter.export(&mut rm)); + #[allow(clippy::question_mark)] + if let Err(e) = exporter_result { + otel_warn!( + name: "PeriodReaderExportError", + error = format!("{:?}", e) + ); + return Err(e); + } + + Ok(()) } - async fn process_message(&mut self, message: Message) -> bool { - match message { - Message::Export => { - if let Err(err) = self.collect_and_export().await { - global::handle_error(err) - } - } - Message::Flush(ch) => { - let res = self.collect_and_export().await; - if ch.send(res).is_err() { - global::handle_error(MetricsError::Other("flush channel closed".into())) - } - } - Message::Shutdown(ch) => { - let res = self.collect_and_export().await; - let _ = self.reader.exporter.shutdown(); - if ch.send(res).is_err() { - global::handle_error(MetricsError::Other("shutdown channel closed".into())) - } - return false; + fn force_flush(&self) -> MetricResult<()> { + // TODO: Better message for this scenario. + // Flush and Shutdown called from 2 threads Flush check shutdown + // flag before shutdown thread sets it. Both threads attempt to send + // message to the same channel. Case1: Flush thread sends message first, + // shutdown thread sends message next. Flush would succeed, as + // background thread won't process shutdown message until flush + // triggered export is done. Case2: Shutdown thread sends message first, + // flush thread sends message next. Shutdown would succeed, as + // background thread would process shutdown message first. The + // background exits so it won't receive the flush message. ForceFlush + // returns Failure, but we could indicate specifically that shutdown has + // completed. TODO is to see if this message can be improved. 
+ + let (response_tx, response_rx) = mpsc::channel(); + self.message_sender + .send(Message::Flush(response_tx)) + .map_err(|e| MetricError::Other(e.to_string()))?; + + if let Ok(response) = response_rx.recv() { + // TODO: call exporter's force_flush method. + if response { + Ok(()) + } else { + Err(MetricError::Other("Failed to flush".into())) } + } else { + Err(MetricError::Other("Failed to flush".into())) } - - true } - async fn run(mut self, mut messages: impl FusedStream + Unpin) { - while let Some(message) = messages.next().await { - if !self.process_message(message).await { - break; + fn shutdown(&self) -> MetricResult<()> { + // TODO: See if this is better to be created upfront. + let (response_tx, response_rx) = mpsc::channel(); + self.message_sender + .send(Message::Shutdown(response_tx)) + .map_err(|e| MetricError::Other(e.to_string()))?; + + if let Ok(response) = response_rx.recv() { + if response { + Ok(()) + } else { + Err(MetricError::Other("Failed to shutdown".into())) } + } else { + Err(MetricError::Other("Failed to shutdown".into())) } } } -impl TemporalitySelector for PeriodicReader { - fn temporality(&self, kind: InstrumentKind) -> Temporality { - self.exporter.temporality(kind) - } +#[derive(Debug)] +enum Message { + Flush(Sender), + Shutdown(Sender), } impl MetricReader for PeriodicReader { fn register_pipeline(&self, pipeline: Weak) { - let mut inner = match self.inner.lock() { - Ok(guard) => guard, - Err(_) => return, - }; - - let worker = match &mut inner.sdk_producer_or_worker { - ProducerOrWorker::Producer(_) => { - // Only register once. If producer is already set, do nothing. - global::handle_error(MetricsError::Other( - "duplicate meter registration, did not register manual reader".into(), - )); - return; - } - ProducerOrWorker::Worker(w) => mem::replace(w, Box::new(|_| {})), - }; + self.inner.register_pipeline(pipeline); + } - inner.sdk_producer_or_worker = ProducerOrWorker::Producer(pipeline); - worker(self); + fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()> { + self.inner.collect(rm) } - fn collect(&self, rm: &mut ResourceMetrics) -> Result<()> { - let inner = self.inner.lock()?; - if inner.is_shutdown { - return Err(MetricsError::Other("reader is shut down".into())); - } + fn force_flush(&self) -> MetricResult<()> { + self.inner.force_flush() + } - if let Some(producer) = match &inner.sdk_producer_or_worker { - ProducerOrWorker::Producer(sdk_producer) => sdk_producer.upgrade(), - ProducerOrWorker::Worker(_) => None, - } { - producer.produce(rm)?; - } else { - return Err(MetricsError::Other("reader is not registered".into())); - } + // TODO: Offer an async version of shutdown so users can await the shutdown + // completion, and avoid blocking the thread. The default shutdown on drop + // can still use blocking call. If user already explicitly called shutdown, + // drop won't call shutdown again. + fn shutdown(&self) -> MetricResult<()> { + self.inner.shutdown() + } - Ok(()) + /// To construct a [MetricReader][metric-reader] when setting up an SDK, + /// The output temporality (optional), a function of instrument kind. + /// This function SHOULD be obtained from the exporter. + /// + /// If not configured, the Cumulative temporality SHOULD be used. 
+ /// + /// [metric-reader]: https://github.com/open-telemetry/opentelemetry-specification/blob/0a78571045ca1dca48621c9648ec3c832c3c541c/specification/metrics/sdk.md#metricreader + fn temporality(&self, kind: InstrumentKind) -> Temporality { + kind.temporality_preference(self.inner.temporality(kind)) } +} - fn force_flush(&self) -> Result<()> { - let mut inner = self.inner.lock()?; - if inner.is_shutdown { - return Err(MetricsError::Other("reader is shut down".into())); - } - let (sender, receiver) = oneshot::channel(); - inner - .message_sender - .try_send(Message::Flush(sender)) - .map_err(|e| MetricsError::Other(e.to_string()))?; +#[cfg(all(test, feature = "testing"))] +mod tests { + use super::PeriodicReader; + use crate::{ + metrics::{ + data::ResourceMetrics, exporter::PushMetricExporter, reader::MetricReader, MetricError, + MetricResult, SdkMeterProvider, Temporality, + }, + testing::metrics::InMemoryMetricExporter, + Resource, + }; + use async_trait::async_trait; + use opentelemetry::metrics::MeterProvider; + use std::{ + sync::{ + atomic::{AtomicUsize, Ordering}, + mpsc, Arc, + }, + time::Duration, + }; - drop(inner); // don't hold lock when blocking on future + // use below command to run all tests + // cargo test metrics::periodic_reader::tests --features=testing,spec_unstable_metrics_views -- --nocapture - futures_executor::block_on(receiver) - .map_err(|err| MetricsError::Other(err.to_string())) - .and_then(|res| res) + #[derive(Debug, Clone)] + struct MetricExporterThatFailsOnlyOnFirst { + count: Arc, } - fn shutdown(&self) -> Result<()> { - let mut inner = self.inner.lock()?; - if inner.is_shutdown { - return Err(MetricsError::Other("reader is already shut down".into())); + impl Default for MetricExporterThatFailsOnlyOnFirst { + fn default() -> Self { + MetricExporterThatFailsOnlyOnFirst { + count: Arc::new(AtomicUsize::new(0)), + } } + } - let (sender, receiver) = oneshot::channel(); - inner - .message_sender - .try_send(Message::Shutdown(sender)) - .map_err(|e| MetricsError::Other(e.to_string()))?; - drop(inner); // don't hold lock when blocking on future + impl MetricExporterThatFailsOnlyOnFirst { + fn get_count(&self) -> usize { + self.count.load(Ordering::Relaxed) + } + } - let shutdown_result = futures_executor::block_on(receiver) - .map_err(|err| MetricsError::Other(err.to_string()))?; + #[async_trait] + impl PushMetricExporter for MetricExporterThatFailsOnlyOnFirst { + async fn export(&self, _metrics: &mut ResourceMetrics) -> MetricResult<()> { + if self.count.fetch_add(1, Ordering::Relaxed) == 0 { + Err(MetricError::Other("export failed".into())) + } else { + Ok(()) + } + } - // Acquire the lock again to set the shutdown flag - let mut inner = self.inner.lock()?; - inner.is_shutdown = true; + async fn force_flush(&self) -> MetricResult<()> { + Ok(()) + } - shutdown_result - } -} + fn shutdown(&self) -> MetricResult<()> { + Ok(()) + } -#[cfg(all(test, feature = "testing"))] -mod tests { - use super::PeriodicReader; - use crate::{ - metrics::data::ResourceMetrics, metrics::reader::MetricReader, metrics::SdkMeterProvider, - runtime, testing::metrics::InMemoryMetricsExporter, Resource, - }; - use opentelemetry::metrics::MeterProvider; - use std::sync::mpsc; + fn temporality(&self) -> Temporality { + Temporality::Cumulative + } + } - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - async fn registration_triggers_collection() { + #[test] + fn collection_triggered_by_interval_multiple() { // Arrange let interval = std::time::Duration::from_millis(1); - let 
exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio) + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()) .with_interval(interval) .build(); - let (sender, receiver) = mpsc::channel(); + let i = Arc::new(AtomicUsize::new(0)); + let i_clone = i.clone(); // Act let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); @@ -397,32 +542,374 @@ mod tests { let _counter = meter .u64_observable_counter("testcounter") .with_callback(move |_| { - sender.send(()).expect("channel should still be open"); + i_clone.fetch_add(1, Ordering::Relaxed); }) - .init(); + .build(); - _ = meter_provider.force_flush(); + // Sleep for a duration 5X (plus liberal buffer to account for potential + // CI slowness) the interval to ensure multiple collection. + // Not a fan of such tests, but this seems to be the only way to test + // if periodic reader is doing its job. + // TODO: Decide if this should be ignored in CI + std::thread::sleep(interval * 5 * 20); // Assert - receiver - .try_recv() - .expect("message should be available in channel, indicating a collection occurred"); + assert!(i.load(Ordering::Relaxed) >= 5); } - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - async fn unregistered_collect() { + #[test] + fn shutdown_repeat() { // Arrange - let exporter = InMemoryMetricsExporter::default(); - let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build(); - let mut rm = ResourceMetrics { + let interval = std::time::Duration::from_millis(1); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()) + .with_interval(interval) + .build(); + + let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); + let result = meter_provider.shutdown(); + assert!(result.is_ok()); + + // calling shutdown again should return Err + let result = meter_provider.shutdown(); + assert!(result.is_err()); + + // calling shutdown again should return Err + let result = meter_provider.shutdown(); + assert!(result.is_err()); + } + + #[test] + fn flush_after_shutdown() { + // Arrange + let interval = std::time::Duration::from_millis(1); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()) + .with_interval(interval) + .build(); + + let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); + let result = meter_provider.force_flush(); + assert!(result.is_ok()); + + let result = meter_provider.shutdown(); + assert!(result.is_ok()); + + // calling force_flush after shutdown should return Err + let result = meter_provider.force_flush(); + assert!(result.is_err()); + } + + #[test] + fn flush_repeat() { + // Arrange + let interval = std::time::Duration::from_millis(1); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()) + .with_interval(interval) + .build(); + + let meter_provider = SdkMeterProvider::builder().with_reader(reader).build(); + let result = meter_provider.force_flush(); + assert!(result.is_ok()); + + // calling force_flush again should return Ok + let result = meter_provider.force_flush(); + assert!(result.is_ok()); + } + + #[test] + fn periodic_reader_without_pipeline() { + // Arrange + let interval = std::time::Duration::from_millis(1); + let exporter = InMemoryMetricExporter::default(); + let reader = PeriodicReader::builder(exporter.clone()) + 
.with_interval(interval)
+            .build();
+
+        let rm = &mut ResourceMetrics {
+            resource: Resource::empty(),
+            scope_metrics: Vec::new(),
+        };
+        // Pipeline is not registered, so collect should return an error
+        let result = reader.collect(rm);
+        assert!(result.is_err());
+
+        // Pipeline is not registered, so flush should return an error
+        let result = reader.force_flush();
+        assert!(result.is_err());
+
+        // Adding reader to meter provider should register the pipeline
+        // TODO: This part might benefit from a different design.
+        let meter_provider = SdkMeterProvider::builder()
+            .with_reader(reader.clone())
+            .build();
+
+        // Now collect and flush should succeed
+        let result = reader.collect(rm);
+        assert!(result.is_ok());
+
+        let result = meter_provider.force_flush();
+        assert!(result.is_ok());
+    }
+
+    #[test]
+    fn exporter_failures_are_handled() {
+        // Create a mock exporter that fails the 1st time and succeeds the 2nd time.
+        // Validate, using this exporter, that the periodic reader can handle exporter
+        // failures and continue to export metrics.
+        // Arrange
+        let interval = std::time::Duration::from_millis(10);
+        let exporter = MetricExporterThatFailsOnlyOnFirst::default();
+        let reader = PeriodicReader::builder(exporter.clone())
+            .with_interval(interval)
+            .build();
+
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+        let meter = meter_provider.meter("test");
+        let counter = meter.u64_counter("sync_counter").build();
+        counter.add(1, &[]);
+        let _obs_counter = meter
+            .u64_observable_counter("testcounter")
+            .with_callback(move |observer| {
+                observer.observe(1, &[]);
+            })
+            .build();
+
+        // Sleep for a duration much longer than the interval to trigger
+        // multiple exports, including failures.
+        // Not a fan of such tests, but this seems to be the
+        // only way to test if the periodic reader is doing its job.
+        // TODO: Decide if this should be ignored in CI
+        std::thread::sleep(Duration::from_millis(500));
+
+        // Assert that at least 2 exports are attempted, given that the 1st one fails.
+        assert!(exporter.get_count() >= 2);
+    }
+
+    #[test]
+    fn collection() {
+        collection_triggered_by_interval_helper();
+        collection_triggered_by_flush_helper();
+        collection_triggered_by_shutdown_helper();
+        collection_triggered_by_drop_helper();
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn collection_from_tokio_multi_with_one_worker() {
+        collection_triggered_by_interval_helper();
+        collection_triggered_by_flush_helper();
+        collection_triggered_by_shutdown_helper();
+        collection_triggered_by_drop_helper();
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn collection_from_tokio_with_two_worker() {
+        collection_triggered_by_interval_helper();
+        collection_triggered_by_flush_helper();
+        collection_triggered_by_shutdown_helper();
+        collection_triggered_by_drop_helper();
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    async fn collection_from_tokio_current() {
+        collection_triggered_by_interval_helper();
+        collection_triggered_by_flush_helper();
+        collection_triggered_by_shutdown_helper();
+        collection_triggered_by_drop_helper();
+    }
+
+    fn collection_triggered_by_interval_helper() {
+        collection_helper(|_| {
+            // Sleep for a duration longer than the interval to ensure at least one collection.
+            // Not a fan of such tests, but this seems to be the only way to test
+            // if the periodic reader is doing its job.
+            // TODO: Decide if this should be ignored in CI
+            std::thread::sleep(Duration::from_millis(500));
+        });
+    }
+
+    fn collection_triggered_by_flush_helper() {
+        collection_helper(|meter_provider| {
+            meter_provider.force_flush().expect("flush should succeed");
+        });
+    }
+
+    fn collection_triggered_by_shutdown_helper() {
+        collection_helper(|meter_provider| {
+            meter_provider.shutdown().expect("shutdown should succeed");
+        });
+    }
+
+    fn collection_triggered_by_drop_helper() {
+        collection_helper(|meter_provider| {
+            drop(meter_provider);
+        });
+    }
+
+    fn collection_helper(trigger: fn(SdkMeterProvider)) {
+        // Arrange
+        let interval = std::time::Duration::from_millis(10);
+        let exporter = InMemoryMetricExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone())
+            .with_interval(interval)
+            .build();
+        let (sender, receiver) = mpsc::channel();
+
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+        let meter = meter_provider.meter("test");
+        let _counter = meter
+            .u64_observable_counter("testcounter")
+            .with_callback(move |observer| {
+                observer.observe(1, &[]);
+                sender.send(()).expect("channel should still be open");
+            })
+            .build();

         // Act
-        let result = reader.collect(&mut rm);
+        trigger(meter_provider);

         // Assert
-        result.expect_err("error expected when reader is not registered");
+        receiver
+            .recv_timeout(Duration::ZERO)
+            .expect("message should be available in channel, indicating a collection occurred, which should trigger observable callback");
+
+        let exported_metrics = exporter
+            .get_finished_metrics()
+            .expect("this should not fail");
+        assert!(
+            !exported_metrics.is_empty(),
+            "Metrics should be available in exporter."
+        );
+    }
+
+    async fn some_async_function() -> u64 {
+        // No dependency on any particular async runtime.
+        std::thread::sleep(std::time::Duration::from_millis(1));
+        1
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn async_inside_observable_callback_from_tokio_multi_with_one_worker() {
+        async_inside_observable_callback_helper();
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn async_inside_observable_callback_from_tokio_multi_with_two_worker() {
+        async_inside_observable_callback_helper();
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    async fn async_inside_observable_callback_from_tokio_current_thread() {
+        async_inside_observable_callback_helper();
+    }
+
+    #[test]
+    fn async_inside_observable_callback_from_regular_main() {
+        async_inside_observable_callback_helper();
+    }
+
+    fn async_inside_observable_callback_helper() {
+        let interval = std::time::Duration::from_millis(10);
+        let exporter = InMemoryMetricExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone())
+            .with_interval(interval)
+            .build();
+
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+        let meter = meter_provider.meter("test");
+        let _gauge = meter
+            .u64_observable_gauge("my_observable_gauge")
+            .with_callback(|observer| {
+                // using futures_executor::block_on intentionally and avoiding
+                // any particular async runtime.
+                let value = futures_executor::block_on(some_async_function());
+                observer.observe(value, &[]);
+            })
+            .build();
+
+        meter_provider.force_flush().expect("flush should succeed");
+        let exported_metrics = exporter
+            .get_finished_metrics()
+            .expect("this should not fail");
+        assert!(
+            !exported_metrics.is_empty(),
+            "Metrics should be available in exporter."
+        );
+    }
+
+    async fn some_tokio_async_function() -> u64 {
+        // Tokio-specific async function
+        tokio::time::sleep(Duration::from_millis(1)).await;
+        1
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn tokio_async_inside_observable_callback_from_tokio_multi_with_one_worker() {
+        tokio_async_inside_observable_callback_helper(true);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn tokio_async_inside_observable_callback_from_tokio_multi_with_two_worker() {
+        tokio_async_inside_observable_callback_helper(true);
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    #[ignore] //TODO: Investigate if this can be fixed.
+    async fn tokio_async_inside_observable_callback_from_tokio_current_thread() {
+        tokio_async_inside_observable_callback_helper(true);
+    }
+
+    #[test]
+    fn tokio_async_inside_observable_callback_from_regular_main() {
+        tokio_async_inside_observable_callback_helper(false);
+    }
+
+    fn tokio_async_inside_observable_callback_helper(use_current_tokio_runtime: bool) {
+        let interval = std::time::Duration::from_millis(10);
+        let exporter = InMemoryMetricExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone())
+            .with_interval(interval)
+            .build();
+
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+        let meter = meter_provider.meter("test");
+
+        if use_current_tokio_runtime {
+            let rt = tokio::runtime::Handle::current().clone();
+            let _gauge = meter
+                .u64_observable_gauge("my_observable_gauge")
+                .with_callback(move |observer| {
+                    // call a tokio-specific async function from here
+                    let value = rt.block_on(some_tokio_async_function());
+                    observer.observe(value, &[]);
+                })
+                .build();
+            // rt here is a handle to the current tokio runtime,
+            // which is dropped only when tokio::main itself ends.
+        } else {
+            let rt = tokio::runtime::Runtime::new().unwrap();
+            let _gauge = meter
+                .u64_observable_gauge("my_observable_gauge")
+                .with_callback(move |observer| {
+                    // call a tokio-specific async function from here
+                    let value = rt.block_on(some_tokio_async_function());
+                    observer.observe(value, &[]);
+                })
+                .build();
+            // rt is not dropped here, as it is moved into the closure,
+            // and is dropped only when the MeterProvider itself is dropped.
+            // This works when called from a normal main.
+        };
+
+        meter_provider.force_flush().expect("flush should succeed");
+        let exported_metrics = exporter
+            .get_finished_metrics()
+            .expect("this should not fail");
+        assert!(
+            !exported_metrics.is_empty(),
+            "Metrics should be available in exporter."
+        );
+    }
 }
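Not part of the patch: a minimal sketch of how the reworked, runtime-free `PeriodicReader` above is wired up, mirroring the tests in this hunk. It assumes the `testing` feature of `opentelemetry_sdk` (for `InMemoryMetricExporter`) and the re-exported paths shown in the test imports.

```rust
// Illustrative sketch only, assuming the `testing` feature is enabled.
use opentelemetry::metrics::MeterProvider;
use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter;
use std::time::Duration;

fn main() {
    let exporter = InMemoryMetricExporter::default();
    // No async runtime is required; the reader drives its own background work.
    let reader = PeriodicReader::builder(exporter.clone())
        .with_interval(Duration::from_millis(100))
        .build();
    let provider = SdkMeterProvider::builder().with_reader(reader).build();

    let counter = provider.meter("example").u64_counter("requests").build();
    counter.add(1, &[]);

    // force_flush synchronously triggers a collect-and-export cycle,
    // the same path the collection_triggered_by_flush_helper test exercises.
    provider.force_flush().expect("flush should succeed");
    assert!(!exporter.get_finished_metrics().unwrap().is_empty());
}
```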
diff --git a/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs b/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs
new file mode 100644
index 0000000000..33558b579b
--- /dev/null
+++ b/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs
@@ -0,0 +1,508 @@
+use std::{
+    env, fmt, mem,
+    sync::{Arc, Mutex, Weak},
+    time::Duration,
+};
+
+use futures_channel::{mpsc, oneshot};
+use futures_util::{
+    future::{self, Either},
+    pin_mut,
+    stream::{self, FusedStream},
+    StreamExt,
+};
+use opentelemetry::{otel_debug, otel_error};
+
+use crate::runtime::Runtime;
+use crate::{
+    metrics::{exporter::PushMetricExporter, reader::SdkProducer, MetricError, MetricResult},
+    Resource,
+};
+
+use super::{data::ResourceMetrics, reader::MetricReader, InstrumentKind, Pipeline};
+
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(30);
+const DEFAULT_INTERVAL: Duration = Duration::from_secs(60);
+
+const METRIC_EXPORT_INTERVAL_NAME: &str = "OTEL_METRIC_EXPORT_INTERVAL";
+const METRIC_EXPORT_TIMEOUT_NAME: &str = "OTEL_METRIC_EXPORT_TIMEOUT";
+
+/// Configuration options for [PeriodicReader].
+///
+/// A periodic reader is a [MetricReader] that collects and exports metric data
+/// to the exporter at a defined interval.
+///
+/// By default, the returned [MetricReader] will collect and export data every
+/// 60 seconds, and will cancel export attempts that exceed 30 seconds. The
+/// export time is not counted towards the interval between attempts.
+///
+/// The [collect] method of the returned [MetricReader] continues to gather and
+/// return metric data to the user. It will not automatically send that data to
+/// the exporter outside of the predefined interval.
+///
+/// [collect]: MetricReader::collect
+#[derive(Debug)]
+pub struct PeriodicReaderBuilder<E, RT> {
+    interval: Duration,
+    timeout: Duration,
+    exporter: E,
+    runtime: RT,
+}
+
+impl<E, RT> PeriodicReaderBuilder<E, RT>
+where
+    E: PushMetricExporter,
+    RT: Runtime,
+{
+    fn new(exporter: E, runtime: RT) -> Self {
+        let interval = env::var(METRIC_EXPORT_INTERVAL_NAME)
+            .ok()
+            .and_then(|v| v.parse().map(Duration::from_millis).ok())
+            .unwrap_or(DEFAULT_INTERVAL);
+        let timeout = env::var(METRIC_EXPORT_TIMEOUT_NAME)
+            .ok()
+            .and_then(|v| v.parse().map(Duration::from_millis).ok())
+            .unwrap_or(DEFAULT_TIMEOUT);
+
+        PeriodicReaderBuilder {
+            interval,
+            timeout,
+            exporter,
+            runtime,
+        }
+    }
+
+    /// Configures the intervening time between exports for a [PeriodicReader].
+    ///
+    /// This option overrides any value set for the `OTEL_METRIC_EXPORT_INTERVAL`
+    /// environment variable.
+    ///
+    /// If this option is not used or `interval` is equal to zero, 60 seconds is
+    /// used as the default.
+    pub fn with_interval(mut self, interval: Duration) -> Self {
+        if !interval.is_zero() {
+            self.interval = interval;
+        }
+        self
+    }
+
+    /// Configures the time a [PeriodicReader] waits for an export to complete
+    /// before canceling it.
+    ///
+    /// This option overrides any value set for the `OTEL_METRIC_EXPORT_TIMEOUT`
+    /// environment variable.
+    ///
+    /// If this option is not used or `timeout` is equal to zero, 30 seconds is used
+    /// as the default.
+    pub fn with_timeout(mut self, timeout: Duration) -> Self {
+        if !timeout.is_zero() {
+            self.timeout = timeout;
+        }
+        self
+    }
+
+    /// Create a [PeriodicReader] with the given config.
+    pub fn build(self) -> PeriodicReader {
+        let (message_sender, message_receiver) = mpsc::channel(256);
+
+        let worker = move |reader: &PeriodicReader| {
+            let runtime = self.runtime.clone();
+            let reader = reader.clone();
+            self.runtime.spawn(Box::pin(async move {
+                let ticker = runtime
+                    .interval(self.interval)
+                    .skip(1) // The ticker is fired immediately, so we should skip the first one to align with the interval.
+                    .map(|_| Message::Export);
+                let messages = Box::pin(stream::select(message_receiver, ticker));
+                PeriodicReaderWorker {
+                    reader,
+                    timeout: self.timeout,
+                    runtime,
+                    rm: ResourceMetrics {
+                        resource: Resource::empty(),
+                        scope_metrics: Vec::new(),
+                    },
+                }
+                .run(messages)
+                .await
+            }));
+        };
+
+        otel_debug!(
+            name: "PeriodicReader.BuildCompleted",
+            message = "Periodic reader built.",
+            interval_in_secs = self.interval.as_secs(),
+            temporality = format!("{:?}", self.exporter.temporality()),
+        );
+
+        PeriodicReader {
+            exporter: Arc::new(self.exporter),
+            inner: Arc::new(Mutex::new(PeriodicReaderInner {
+                message_sender,
+                is_shutdown: false,
+                sdk_producer_or_worker: ProducerOrWorker::Worker(Box::new(worker)),
+            })),
+        }
+    }
+}
+
+/// A [MetricReader] that continuously collects and exports metric data at a set
+/// interval.
+///
+/// By default it will collect and export data every 60 seconds, and will cancel
+/// export attempts that exceed 30 seconds. The export time is not counted
+/// towards the interval between attempts.
+///
+/// The [collect] method of the returned [MetricReader] continues to gather and
+/// return metric data to the user. It will not automatically send that data to
+/// the exporter outside of the predefined interval.
+///
+/// The [runtime] can be selected based on feature flags set for this crate.
+///
+/// The exporter can be any exporter that implements [PushMetricExporter] such
+/// as [opentelemetry-otlp].
+///
+/// [collect]: MetricReader::collect
+/// [runtime]: crate::runtime
+/// [opentelemetry-otlp]: https://docs.rs/opentelemetry-otlp/latest/opentelemetry_otlp/
+///
+/// # Example
+///
+/// ```no_run
+/// use opentelemetry_sdk::metrics::periodic_reader_with_async_runtime::PeriodicReader;
+/// # fn example<E, R>(get_exporter: impl Fn() -> E, get_runtime: impl Fn() -> R)
+/// # where
+/// #     E: opentelemetry_sdk::metrics::exporter::PushMetricExporter,
+/// #     R: opentelemetry_sdk::runtime::Runtime,
+/// # {
+///
+/// let exporter = get_exporter(); // set up a push exporter like OTLP
+/// let runtime = get_runtime(); // select runtime: e.g. opentelemetry_sdk::runtime::Tokio
+///
+/// let reader = PeriodicReader::builder(exporter, runtime).build();
+/// # drop(reader);
+/// # }
+/// ```
+#[derive(Clone)]
+pub struct PeriodicReader {
+    exporter: Arc<dyn PushMetricExporter>,
+    inner: Arc<Mutex<PeriodicReaderInner>>,
+}
+
+impl PeriodicReader {
+    /// Configuration options for a periodic reader
+    pub fn builder<E, RT>(exporter: E, runtime: RT) -> PeriodicReaderBuilder<E, RT>
+    where
+        E: PushMetricExporter,
+        RT: Runtime,
+    {
+        PeriodicReaderBuilder::new(exporter, runtime)
+    }
+}
+
+impl fmt::Debug for PeriodicReader {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("PeriodicReader").finish()
+    }
+}
+
+struct PeriodicReaderInner {
+    message_sender: mpsc::Sender<Message>,
+    is_shutdown: bool,
+    sdk_producer_or_worker: ProducerOrWorker,
+}
+
+#[derive(Debug)]
+enum Message {
+    Export,
+    Flush(oneshot::Sender<MetricResult<()>>),
+    Shutdown(oneshot::Sender<MetricResult<()>>),
+}
+
+enum ProducerOrWorker {
+    Producer(Weak<dyn SdkProducer>),
+    Worker(Box<dyn FnOnce(&PeriodicReader) + Send + Sync>),
+}
+
+struct PeriodicReaderWorker<RT: Runtime> {
+    reader: PeriodicReader,
+    timeout: Duration,
+    runtime: RT,
+    rm: ResourceMetrics,
+}
+
+impl<RT: Runtime> PeriodicReaderWorker<RT> {
+    async fn collect_and_export(&mut self) -> MetricResult<()> {
+        self.reader.collect(&mut self.rm)?;
+        if self.rm.scope_metrics.is_empty() {
+            otel_debug!(
+                name: "PeriodicReaderWorker.NoMetricsToExport",
+            );
+            // No metrics to export.
+            return Ok(());
+        }
+
+        otel_debug!(
+            name: "PeriodicReaderWorker.InvokeExporter",
+            message = "Calling exporter's export method with collected metrics.",
+            count = self.rm.scope_metrics.len(),
+        );
+        let export = self.reader.exporter.export(&mut self.rm);
+        let timeout = self.runtime.delay(self.timeout);
+        pin_mut!(export);
+        pin_mut!(timeout);
+
+        match future::select(export, timeout).await {
+            Either::Left((res, _)) => {
+                res // return the status of export.
+            }
+            Either::Right(_) => Err(MetricError::Other("export timed out".into())),
+        }
+    }
+
+    async fn process_message(&mut self, message: Message) -> bool {
+        match message {
+            Message::Export => {
+                otel_debug!(
+                    name: "PeriodicReader.ExportTriggered",
+                    message = "Export message received.",
+                );
+                if let Err(err) = self.collect_and_export().await {
+                    otel_error!(
+                        name: "PeriodicReader.ExportFailed",
+                        message = "Failed to export metrics",
+                        reason = format!("{}", err));
+                }
+            }
+            Message::Flush(ch) => {
+                otel_debug!(
+                    name: "PeriodicReader.ForceFlushCalled",
+                    message = "Flush message received.",
+                );
+                let res = self.collect_and_export().await;
+                if let Err(send_error) = ch.send(res) {
+                    otel_debug!(
+                        name: "PeriodicReader.Flush.SendResultError",
+                        message = "Failed to send flush result.",
+                        reason = format!("{:?}", send_error),
+                    );
+                }
+            }
+            Message::Shutdown(ch) => {
+                otel_debug!(
+                    name: "PeriodicReader.ShutdownCalled",
+                    message = "Shutdown message received",
+                );
+                let res = self.collect_and_export().await;
+                let _ = self.reader.exporter.shutdown();
+                if let Err(send_error) = ch.send(res) {
+                    otel_debug!(
+                        name: "PeriodicReader.Shutdown.SendResultError",
+                        message = "Failed to send shutdown result",
+                        reason = format!("{:?}", send_error),
+                    );
+                }
+                return false;
+            }
+        }
+
+        true
+    }
+
+    async fn run(mut self, mut messages: impl FusedStream<Item = Message> + Unpin) {
+        while let Some(message) = messages.next().await {
+            if !self.process_message(message).await {
+                break;
+            }
+        }
+    }
+}
+
+impl MetricReader for PeriodicReader {
+    fn register_pipeline(&self, pipeline: Weak<Pipeline>) {
+        let mut inner = match self.inner.lock() {
+            Ok(guard) => guard,
+            Err(_) => return,
+        };
+
+        let worker = match &mut inner.sdk_producer_or_worker {
+            ProducerOrWorker::Producer(_) => {
+                // Only register once. If producer is already set, do nothing.
+                otel_debug!(name: "PeriodicReader.DuplicateRegistration",
+                    message = "duplicate registration found, did not register periodic reader.");
+                return;
+            }
+            ProducerOrWorker::Worker(w) => mem::replace(w, Box::new(|_| {})),
+        };
+
+        inner.sdk_producer_or_worker = ProducerOrWorker::Producer(pipeline);
+        worker(self);
+    }
+
+    fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()> {
+        let inner = self.inner.lock()?;
+        if inner.is_shutdown {
+            return Err(MetricError::Other("reader is shut down".into()));
+        }
+
+        if let Some(producer) = match &inner.sdk_producer_or_worker {
+            ProducerOrWorker::Producer(sdk_producer) => sdk_producer.upgrade(),
+            ProducerOrWorker::Worker(_) => None,
+        } {
+            producer.produce(rm)?;
+        } else {
+            return Err(MetricError::Other("reader is not registered".into()));
+        }
+
+        Ok(())
+    }
+
+    fn force_flush(&self) -> MetricResult<()> {
+        let mut inner = self.inner.lock()?;
+        if inner.is_shutdown {
+            return Err(MetricError::Other("reader is shut down".into()));
+        }
+        let (sender, receiver) = oneshot::channel();
+        inner
+            .message_sender
+            .try_send(Message::Flush(sender))
+            .map_err(|e| MetricError::Other(e.to_string()))?;
+
+        drop(inner); // don't hold lock when blocking on future
+
+        futures_executor::block_on(receiver)
+            .map_err(|err| MetricError::Other(err.to_string()))
+            .and_then(|res| res)
+    }
+
+    fn shutdown(&self) -> MetricResult<()> {
+        let mut inner = self.inner.lock()?;
+        if inner.is_shutdown {
+            return Err(MetricError::Other("reader is already shut down".into()));
+        }
+
+        let (sender, receiver) = oneshot::channel();
+        inner
+            .message_sender
+            .try_send(Message::Shutdown(sender))
+            .map_err(|e| MetricError::Other(e.to_string()))?;
+        drop(inner); // don't hold lock when blocking on future
+
+        let shutdown_result = futures_executor::block_on(receiver)
+            .map_err(|err| MetricError::Other(err.to_string()))?;
+
+        // Acquire the lock again to set the shutdown flag
+        let mut inner = self.inner.lock()?;
+        inner.is_shutdown = true;
+
+        shutdown_result
+    }
+
+    /// The output temporality (optional), a function of instrument kind, used
+    /// when constructing a [MetricReader][metric-reader] while setting up an SDK.
+    /// This SHOULD be obtained from the exporter.
+    ///
+    /// If not configured, the Cumulative temporality SHOULD be used.
+    ///
+    /// [metric-reader]: https://github.com/open-telemetry/opentelemetry-specification/blob/0a78571045ca1dca48621c9648ec3c832c3c541c/specification/metrics/sdk.md#metricreader
+    fn temporality(&self, kind: InstrumentKind) -> super::Temporality {
+        kind.temporality_preference(self.exporter.temporality())
+    }
+}
+
+#[cfg(all(test, feature = "testing"))]
+mod tests {
+    use super::PeriodicReader;
+    use crate::metrics::reader::MetricReader;
+    use crate::metrics::MetricError;
+    use crate::{
+        metrics::data::ResourceMetrics, metrics::SdkMeterProvider, runtime,
+        testing::metrics::InMemoryMetricExporter, Resource,
+    };
+    use opentelemetry::metrics::MeterProvider;
+    use std::sync::mpsc;
+
+    #[test]
+    fn collection_triggered_by_interval_tokio_current() {
+        collection_triggered_by_interval_helper(runtime::TokioCurrentThread);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn collection_triggered_by_interval_from_tokio_multi_one_thread_on_runtime_tokio() {
+        collection_triggered_by_interval_helper(runtime::Tokio);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn collection_triggered_by_interval_from_tokio_multi_two_thread_on_runtime_tokio() {
+        collection_triggered_by_interval_helper(runtime::Tokio);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn collection_triggered_by_interval_from_tokio_multi_one_thread_on_runtime_tokio_current()
+    {
+        collection_triggered_by_interval_helper(runtime::TokioCurrentThread);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+    async fn collection_triggered_by_interval_from_tokio_multi_two_thread_on_runtime_tokio_current()
+    {
+        collection_triggered_by_interval_helper(runtime::TokioCurrentThread);
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    #[ignore = "See issue https://github.com/open-telemetry/opentelemetry-rust/issues/2056"]
+    async fn collection_triggered_by_interval_from_tokio_current_on_runtime_tokio() {
+        collection_triggered_by_interval_helper(runtime::Tokio);
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    async fn collection_triggered_by_interval_from_tokio_current_on_runtime_tokio_current() {
+        collection_triggered_by_interval_helper(runtime::TokioCurrentThread);
+    }
+
+    #[test]
+    fn unregistered_collect() {
+        // Arrange
+        let exporter = InMemoryMetricExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build();
+        let mut rm = ResourceMetrics {
+            resource: Resource::empty(),
+            scope_metrics: Vec::new(),
+        };
+
+        // Act
+        let result = reader.collect(&mut rm);
+
+        // Assert
+        assert!(
+            matches!(result.unwrap_err(), MetricError::Other(err) if err == "reader is not registered")
+        );
+    }
+
+    fn collection_triggered_by_interval_helper<RT>(runtime: RT)
+    where
+        RT: crate::runtime::Runtime,
+    {
+        let interval = std::time::Duration::from_millis(1);
+        let exporter = InMemoryMetricExporter::default();
+        let reader = PeriodicReader::builder(exporter.clone(), runtime)
+            .with_interval(interval)
+            .build();
+        let (sender, receiver) = mpsc::channel();
+
+        // Act
+        let meter_provider = SdkMeterProvider::builder().with_reader(reader).build();
+        let meter = meter_provider.meter("test");
+        let _counter = meter
+            .u64_observable_counter("testcounter")
+            .with_callback(move |_| {
+                sender.send(()).expect("channel should still be open");
+            })
+            .build();
+
+        // Assert
+        receiver
+            .recv()
+            .expect("message should be available in channel, indicating a collection occurred");
+    }
+}
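Not part of the patch: a hedged sketch of the async-runtime flavour of the reader added by the new file above, mirroring its tests. It assumes the `rt-tokio` and `testing` features; note the tests above ignore the `runtime::Tokio`-on-current-thread combination (issue #2056), so a multi-thread flavor is used here.

```rust
// Illustrative sketch only, assuming `rt-tokio` and `testing` features.
use opentelemetry::metrics::MeterProvider;
use opentelemetry_sdk::metrics::periodic_reader_with_async_runtime::PeriodicReader;
use opentelemetry_sdk::metrics::SdkMeterProvider;
use opentelemetry_sdk::runtime;
use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter;

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let exporter = InMemoryMetricExporter::default();
    // Unlike the runtime-free reader, this variant is handed an async
    // runtime (here runtime::Tokio) that drives the periodic export loop.
    let reader = PeriodicReader::builder(exporter.clone(), runtime::Tokio).build();
    let provider = SdkMeterProvider::builder().with_reader(reader).build();

    let counter = provider.meter("example").u64_counter("requests").build();
    counter.add(1, &[]);

    provider.force_flush().expect("flush should succeed");
    assert!(!exporter.get_finished_metrics().unwrap().is_empty());
}
```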
a/opentelemetry-sdk/src/metrics/pipeline.rs b/opentelemetry-sdk/src/metrics/pipeline.rs
index 6e0f139809..5ba4bba75f 100644
--- a/opentelemetry-sdk/src/metrics/pipeline.rs
+++ b/opentelemetry-sdk/src/metrics/pipeline.rs
@@ -5,14 +5,9 @@ use std::{
     sync::{Arc, Mutex},
 };
 
-use opentelemetry::{
-    global,
-    metrics::{MetricsError, Result},
-    KeyValue,
-};
+use opentelemetry::{otel_debug, InstrumentationScope, KeyValue};
 
 use crate::{
-    instrumentation::Scope,
     metrics::{
         aggregation,
         data::{Metric, ResourceMetrics, ScopeMetrics},
@@ -22,6 +17,7 @@ use crate::{
         internal::Number,
         reader::{MetricReader, SdkProducer},
         view::View,
+        MetricError, MetricResult,
     },
     Resource,
 };
@@ -41,7 +37,7 @@ pub struct Pipeline {
     pub(crate) resource: Resource,
     reader: Box<dyn MetricReader>,
     views: Vec<Box<dyn View>>,
-    inner: Box<Mutex<PipelineInner>>,
+    inner: Mutex<PipelineInner>,
 }
 
 impl fmt::Debug for Pipeline {
@@ -55,7 +51,7 @@
 type GenericCallback = Arc<dyn Fn() + Send + Sync>;
 
 #[derive(Default)]
 struct PipelineInner {
-    aggregations: HashMap<Scope, Vec<InstrumentSync>>,
+    aggregations: HashMap<InstrumentationScope, Vec<InstrumentSync>>,
     callbacks: Vec<GenericCallback>,
 }
 
@@ -74,8 +70,12 @@ impl Pipeline {
     /// This method is not idempotent. Duplicate calls will result in duplicate
     /// additions, it is the callers responsibility to ensure this is called with
     /// unique values.
-    fn add_sync(&self, scope: Scope, i_sync: InstrumentSync) {
+    fn add_sync(&self, scope: InstrumentationScope, i_sync: InstrumentSync) {
         let _ = self.inner.lock().map(|mut inner| {
+            otel_debug!(
+                name : "InstrumentCreated",
+                instrument_name = i_sync.name.as_ref(),
+            );
             inner.aggregations.entry(scope).or_default().push(i_sync);
         });
     }
@@ -89,20 +89,24 @@ impl Pipeline {
     }
 
     /// Send accumulated telemetry
-    fn force_flush(&self) -> Result<()> {
+    fn force_flush(&self) -> MetricResult<()> {
         self.reader.force_flush()
     }
 
     /// Shut down pipeline
-    fn shutdown(&self) -> Result<()> {
+    fn shutdown(&self) -> MetricResult<()> {
         self.reader.shutdown()
     }
 }
 
 impl SdkProducer for Pipeline {
     /// Returns aggregated metrics from a single collection.
-    fn produce(&self, rm: &mut ResourceMetrics) -> Result<()> {
+    fn produce(&self, rm: &mut ResourceMetrics) -> MetricResult<()> {
         let inner = self.inner.lock()?;
+        otel_debug!(
+            name: "MeterProviderInvokingObservableCallbacks",
+            count = inner.callbacks.len(),
+        );
         for cb in &inner.callbacks {
             // TODO consider parallel callbacks.
             cb();
@@ -185,7 +189,7 @@ impl fmt::Debug for InstrumentSync {
     }
 }
 
-type Cache<T> = Mutex<HashMap<InstrumentId, Result<Option<Arc<dyn internal::Measure<T>>>>>>;
+type Cache<T> = Mutex<HashMap<InstrumentId, MetricResult<Option<Arc<dyn internal::Measure<T>>>>>>;
 
 /// Facilitates inserting of new instruments from a single scope into a pipeline.
 struct Inserter<T> {
@@ -244,13 +248,17 @@ where
     ///
     /// If an instrument is determined to use a [aggregation::Aggregation::Drop],
    /// that instrument is not inserted nor returned.
-    fn instrument(&self, inst: Instrument) -> Result<Vec<Arc<dyn internal::Measure<T>>>> {
+    fn instrument(
+        &self,
+        inst: Instrument,
+        boundaries: Option<&[f64]>,
+    ) -> MetricResult<Vec<Arc<dyn internal::Measure<T>>>> {
         let mut matched = false;
         let mut measures = vec![];
         let mut errs = vec![];
         let kind = match inst.kind {
             Some(kind) => kind,
-            None => return Err(MetricsError::Other("instrument must have a kind".into())),
+            None => return Err(MetricError::Other("instrument must have a kind".into())),
         };
 
         // The cache will return the same Aggregator instance. Use stream ids to de duplicate.
@@ -283,12 +291,12 @@ where
             if errs.is_empty() {
                 return Ok(measures);
             } else {
-                return Err(MetricsError::Other(format!("{errs:?}")));
+                return Err(MetricError::Other(format!("{errs:?}")));
             }
         }
 
         // Apply implicit default view if no explicit matched.
-        let stream = Stream {
+        let mut stream = Stream {
             name: inst.name,
             description: inst.description,
             unit: inst.unit,
@@ -296,6 +304,14 @@ where
             allowed_attribute_keys: None,
         };
 
+        // Override default histogram boundaries if provided.
+        if let Some(boundaries) = boundaries {
+            stream.aggregation = Some(Aggregation::ExplicitBucketHistogram {
+                boundaries: boundaries.to_vec(),
+                record_min_max: true,
+            });
+        }
+
         match self.cached_aggregator(&inst.scope, kind, stream) {
             Ok(agg) => {
                 if errs.is_empty() {
@@ -304,12 +320,12 @@ where
                 }
                 Ok(measures)
             } else {
-                Err(MetricsError::Other(format!("{errs:?}")))
+                Err(MetricError::Other(format!("{errs:?}")))
             }
         }
         Err(err) => {
             errs.push(err);
-            Err(MetricsError::Other(format!("{errs:?}")))
+            Err(MetricError::Other(format!("{errs:?}")))
         }
     }
 }
@@ -329,10 +345,10 @@ where
     /// is returned.
     fn cached_aggregator(
         &self,
-        scope: &Scope,
+        scope: &InstrumentationScope,
         kind: InstrumentKind,
         mut stream: Stream,
-    ) -> Result<Option<Arc<dyn internal::Measure<T>>>> {
+    ) -> MetricResult<Option<Arc<dyn internal::Measure<T>>>> {
         let mut agg = stream
             .aggregation
             .take()
@@ -344,7 +360,7 @@ where
         }
 
         if let Err(err) = is_aggregator_compatible(&kind, &agg) {
-            return Err(MetricsError::Other(format!(
+            return Err(MetricError::Other(format!(
                 "creating aggregator with instrumentKind: {:?}, aggregation {:?}: {:?}",
                 kind, stream.aggregation, err,
             )));
@@ -368,7 +384,7 @@ where
             .clone()
             .map(|allowed| Arc::new(move |kv: &KeyValue| allowed.contains(&kv.key)) as Arc<_>);
 
-        let b = AggregateBuilder::new(Some(self.pipeline.reader.temporality(kind)), filter);
+        let b = AggregateBuilder::new(self.pipeline.reader.temporality(kind), filter);
         let (m, ca) = match aggregate_fn(b, &agg, kind) {
             Ok(Some((m, ca))) => (m, ca),
             other => return other.map(|fs| fs.map(|(m, _)| m)), // Drop aggregator or error
@@ -389,7 +405,7 @@ where
 
         match cached {
             Ok(opt) => Ok(opt.clone()),
-            Err(err) => Err(MetricsError::Other(err.to_string())),
+            Err(err) => Err(MetricError::Other(err.to_string())),
         }
     }
 
@@ -402,15 +418,18 @@ where
         if existing == id {
             return;
         }
-
-        global::handle_error(MetricsError::Other(format!(
-            "duplicate metric stream definitions, names: ({} and {}), descriptions: ({} and {}), kinds: ({:?} and {:?}), units: ({:?} and {:?}), and numbers: ({} and {})",
+        // If an existing instrument with the same name but different attributes is found,
+        // emit a debug log with details about the conflicting metric stream definitions.
+        otel_debug!(
+            name: "Instrument.DuplicateMetricStreamDefinitions",
+            message = "duplicate metric stream definitions",
+            reason = format!("names: ({} and {}), descriptions: ({} and {}), kinds: ({:?} and {:?}), units: ({:?} and {:?}), and numbers: ({} and {})",
             existing.name, id.name,
             existing.description, id.description,
            existing.kind, id.kind,
             existing.unit, id.unit,
-            existing.number, id.number,
-        )))
+            existing.number, id.number,)
+        );
     }
 }
}
@@ -469,8 +488,7 @@ fn aggregate_fn<T: Number>(
     b: AggregateBuilder<T>,
     agg: &aggregation::Aggregation,
     kind: InstrumentKind,
-) -> Result<Option<AggregateFns<T>>> {
-    use aggregation::Aggregation;
+) -> MetricResult<Option<AggregateFns<T>>> {
     fn box_val<T>(
         (m, ca): (impl internal::Measure<T>, impl internal::ComputeAggregation),
     ) -> (
@@ -543,8 +561,10 @@
 /// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
 /// | Gauge | ✓ | ✓ | | ✓ | ✓ |
 /// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |
-fn is_aggregator_compatible(kind: &InstrumentKind, agg: &aggregation::Aggregation) -> Result<()> {
-    use aggregation::Aggregation;
+fn is_aggregator_compatible(
+    kind: &InstrumentKind,
+    agg: &aggregation::Aggregation,
+) -> MetricResult<()> {
     match agg {
         Aggregation::Default => Ok(()),
         Aggregation::ExplicitBucketHistogram { .. }
@@ -561,7 +581,7 @@ fn is_aggregator_compatible(
             ) {
                 return Ok(());
             }
-            Err(MetricsError::Other("incompatible aggregation".into()))
+            Err(MetricError::Other("incompatible aggregation".into()))
         }
         Aggregation::Sum => {
             match kind {
@@ -573,7 +593,7 @@ fn is_aggregator_compatible(
                 _ => {
                     // TODO: review need for aggregation check after
                     // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
-                    Err(MetricsError::Other("incompatible aggregation".into()))
+                    Err(MetricError::Other("incompatible aggregation".into()))
                 }
             }
        }
@@ -583,7 +603,7 @@ fn is_aggregator_compatible(
             _ => {
                 // TODO: review need for aggregation check after
                 // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
-                Err(MetricsError::Other("incompatible aggregation".into()))
+                Err(MetricError::Other("incompatible aggregation".into()))
             }
         }
     }
@@ -627,7 +647,7 @@ impl Pipelines {
     }
 
     /// Force flush all pipelines
-    pub(crate) fn force_flush(&self) -> Result<()> {
+    pub(crate) fn force_flush(&self) -> MetricResult<()> {
         let mut errs = vec![];
         for pipeline in &self.0 {
             if let Err(err) = pipeline.force_flush() {
@@ -638,12 +658,12 @@
         if errs.is_empty() {
             Ok(())
         } else {
-            Err(MetricsError::Other(format!("{errs:?}")))
+            Err(MetricError::Other(format!("{errs:?}")))
         }
     }
 
     /// Shut down all pipelines
-    pub(crate) fn shutdown(&self) -> Result<()> {
+    pub(crate) fn shutdown(&self) -> MetricResult<()> {
         let mut errs = vec![];
         for pipeline in &self.0 {
             if let Err(err) = pipeline.shutdown() {
@@ -654,7 +674,7 @@
         if errs.is_empty() {
             Ok(())
         } else {
-            Err(MetricsError::Other(format!("{errs:?}")))
+            Err(MetricError::Other(format!("{errs:?}")))
         }
     }
 }
@@ -684,20 +704,28 @@ where
     }
 
     /// The measures that must be updated by the instrument defined by key.
-    pub(crate) fn measures(&self, id: Instrument) -> Result<Vec<Arc<dyn internal::Measure<T>>>> {
+    pub(crate) fn measures(
+        &self,
+        id: Instrument,
+        boundaries: Option<Vec<f64>>,
+    ) -> MetricResult<Vec<Arc<dyn internal::Measure<T>>>> {
         let (mut measures, mut errs) = (vec![], vec![]);
         for inserter in &self.inserters {
-            match inserter.instrument(id.clone()) {
+            match inserter.instrument(id.clone(), boundaries.as_deref()) {
                 Ok(ms) => measures.extend(ms),
                 Err(err) => errs.push(err),
             }
         }
 
         if errs.is_empty() {
+            if measures.is_empty() {
+                // TODO: Emit internal log that measurements from the instrument
+                // are being dropped due to view configuration
+            }
             Ok(measures)
         } else {
-            Err(MetricsError::Other(format!("{errs:?}")))
+            Err(MetricError::Other(format!("{errs:?}")))
         }
     }
 }
diff --git a/opentelemetry-sdk/src/metrics/reader.rs b/opentelemetry-sdk/src/metrics/reader.rs
index 0a6dbc1b4e..8016a0dab4 100644
--- a/opentelemetry-sdk/src/metrics/reader.rs
+++ b/opentelemetry-sdk/src/metrics/reader.rs
@@ -1,13 +1,9 @@
 //! Interfaces for reading and producing metrics
 use std::{fmt, sync::Weak};
 
-use opentelemetry::metrics::Result;
+use crate::metrics::MetricResult;
 
-use super::{
-    data::{ResourceMetrics, Temporality},
-    instrument::InstrumentKind,
-    pipeline::Pipeline,
-};
+use super::{data::ResourceMetrics, pipeline::Pipeline, InstrumentKind, Temporality};
 
 /// The interface used between the SDK and an exporter.
 ///
@@ -23,7 +19,7 @@ use super::{
 ///
 /// Pull-based exporters will typically implement `MetricReader` themselves,
 /// since they read on demand.
-pub trait MetricReader: TemporalitySelector + fmt::Debug + Send + Sync + 'static {
+pub trait MetricReader: fmt::Debug + Send + Sync + 'static {
     /// Registers a [MetricReader] with a [Pipeline].
     ///
     /// The pipeline argument allows the `MetricReader` to signal the sdk to collect
@@ -34,13 +30,13 @@ pub trait MetricReader: TemporalitySelector + fmt::Debug + Send + Sync + 'static
     /// SDK and stores it in the provided [ResourceMetrics] reference.
     ///
     /// An error is returned if this is called after shutdown.
-    fn collect(&self, rm: &mut ResourceMetrics) -> Result<()>;
+    fn collect(&self, rm: &mut ResourceMetrics) -> MetricResult<()>;
 
     /// Flushes all metric measurements held in an export pipeline.
     ///
     /// There is no guaranteed that all telemetry be flushed or all resources have
     /// been released on error.
-    fn force_flush(&self) -> Result<()>;
+    fn force_flush(&self) -> MetricResult<()>;
 
     /// Flushes all metric measurements held in an export pipeline and releases any
     /// held computational resources.
@@ -50,39 +46,17 @@ pub trait MetricReader: TemporalitySelector + fmt::Debug + Send + Sync + 'static
     ///
     /// After `shutdown` is called, calls to `collect` will perform no operation and
     /// instead will return an error indicating the shutdown state.
-    fn shutdown(&self) -> Result<()>;
+    fn shutdown(&self) -> MetricResult<()>;
+
+    /// The output temporality, a function of instrument kind.
+    /// This SHOULD be obtained from the exporter.
+    ///
+    /// If not configured, the Cumulative temporality SHOULD be used.
+    fn temporality(&self, kind: InstrumentKind) -> Temporality;
 }
 
 /// Produces metrics for a [MetricReader].
 pub(crate) trait SdkProducer: fmt::Debug + Send + Sync {
     /// Returns aggregated metrics from a single collection.
-    fn produce(&self, rm: &mut ResourceMetrics) -> Result<()>;
-}
-
-/// An interface for selecting the temporality for an [InstrumentKind].
-pub trait TemporalitySelector: Send + Sync {
-    /// Selects the temporality to use based on the [InstrumentKind].
-    fn temporality(&self, kind: InstrumentKind) -> Temporality;
-}
-
-/// The default temporality used if not specified for a given [InstrumentKind].
-///
-/// [Temporality::Cumulative] will be used for all instrument kinds if this
-/// [TemporalitySelector] is used.
-#[derive(Clone, Default, Debug)]
-pub struct DefaultTemporalitySelector {
-    pub(crate) _private: (),
-}
-
-impl DefaultTemporalitySelector {
-    /// Create a new default temporality selector.
-    pub fn new() -> Self {
-        Self::default()
-    }
-}
-
-impl TemporalitySelector for DefaultTemporalitySelector {
-    fn temporality(&self, _kind: InstrumentKind) -> Temporality {
-        Temporality::Cumulative
-    }
+    fn produce(&self, rm: &mut ResourceMetrics) -> MetricResult<()>;
 }
diff --git a/opentelemetry-sdk/src/metrics/view.rs b/opentelemetry-sdk/src/metrics/view.rs
index d9f256bd2b..ed67fe9de2 100644
--- a/opentelemetry-sdk/src/metrics/view.rs
+++ b/opentelemetry-sdk/src/metrics/view.rs
@@ -1,10 +1,10 @@
 use super::instrument::{Instrument, Stream};
+#[cfg(feature = "spec_unstable_metrics_views")]
+use crate::metrics::{MetricError, MetricResult};
+#[cfg(feature = "spec_unstable_metrics_views")]
 use glob::Pattern;
-use opentelemetry::{
-    global,
-    metrics::{MetricsError, Result},
-};
+#[cfg(feature = "spec_unstable_metrics_views")]
 fn empty_view(_inst: &Instrument) -> Option<Stream> {
     None
 }
@@ -45,6 +45,7 @@ fn empty_view(_inst: &Instrument) -> Option<Stream> {
 /// let provider = SdkMeterProvider::builder().with_view(my_view).build();
 /// # drop(provider)
 /// ```
+#[allow(unreachable_pub)]
 pub trait View: Send + Sync + 'static {
     /// Defines how data should be collected for certain instruments.
     ///
@@ -68,6 +69,7 @@ impl View for Box<dyn View> {
     }
 }
 
+#[cfg(feature = "spec_unstable_metrics_views")]
 /// Creates a [View] that applies the [Stream] mask for all instruments that
 /// match criteria.
 ///
@@ -100,27 +102,22 @@ impl View for Box<dyn View> {
 /// let view = new_view(criteria, mask);
 /// # drop(view);
 /// ```
-pub fn new_view(criteria: Instrument, mask: Stream) -> Result<Box<dyn View>> {
+pub fn new_view(criteria: Instrument, mask: Stream) -> MetricResult<Box<dyn View>> {
     if criteria.is_empty() {
-        global::handle_error(MetricsError::Config(format!(
-            "no criteria provided, dropping view. mask: {mask:?}"
-        )));
+        // TODO - The error is getting lost here. Need to return or log.
         return Ok(Box::new(empty_view));
     }
     let contains_wildcard = criteria.name.contains(['*', '?']);
-    let err_msg_criteria = criteria.clone();
 
     let match_fn: Box<dyn Fn(&Instrument) -> bool + Send + Sync> = if contains_wildcard {
         if mask.name != "" {
-            global::handle_error(MetricsError::Config(format!(
-                "name replacement for multiple instruments, dropping view, criteria: {criteria:?}, mask: {mask:?}"
-            )));
+            // TODO - The error is getting lost here. Need to return or log.
             return Ok(Box::new(empty_view));
         }
         let pattern = criteria.name.clone();
         let glob_pattern =
-            Pattern::new(&pattern).map_err(|e| MetricsError::Config(e.to_string()))?;
+            Pattern::new(&pattern).map_err(|e| MetricError::Config(e.to_string()))?;
 
         Box::new(move |i| {
             glob_pattern.matches(&i.name)
@@ -137,11 +134,8 @@ pub fn new_view(criteria: Instrument, mask: Stream) -> Result<Box<dyn View>> {
     if let Some(ma) = &mask.aggregation {
         match ma.validate() {
             Ok(_) => agg = Some(ma.clone()),
-            Err(err) => {
-                global::handle_error(MetricsError::Other(format!(
-                    "{}, proceeding as if view did not exist. criteria: {:?}, mask: {:?}",
-                    err, err_msg_criteria, mask
-                )));
+            Err(_) => {
+                // TODO - The error is getting lost here. Need to return or log.
                 return Ok(Box::new(empty_view));
             }
         }
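Not part of the patch: a sketch of how the views API touched above is exercised, assuming the `spec_unstable_metrics_views` feature of `opentelemetry_sdk` and that `new_view`, `Instrument`, `Stream`, and `MetricResult` are publicly re-exported from `opentelemetry_sdk::metrics`; `http_*_latency` is a made-up instrument name.

```rust
// Hedged sketch, assuming the `spec_unstable_metrics_views` feature.
use opentelemetry_sdk::metrics::{
    new_view, Aggregation, Instrument, MetricResult, SdkMeterProvider, Stream,
};

fn provider_with_view() -> MetricResult<SdkMeterProvider> {
    // Wildcard criteria paired with an empty mask name: per new_view above,
    // a non-empty mask name combined with a wildcard drops the view.
    let view = new_view(
        Instrument::new().name("http_*_latency"),
        Stream::new().aggregation(Aggregation::ExplicitBucketHistogram {
            boundaries: vec![0.0, 5.0, 10.0, 25.0, 50.0, 100.0],
            record_min_max: true,
        }),
    )?;
    Ok(SdkMeterProvider::builder().with_view(view).build())
}
```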
diff --git a/opentelemetry-sdk/src/propagation/baggage.rs b/opentelemetry-sdk/src/propagation/baggage.rs
index 397edc3fc9..05d93e632f 100644
--- a/opentelemetry-sdk/src/propagation/baggage.rs
+++ b/opentelemetry-sdk/src/propagation/baggage.rs
@@ -1,17 +1,22 @@
-use once_cell::sync::Lazy;
-use opentelemetry::propagation::PropagationError;
 use opentelemetry::{
     baggage::{BaggageExt, KeyValueMetadata},
-    global,
+    otel_warn,
     propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
     Context,
 };
 use percent_encoding::{percent_decode_str, utf8_percent_encode, AsciiSet, CONTROLS};
 use std::iter;
+use std::sync::OnceLock;
 
 static BAGGAGE_HEADER: &str = "baggage";
 const FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b';').add(b',').add(b'=');
-static BAGGAGE_FIELDS: Lazy<[String; 1]> = Lazy::new(|| [BAGGAGE_HEADER.to_owned()]);
+
+// TODO Replace this with LazyLock once it is stable.
+static BAGGAGE_FIELDS: OnceLock<[String; 1]> = OnceLock::new();
+#[inline]
+fn baggage_fields() -> &'static [String; 1] {
+    BAGGAGE_FIELDS.get_or_init(|| [BAGGAGE_HEADER.to_owned()])
+}
 
 /// Propagates name-value pairs in [W3C Baggage] format.
 ///
@@ -120,24 +125,26 @@ impl TextMapPropagator for BaggagePropagator {
                             decoded_props.as_str(),
                         ))
                     } else {
-                        global::handle_error(PropagationError::extract(
-                            "invalid UTF8 string in key values",
-                            "BaggagePropagator",
-                        ));
+                        otel_warn!(
+                            name: "BaggagePropagator.Extract.InvalidUTF8",
+                            message = "Invalid UTF8 string in key values",
+                            baggage_header = header_value,
+                        );
                         None
                     }
                 } else {
-                    global::handle_error(PropagationError::extract(
-                        "invalid baggage key-value format",
-                        "BaggagePropagator",
-                    ));
+                    otel_warn!(
+                        name: "BaggagePropagator.Extract.InvalidKeyValueFormat",
+                        message = "Invalid baggage key-value format",
+                        baggage_header = header_value,
+                    );
                     None
                 }
            } else {
-                global::handle_error(PropagationError::extract(
-                    "invalid baggage format",
-                    "BaggagePropagator",
-                ));
+                otel_warn!(
+                    name: "BaggagePropagator.Extract.InvalidFormat",
+                    message = "Invalid baggage format",
+                    baggage_header = header_value);
                 None
             }
         });
@@ -148,7 +155,7 @@ impl TextMapPropagator for BaggagePropagator {
     }
 
     fn fields(&self) -> FieldIter<'_> {
-        FieldIter::new(BAGGAGE_FIELDS.as_ref())
+        FieldIter::new(baggage_fields())
     }
 }
diff --git a/opentelemetry-sdk/src/propagation/trace_context.rs b/opentelemetry-sdk/src/propagation/trace_context.rs
index ced269c2d2..2a6d53ef04 100644
--- a/opentelemetry-sdk/src/propagation/trace_context.rs
+++ b/opentelemetry-sdk/src/propagation/trace_context.rs
@@ -1,21 +1,26 @@
 //! # W3C Trace Context Propagator
 //!
-use once_cell::sync::Lazy;
 use opentelemetry::{
     propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
     trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState},
     Context,
 };
 use std::str::FromStr;
+use std::sync::OnceLock;
 
 const SUPPORTED_VERSION: u8 = 0;
 const MAX_VERSION: u8 = 254;
 const TRACEPARENT_HEADER: &str = "traceparent";
 const TRACESTATE_HEADER: &str = "tracestate";
-static TRACE_CONTEXT_HEADER_FIELDS: Lazy<[String; 2]> =
-    Lazy::new(|| [TRACEPARENT_HEADER.to_owned(), TRACESTATE_HEADER.to_owned()]);
+
+// TODO Replace this with LazyLock once it is stable.
+static TRACE_CONTEXT_HEADER_FIELDS: OnceLock<[String; 2]> = OnceLock::new();
+
+fn trace_context_header_fields() -> &'static [String; 2] {
+    TRACE_CONTEXT_HEADER_FIELDS
+        .get_or_init(|| [TRACEPARENT_HEADER.to_owned(), TRACESTATE_HEADER.to_owned()])
+}
 
 /// Propagates `SpanContext`s in [W3C TraceContext] format under `traceparent` and `tracestate` header.
 ///
@@ -146,7 +151,7 @@ impl TextMapPropagator for TraceContextPropagator {
     }
 
     fn fields(&self) -> FieldIter<'_> {
-        FieldIter::new(TRACE_CONTEXT_HEADER_FIELDS.as_ref())
+        FieldIter::new(trace_context_header_fields())
     }
 }
diff --git a/opentelemetry-sdk/src/resource/env.rs b/opentelemetry-sdk/src/resource/env.rs
index b5287e1c76..ac5ee0c034 100644
--- a/opentelemetry-sdk/src/resource/env.rs
+++ b/opentelemetry-sdk/src/resource/env.rs
@@ -5,7 +5,6 @@
 use crate::resource::{Resource, ResourceDetector};
 use opentelemetry::{Key, KeyValue, Value};
 use std::env;
-use std::time::Duration;
 
 const OTEL_RESOURCE_ATTRIBUTES: &str = "OTEL_RESOURCE_ATTRIBUTES";
 const OTEL_SERVICE_NAME: &str = "OTEL_SERVICE_NAME";
@@ -20,10 +19,10 @@ pub struct EnvResourceDetector {
 }
 
 impl ResourceDetector for EnvResourceDetector {
-    fn detect(&self, _timeout: Duration) -> Resource {
+    fn detect(&self) -> Resource {
         match env::var(OTEL_RESOURCE_ATTRIBUTES) {
             Ok(s) if !s.is_empty() => construct_otel_resources(s),
-            Ok(_) | Err(_) => Resource::new(vec![]), // return empty resource
+            Ok(_) | Err(_) => Resource::empty(), // return empty resource
         }
     }
 }
@@ -44,16 +43,18 @@ impl Default for EnvResourceDetector {
 /// Extract key value pairs and construct a resource from resources string like
 /// key1=value1,key2=value2,...
 fn construct_otel_resources(s: String) -> Resource {
-    Resource::new(s.split_terminator(',').filter_map(|entry| {
-        let mut parts = entry.splitn(2, '=');
-        let key = parts.next()?.trim();
-        let value = parts.next()?.trim();
-        if value.find('=').is_some() {
-            return None;
-        }
-
-        Some(KeyValue::new(key.to_owned(), value.to_owned()))
-    }))
+    Resource::builder_empty()
+        .with_attributes(s.split_terminator(',').filter_map(|entry| {
+            let mut parts = entry.splitn(2, '=');
+            let key = parts.next()?.trim();
+            let value = parts.next()?.trim();
+            if value.find('=').is_some() {
+                return None;
+            }
+
+            Some(KeyValue::new(key.to_owned(), value.to_owned()))
+        }))
+        .build()
 }
 
 /// There are attributes which MUST be provided by the SDK as specified in
@@ -72,20 +73,22 @@
 pub struct SdkProvidedResourceDetector;
 
 impl ResourceDetector for SdkProvidedResourceDetector {
-    fn detect(&self, _timeout: Duration) -> Resource {
-        Resource::new(vec![KeyValue::new(
-            super::SERVICE_NAME,
-            env::var(OTEL_SERVICE_NAME)
-                .ok()
-                .filter(|s| !s.is_empty())
-                .map(Value::from)
-                .or_else(|| {
-                    EnvResourceDetector::new()
-                        .detect(Duration::from_secs(0))
-                        .get(Key::new(super::SERVICE_NAME))
-                })
-                .unwrap_or_else(|| "unknown_service".into()),
-        )])
+    fn detect(&self) -> Resource {
+        Resource::builder_empty()
+            .with_attributes([KeyValue::new(
+                super::SERVICE_NAME,
+                env::var(OTEL_SERVICE_NAME)
+                    .ok()
+                    .filter(|s| !s.is_empty())
+                    .map(Value::from)
+                    .or_else(|| {
+                        EnvResourceDetector::new()
+                            .detect()
+                            .get(Key::new(super::SERVICE_NAME))
+                    })
+                    .unwrap_or_else(|| "unknown_service".into()),
+            )])
+            .build()
     }
 }
 
@@ -96,7 +99,6 @@ mod tests {
     };
     use crate::resource::{EnvResourceDetector, Resource, ResourceDetector};
     use opentelemetry::{Key, KeyValue, Value};
-    use std::time::Duration;
 
     #[test]
     fn test_read_from_env() {
@@ -110,35 +112,37 @@ mod tests {
            ],
             || {
                 let detector = EnvResourceDetector::new();
-                let resource = detector.detect(Duration::from_secs(5));
+                let resource = detector.detect();
                 assert_eq!(
                     resource,
-                    Resource::new(vec![
-                        KeyValue::new("key", "value"),
-                        KeyValue::new("k", "v"),
-                        KeyValue::new("a", "x"),
-                        KeyValue::new("a", "z"),
-                    ])
+                    Resource::builder_empty()
+                        .with_attributes([
+                            KeyValue::new("key", "value"),
+                            KeyValue::new("k", "v"),
+                            KeyValue::new("a", "x"),
+                            KeyValue::new("a", "z"),
+                        ])
+                        .build()
                 );
             },
         );
 
         let detector = EnvResourceDetector::new();
-        let resource = detector.detect(Duration::from_secs(5));
+        let resource = detector.detect();
         assert!(resource.is_empty());
     }
 
     #[test]
     fn test_sdk_provided_resource_detector() {
         // Ensure no env var set
-        let no_env = SdkProvidedResourceDetector.detect(Duration::from_secs(1));
+        let no_env = SdkProvidedResourceDetector.detect();
         assert_eq!(
             no_env.get(Key::from_static_str(crate::resource::SERVICE_NAME)),
             Some(Value::from("unknown_service")),
         );
 
         temp_env::with_var(OTEL_SERVICE_NAME, Some("test service"), || {
-            let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1));
+            let with_service = SdkProvidedResourceDetector.detect();
             assert_eq!(
                 with_service.get(Key::from_static_str(crate::resource::SERVICE_NAME)),
                 Some(Value::from("test service")),
@@ -149,7 +153,7 @@ mod tests {
             OTEL_RESOURCE_ATTRIBUTES,
             Some("service.name=test service1"),
             || {
-                let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1));
+                let with_service = SdkProvidedResourceDetector.detect();
                 assert_eq!(
                     with_service.get(Key::from_static_str(crate::resource::SERVICE_NAME)),
                     Some(Value::from("test service1")),
@@ -164,7 +168,7 @@ mod tests {
                 (OTEL_RESOURCE_ATTRIBUTES, Some("service.name=test service3")),
             ],
             || {
-                let with_service = SdkProvidedResourceDetector.detect(Duration::from_secs(1));
+                let with_service = SdkProvidedResourceDetector.detect();
                 assert_eq!(
                     with_service.get(Key::from_static_str(crate::resource::SERVICE_NAME)),
                     Some(Value::from("test service"))
diff --git a/opentelemetry-sdk/src/resource/mod.rs b/opentelemetry-sdk/src/resource/mod.rs
index dc9bbe316b..c7a26978ea 100644
--- a/opentelemetry-sdk/src/resource/mod.rs
+++ b/opentelemetry-sdk/src/resource/mod.rs
@@ -35,7 +35,6 @@
 use std::borrow::Cow;
 use std::collections::{hash_map, HashMap};
 use std::ops::Deref;
 use std::sync::Arc;
-use std::time::Duration;
 
 /// Inner structure of `Resource` holding the actual data.
 /// This structure is designed to be shared among `Resource` instances via `Arc`.
@@ -52,23 +51,35 @@ pub struct Resource {
     inner: Arc<ResourceInner>,
 }
 
-impl Default for Resource {
-    fn default() -> Self {
-        Self::from_detectors(
-            Duration::from_secs(0),
-            vec![
+impl Resource {
+    /// Creates a [ResourceBuilder] that allows you to configure multiple aspects of the Resource.
+    ///
+    /// This [ResourceBuilder] will always include the following [ResourceDetector]s:
+    /// - [SdkProvidedResourceDetector]
+    /// - [TelemetryResourceDetector]
+    /// - [EnvResourceDetector]
+    pub fn builder() -> ResourceBuilder {
+        ResourceBuilder {
+            resource: Self::from_detectors(&[
                 Box::new(SdkProvidedResourceDetector),
                 Box::new(TelemetryResourceDetector),
                 Box::new(EnvResourceDetector::new()),
-            ],
-        )
+            ]),
+        }
+    }
+
+    /// Creates a [ResourceBuilder] that allows you to configure multiple aspects of the Resource.
+    ///
+    /// This [ResourceBuilder] will not include any attributes or [ResourceDetector]s by default.
+    pub fn builder_empty() -> ResourceBuilder {
+        ResourceBuilder {
+            resource: Resource::empty(),
+        }
     }
-}
 
-impl Resource {
     /// Creates an empty resource.
     /// This is the basic constructor that initializes a resource with no attributes and no schema URL.
-    pub fn empty() -> Self {
+    pub(crate) fn empty() -> Self {
         Resource {
             inner: Arc::new(ResourceInner {
                 attrs: HashMap::new(),
@@ -81,7 +92,7 @@ impl Resource {
     ///
     /// Values are de-duplicated by key, and the first key-value pair with a non-empty string value
     /// will be retained
-    pub fn new<T: IntoIterator<Item = KeyValue>>(kvs: T) -> Self {
+    pub(crate) fn new<T: IntoIterator<Item = KeyValue>>(kvs: T) -> Self {
         let mut attrs = HashMap::new();
         for kv in kvs {
             attrs.insert(kv.key, kv.value);
@@ -103,7 +114,7 @@ impl Resource {
     /// schema_url must be a valid URL using HTTP or HTTPS protocol.
     ///
     /// [schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url
-    pub fn from_schema_url<KV, S>(kvs: KV, schema_url: S) -> Self
+    fn from_schema_url<KV, S>(kvs: KV, schema_url: S) -> Self
     where
         KV: IntoIterator<Item = KeyValue>,
         S: Into<Cow<'static, str>>,
@@ -129,10 +140,10 @@ impl Resource {
     /// Create a new `Resource` from resource detectors.
     ///
     /// timeout will be applied to each detector.
-    pub fn from_detectors(timeout: Duration, detectors: Vec<Box<dyn ResourceDetector>>) -> Self {
+    fn from_detectors(detectors: &[Box<dyn ResourceDetector>]) -> Self {
         let mut resource = Resource::empty();
         for detector in detectors {
-            let detected_res = detector.detect(timeout);
+            let detected_res = detector.detect();
             // This call ensures that if the Arc is not uniquely owned,
             // the data is cloned before modification, preserving safety.
             // If the Arc is uniquely owned, it simply returns a mutable reference to the data.
@@ -160,7 +171,7 @@ impl Resource {
     /// 5. If both resources do not have a schema url, the schema url will be empty.
     ///
     /// [Schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url
-    pub fn merge<T: Deref<Target = Self>>(&self, other: T) -> Self {
+    fn merge<T: Deref<Target = Self>>(&self, other: T) -> Self {
         if self.is_empty() {
             return other.clone();
         }
@@ -171,6 +182,7 @@ impl Resource {
         for (k, v) in other.inner.attrs.iter() {
             combined_attrs.insert(k.clone(), v.clone());
         }
+        // Resolve the schema URL according to the precedence rules
 
         let combined_schema_url = match (&self.inner.schema_url, &other.inner.schema_url) {
             // If both resources have a schema URL and it's the same, use it
@@ -244,7 +256,7 @@ impl<'a> IntoIterator for &'a Resource {
 
 /// ResourceDetector detects OpenTelemetry resource information
 ///
 /// Implementations of this trait can be passed to
-/// the [`Resource::from_detectors`] function to generate a Resource from the merged information.
+/// the [`ResourceBuilder::with_detectors`] function to generate a Resource from the merged information.
 pub trait ResourceDetector {
     /// detect returns an initialized Resource based on gathered information.
     ///
@@ -254,22 +266,84 @@ pub trait ResourceDetector {
     ///
     /// If source information to construct a Resource is invalid, for example,
     /// missing required values. an empty Resource should be returned.
-    fn detect(&self, timeout: Duration) -> Resource;
+    fn detect(&self) -> Resource;
+}
+
+/// Builder for [Resource]
+#[derive(Debug)]
+pub struct ResourceBuilder {
+    resource: Resource,
+}
+
+impl ResourceBuilder {
+    /// Add a single [ResourceDetector] to your resource.
+    pub fn with_detector(self, detector: Box<dyn ResourceDetector>) -> Self {
+        self.with_detectors(&[detector])
+    }
+
+    /// Add multiple [ResourceDetector]s to your resource.
+    pub fn with_detectors(mut self, detectors: &[Box<dyn ResourceDetector>]) -> Self {
+        self.resource = self.resource.merge(&Resource::from_detectors(detectors));
+        self
+    }
+
+    /// Add a [KeyValue] to the resource.
+    pub fn with_attribute(self, kv: KeyValue) -> Self {
+        self.with_attributes([kv])
+    }
+
+    /// Add multiple [KeyValue]s to the resource.
+    pub fn with_attributes<T: IntoIterator<Item = KeyValue>>(mut self, kvs: T) -> Self {
+        self.resource = self.resource.merge(&Resource::new(kvs));
+        self
+    }
+
+    /// Add `service.name` resource attribute.
+    pub fn with_service_name(self, name: impl Into<Value>) -> Self {
+        self.with_attribute(KeyValue::new(SERVICE_NAME, name.into()))
+    }
+
+    /// This will merge the provided `schema_url` with the current state of the Resource being built. It
+    /// will use the following rules to determine which `schema_url` should be used.
+    ///
+    /// ### [Schema url]
+    /// Schema url is determined by the following rules, in order:
+    /// 1. If the current builder resource doesn't have a `schema_url`, the provided `schema_url` will be used.
+    /// 2. If the current builder resource has a `schema_url`, and the provided `schema_url` is different from the builder resource, `schema_url` will be empty.
+    /// 3. If the provided `schema_url` is the same as the current builder resource, it will be used.
+    ///
+    /// [Schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url
+    pub fn with_schema_url<KV, S>(mut self, attributes: KV, schema_url: S) -> Self
+    where
+        KV: IntoIterator<Item = KeyValue>,
+        S: Into<Cow<'static, str>>,
+    {
+        self.resource = Resource::from_schema_url(attributes, schema_url).merge(&self.resource);
+        self
+    }
+
+    /// Create a [Resource] with the options provided to the [ResourceBuilder].
+    pub fn build(self) -> Resource {
+        self.resource
+    }
 }
 
 #[cfg(test)]
 mod tests {
+    use rstest::rstest;
+
     use super::*;
-    use std::time;
 
     #[test]
     fn new_resource() {
-        let args_with_dupe_keys = vec![KeyValue::new("a", ""), KeyValue::new("a", "final")];
+        let args_with_dupe_keys = [KeyValue::new("a", ""), KeyValue::new("a", "final")];
 
         let mut expected_attrs = HashMap::new();
         expected_attrs.insert(Key::new("a"), Value::from("final"));
 
-        let resource = Resource::new(args_with_dupe_keys);
+        let resource = Resource::builder_empty()
+            .with_attributes(args_with_dupe_keys)
+            .build();
         let resource_inner = Arc::try_unwrap(resource.inner).expect("Failed to unwrap Arc");
         assert_eq!(resource_inner.attrs, expected_attrs);
         assert_eq!(resource_inner.schema_url, None);
@@ -277,17 +351,21 @@ mod tests {
 
     #[test]
     fn merge_resource_key_value_pairs() {
-        let resource_a = Resource::new(vec![
-            KeyValue::new("a", ""),
-            KeyValue::new("b", "b-value"),
-            KeyValue::new("d", "d-value"),
-        ]);
-
-        let resource_b = Resource::new(vec![
-            KeyValue::new("a", "a-value"),
-            KeyValue::new("c", "c-value"),
-            KeyValue::new("d", ""),
-        ]);
+        let resource_a = Resource::builder_empty()
+            .with_attributes([
+                KeyValue::new("a", ""),
+                KeyValue::new("b", "b-value"),
+                KeyValue::new("d", "d-value"),
+            ])
+            .build();
+
+        let resource_b = Resource::builder_empty()
+            .with_attributes([
+                KeyValue::new("a", "a-value"),
+                KeyValue::new("c", "c-value"),
+                KeyValue::new("d", ""),
+            ])
+            .build();
 
         let mut expected_attrs = HashMap::new();
         expected_attrs.insert(Key::new("a"), Value::from("a-value"));
@@ -305,47 +383,57 @@ mod tests {
         assert_eq!(resource_a.merge(&resource_b), expected_resource);
     }
 
-    #[test]
-    fn merge_resource_schema_url() {
-        // if both resources contains key value pairs
-        let test_cases = vec![
-            (Some("http://schema/a"), None, Some("http://schema/a")),
-            (Some("http://schema/a"), Some("http://schema/b"), None),
-            (None, Some("http://schema/b"), Some("http://schema/b")),
-            (
-                Some("http://schema/a"),
-                Some("http://schema/a"),
-                Some("http://schema/a"),
-            ),
-            (None, None, None),
-        ];
-
-        for (schema_url_a, schema_url_b, expected_schema_url) in test_cases.into_iter() {
-            let resource_a = Resource::from_schema_url(
-                vec![KeyValue::new("key", "")],
-                schema_url_a.unwrap_or(""),
-            );
-            let resource_b = Resource::from_schema_url(
-                vec![KeyValue::new("key", "")],
-                schema_url_b.unwrap_or(""),
-            );
-
-            let merged_resource = resource_a.merge(&resource_b);
-            let result_schema_url = merged_resource.schema_url();
-
-            assert_eq!(
-                result_schema_url.map(|s| s as &str),
-                expected_schema_url,
-                "Merging schema_url_a {:?} with schema_url_b {:?} did not yield expected result {:?}",
-                schema_url_a, schema_url_b, expected_schema_url
-            );
-        }
-
-        // if only one resource contains key value pairs
-        let resource = Resource::from_schema_url(vec![], "http://schema/a");
-        let other_resource = Resource::new(vec![KeyValue::new("key", "")]);
+    #[rstest]
+    #[case(Some("http://schema/a"), None, Some("http://schema/a"))]
+    #[case(Some("http://schema/a"), Some("http://schema/b"), None)]
+    #[case(None, Some("http://schema/b"), Some("http://schema/b"))]
+    #[case(
+        Some("http://schema/a"),
+        Some("http://schema/a"),
+        Some("http://schema/a")
+    )]
+    #[case(None, None, None)]
+    fn merge_resource_schema_url(
+        #[case] schema_url_a: Option<&'static str>,
+        #[case] schema_url_b: Option<&'static str>,
+        #[case] expected_schema_url: Option<&'static str>,
+    ) {
+        let resource_a =
+            Resource::from_schema_url([KeyValue::new("key", "")], schema_url_a.unwrap_or(""));
+        let resource_b =
+            Resource::from_schema_url([KeyValue::new("key", "")], schema_url_b.unwrap_or(""));
+
+        let merged_resource = resource_a.merge(&resource_b);
+        let result_schema_url = merged_resource.schema_url();
+
+        assert_eq!(
+            result_schema_url.map(|s| s as &str),
+            expected_schema_url,
+            "Merging schema_url_a {:?} with schema_url_b {:?} did not yield expected result {:?}",
+            schema_url_a,
+            schema_url_b,
+            expected_schema_url
+        );
+    }
 
-        assert_eq!(resource.merge(&other_resource).schema_url(), None);
+    #[rstest]
+    #[case(vec![], vec![KeyValue::new("key", "b")], "http://schema/a", None)]
+    #[case(vec![KeyValue::new("key", "a")], vec![KeyValue::new("key", "b")], "http://schema/a", Some("http://schema/a"))]
+    fn merge_resource_with_missing_attributes(
+        #[case] key_values_a: Vec<KeyValue>,
+        #[case] key_values_b: Vec<KeyValue>,
+        #[case] schema_url: &'static str,
+        #[case] expected_schema_url: Option<&'static str>,
+    ) {
+        let resource = Resource::from_schema_url(key_values_a, schema_url);
+        let other_resource = Resource::builder_empty()
+            .with_attributes(key_values_b)
+            .build();
+
+        assert_eq!(
+            resource.merge(&other_resource).schema_url(),
+            expected_schema_url
+        );
     }
 
     #[test]
@@ -360,18 +448,96 @@ mod tests {
             ],
             || {
                 let detector = EnvResourceDetector::new();
-                let resource = Resource::from_detectors(
-                    time::Duration::from_secs(5),
-                    vec![Box::new(detector)],
-                );
+                let resource = Resource::from_detectors(&[Box::new(detector)]);
                 assert_eq!(
                     resource,
-                    Resource::new(vec![
-                        KeyValue::new("key", "value"),
-                        KeyValue::new("k", "v"),
-                        KeyValue::new("a", "x"),
-                        KeyValue::new("a", "z"),
+                    Resource::builder_empty()
+                        .with_attributes([
+                            KeyValue::new("key", "value"),
+                            KeyValue::new("k", "v"),
+                            KeyValue::new("a", "x"),
+                            KeyValue::new("a", "z"),
+                        ])
+                        .build()
                 )
             },
         )
    }
 
+    #[rstest]
+    #[case(Some("http://schema/a"), Some("http://schema/b"), None)]
+    #[case(None, Some("http://schema/b"), Some("http://schema/b"))]
+    #[case(
+        Some("http://schema/a"),
+        Some("http://schema/a"),
+        Some("http://schema/a")
+    )]
+    fn builder_with_schema_url(
+        #[case] schema_url_a: Option<&'static str>,
+        #[case] schema_url_b: Option<&'static str>,
+        #[case] expected_schema_url: Option<&'static str>,
+    ) {
+        let base_builder = if let Some(url) = schema_url_a {
+            ResourceBuilder {
+                resource: Resource::from_schema_url(vec![KeyValue::new("key", "")], url),
+            }
+        } else {
+            ResourceBuilder {
+                resource: Resource::empty(),
+            }
+        };
+
+        let resource = base_builder
+            .with_schema_url(
+                vec![KeyValue::new("key", "")],
+                schema_url_b.expect("should always be Some for this test"),
+            )
+            .build();
+
+        assert_eq!(
+            resource.schema_url().map(|s| s as &str),
+            expected_schema_url,
+            "Merging schema_url_a {:?} with schema_url_b {:?} did not yield expected result {:?}",
+            schema_url_a,
+            schema_url_b,
+            expected_schema_url
+        );
+    }
+
+    #[test]
+    fn builder_detect_resource() {
+        temp_env::with_vars(
+            [
+                (
+                    "OTEL_RESOURCE_ATTRIBUTES",
+                    Some("key=value, k = v , a= x, a=z"),
+                ),
+                ("IRRELEVANT", Some("20200810")),
+            ],
+            || {
+                let resource = Resource::builder_empty()
+                    .with_detector(Box::new(EnvResourceDetector::new()))
+                    .with_service_name("testing_service")
+                    .with_attribute(KeyValue::new("test1", "test_value"))
+                    .with_attributes([
+                        KeyValue::new("test1", "test_value1"),
+                        KeyValue::new("test2", "test_value2"),
+                    ])
+                    .build();
+
+                assert_eq!(
+                    resource,
+                    Resource::builder_empty()
+                        .with_attributes([
+                            KeyValue::new("key", "value"),
+                            KeyValue::new("test1", "test_value1"),
+                            KeyValue::new("test2", "test_value2"),
+                            KeyValue::new(SERVICE_NAME, "testing_service"),
+                            KeyValue::new("k", "v"),
+                            KeyValue::new("a", "x"),
+                            KeyValue::new("a", "z"),
+                        ])
+                        .build()
                 )
             },
         )
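Not part of the patch: a short sketch of the new `ResourceBuilder` API introduced above, modeled directly on the `builder_detect_resource` test; the service name and attribute values are made up for illustration.

```rust
// Illustrative sketch of the ResourceBuilder introduced in this diff.
use opentelemetry::{Key, KeyValue};
use opentelemetry_sdk::resource::EnvResourceDetector;
use opentelemetry_sdk::Resource;

fn main() {
    // Resource::builder() seeds the SDK-provided, telemetry, and env
    // detectors; builder_empty() starts from a blank resource instead.
    let resource = Resource::builder()
        .with_service_name("checkout-service")
        .with_detector(Box::new(EnvResourceDetector::new()))
        .with_attribute(KeyValue::new("deployment.environment", "staging"))
        .build();

    // service.name is set either explicitly (as here) or by the detectors.
    assert!(resource.get(Key::new("service.name")).is_some());
}
```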
/// @@ -16,11 +15,13 @@ use std::time::Duration; pub struct TelemetryResourceDetector; impl ResourceDetector for TelemetryResourceDetector { - fn detect(&self, _timeout: Duration) -> Resource { - Resource::new(vec![ - KeyValue::new(super::TELEMETRY_SDK_NAME, "opentelemetry"), - KeyValue::new(super::TELEMETRY_SDK_LANGUAGE, "rust"), - KeyValue::new(super::TELEMETRY_SDK_VERSION, env!("CARGO_PKG_VERSION")), - ]) + fn detect(&self) -> Resource { + Resource::builder_empty() + .with_attributes([ + KeyValue::new(super::TELEMETRY_SDK_NAME, "opentelemetry"), + KeyValue::new(super::TELEMETRY_SDK_LANGUAGE, "rust"), + KeyValue::new(super::TELEMETRY_SDK_VERSION, env!("CARGO_PKG_VERSION")), + ]) + .build() } } diff --git a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs index 958ab11fe1..4ed62e90a1 100644 --- a/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/logs/in_memory_exporter.rs @@ -1,35 +1,34 @@ use crate::export::logs::{LogBatch, LogExporter}; use crate::logs::LogRecord; +use crate::logs::{LogError, LogResult}; use crate::Resource; use async_trait::async_trait; -use opentelemetry::logs::{LogError, LogResult}; -use opentelemetry::InstrumentationLibrary; +use opentelemetry::InstrumentationScope; use std::borrow::Cow; use std::sync::{Arc, Mutex}; /// An in-memory logs exporter that stores log data in memory. /// /// This exporter is useful for testing and debugging purposes. -/// It stores logs in a `Vec<LogData>`. Logs can be retrieved using +/// It stores logs in a `Vec<OwnedLogData>`. Logs can be retrieved using /// `get_emitted_logs` method. /// /// # Example /// ```no_run ///# use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider}; ///# use opentelemetry_sdk::runtime; -///# use opentelemetry_sdk::testing::logs::InMemoryLogsExporter; +///# use opentelemetry_sdk::testing::logs::InMemoryLogExporter; /// ///# #[tokio::main] ///# async fn main() { -/// // Create an InMemoryLogsExporter -/// let exporter: InMemoryLogsExporter = InMemoryLogsExporter::default(); +/// // Create an InMemoryLogExporter +/// let exporter: InMemoryLogExporter = InMemoryLogExporter::default(); /// //Create a LoggerProvider and register the exporter /// let logger_provider = LoggerProvider::builder() /// .with_log_processor(BatchLogProcessor::builder(exporter.clone(), runtime::Tokio).build()) /// .build(); /// // Setup Log Appenders and emit logs. (Not shown here) /// logger_provider.force_flush(); - /// let emitted_logs = exporter.get_emitted_logs().unwrap(); /// for log in emitted_logs { /// println!("{:?}", log); @@ -38,15 +37,15 @@ use std::sync::{Arc, Mutex}; /// ``` /// #[derive(Clone, Debug)] -pub struct InMemoryLogsExporter { +pub struct InMemoryLogExporter { logs: Arc<Mutex<Vec<OwnedLogData>>>, resource: Arc<Mutex<Resource>>, should_reset_on_shutdown: bool, } -impl Default for InMemoryLogsExporter { +impl Default for InMemoryLogExporter { fn default() -> Self { - InMemoryLogsExporterBuilder::new().build() + InMemoryLogExporterBuilder::new().build() } } @@ -56,33 +55,33 @@ pub struct OwnedLogData { /// Log record, which can be borrowed or owned. pub record: LogRecord, /// Instrumentation details for the emitter who produced this `LogEvent`. - pub instrumentation: InstrumentationLibrary, + pub instrumentation: InstrumentationScope, } /// `LogDataWithResource` associates a [`LogRecord`] with a [`Resource`] and -/// [`InstrumentationLibrary`]. +/// [`InstrumentationScope`].
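The `InMemoryLogsExporter` to `InMemoryLogExporter` rename is mechanical; the API surface is unchanged. A migration sketch, assuming `LoggerProvider::builder().with_simple_exporter(...)` is available as in earlier releases:

```
use opentelemetry_sdk::logs::LoggerProvider;
use opentelemetry_sdk::testing::logs::InMemoryLogExporter;

fn main() {
    // Was: InMemoryLogsExporter / InMemoryLogsExporterBuilder.
    let exporter = InMemoryLogExporter::default();
    let provider = LoggerProvider::builder()
        .with_simple_exporter(exporter.clone()) // clones share the same backing store
        .build();

    // ... emit logs through an appender, then flush and inspect ...
    let _ = provider.force_flush();
    for log in exporter.get_emitted_logs().unwrap() {
        println!("{:?}", log.record);
    }
}
```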
#[derive(Clone, Debug)] pub struct LogDataWithResource { /// Log record pub record: LogRecord, - /// Instrumentation details for the emitter who produced this `LogData`. - pub instrumentation: InstrumentationLibrary, - /// Resource for the emitter who produced this `LogData`. + /// Instrumentation details for the emitter who produced this `LogRecord`. + pub instrumentation: InstrumentationScope, + /// Resource for the emitter who produced this `LogRecord`. pub resource: Cow<'static, Resource>, } -///Builder for ['InMemoryLogsExporter']. +///Builder for ['InMemoryLogExporter']. /// # Example /// /// ```no_run -///# use opentelemetry_sdk::testing::logs::{InMemoryLogsExporter, InMemoryLogsExporterBuilder}; +///# use opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; ///# use opentelemetry_sdk::logs::{BatchLogProcessor, LoggerProvider}; ///# use opentelemetry_sdk::runtime; /// ///# #[tokio::main] ///# async fn main() { -/// //Create an InMemoryLogsExporter -/// let exporter: InMemoryLogsExporter = InMemoryLogsExporterBuilder::default().build(); +/// //Create an InMemoryLogExporter +/// let exporter: InMemoryLogExporter = InMemoryLogExporterBuilder::default().build(); /// //Create a LoggerProvider and register the exporter /// let logger_provider = LoggerProvider::builder() /// .with_log_processor(BatchLogProcessor::builder(exporter.clone(), runtime::Tokio).build()) @@ -98,18 +97,18 @@ pub struct LogDataWithResource { /// ``` /// #[derive(Debug, Clone)] -pub struct InMemoryLogsExporterBuilder { +pub struct InMemoryLogExporterBuilder { reset_on_shutdown: bool, } -impl Default for InMemoryLogsExporterBuilder { +impl Default for InMemoryLogExporterBuilder { fn default() -> Self { Self::new() } } -impl InMemoryLogsExporterBuilder { - /// Creates a new instance of `InMemoryLogsExporter`. +impl InMemoryLogExporterBuilder { + /// Creates a new instance of `InMemoryLogExporter`. /// pub fn new() -> Self { Self { @@ -117,17 +116,17 @@ impl InMemoryLogsExporterBuilder { } } - /// Creates a new instance of `InMemoryLogsExporter`. + /// Creates a new instance of `InMemoryLogExporter`. /// - pub fn build(&self) -> InMemoryLogsExporter { - InMemoryLogsExporter { + pub fn build(&self) -> InMemoryLogExporter { + InMemoryLogExporter { logs: Arc::new(Mutex::new(Vec::new())), - resource: Arc::new(Mutex::new(Resource::default())), + resource: Arc::new(Mutex::new(Resource::builder().build())), should_reset_on_shutdown: self.reset_on_shutdown, } } - /// If set, the records will not be [`InMemoryLogsExporter::reset`] on shutdown. + /// If set, the records will not be [`InMemoryLogExporter::reset`] on shutdown. #[cfg(test)] pub(crate) fn keep_records_on_shutdown(self) -> Self { Self { @@ -136,15 +135,15 @@ impl InMemoryLogsExporterBuilder { } } -impl InMemoryLogsExporter { - /// Returns the logs emitted via Logger as a vector of `LogData`. +impl InMemoryLogExporter { + /// Returns the logs emitted via Logger as a vector of `LogDataWithResource`. 
/// /// # Example /// /// ``` - /// use opentelemetry_sdk::testing::logs::{InMemoryLogsExporter, InMemoryLogsExporterBuilder}; + /// use opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; /// - /// let exporter = InMemoryLogsExporterBuilder::default().build(); + /// let exporter = InMemoryLogExporterBuilder::default().build(); /// let emitted_logs = exporter.get_emitted_logs().unwrap(); /// ``` /// @@ -167,9 +166,9 @@ impl InMemoryLogsExporter { /// # Example /// /// ``` - /// use opentelemetry_sdk::testing::logs::{InMemoryLogsExporter, InMemoryLogsExporterBuilder}; + /// use opentelemetry_sdk::testing::logs::{InMemoryLogExporter, InMemoryLogExporterBuilder}; /// - /// let exporter = InMemoryLogsExporterBuilder::default().build(); + /// let exporter = InMemoryLogExporterBuilder::default().build(); /// exporter.reset(); /// ``` /// @@ -183,8 +182,8 @@ impl InMemoryLogsExporter { } #[async_trait] -impl LogExporter for InMemoryLogsExporter { - async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> { +impl LogExporter for InMemoryLogExporter { + async fn export(&self, batch: LogBatch<'_>) -> LogResult<()> { let mut logs_guard = self.logs.lock().map_err(LogError::from)?; for (log_record, instrumentation) in batch.iter() { let owned_log = OwnedLogData { diff --git a/opentelemetry-sdk/src/testing/logs/mod.rs b/opentelemetry-sdk/src/testing/logs/mod.rs index 57d1eaf401..ed4d5d9560 100644 --- a/opentelemetry-sdk/src/testing/logs/mod.rs +++ b/opentelemetry-sdk/src/testing/logs/mod.rs @@ -3,4 +3,4 @@ /// The `in_memory_exporter` module provides in-memory log exporter. /// For detailed usage and examples, see `in_memory_exporter`. pub mod in_memory_exporter; -pub use in_memory_exporter::{InMemoryLogsExporter, InMemoryLogsExporterBuilder}; +pub use in_memory_exporter::{InMemoryLogExporter, InMemoryLogExporterBuilder}; diff --git a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs index 3f85b360b7..29fb3e59fc 100644 --- a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs @@ -1,10 +1,10 @@ -use crate::metrics::data::{Histogram, Metric, ResourceMetrics, ScopeMetrics, Temporality}; -use crate::metrics::exporter::PushMetricsExporter; -use crate::metrics::reader::{DefaultTemporalitySelector, TemporalitySelector}; -use crate::metrics::{data, InstrumentKind}; +use crate::metrics::data; +use crate::metrics::data::{Histogram, Metric, ResourceMetrics, ScopeMetrics}; +use crate::metrics::exporter::PushMetricExporter; +use crate::metrics::MetricError; +use crate::metrics::MetricResult; +use crate::metrics::Temporality; use async_trait::async_trait; -use opentelemetry::metrics::MetricsError; -use opentelemetry::metrics::Result; use std::collections::VecDeque; use std::fmt; use std::sync::{Arc, Mutex}; @@ -24,25 +24,25 @@ use std::sync::{Arc, Mutex}; /// # Example /// /// ``` -///# use opentelemetry_sdk::{metrics, runtime}; +///# use opentelemetry_sdk::metrics; ///# use opentelemetry::{KeyValue}; ///# use opentelemetry::metrics::MeterProvider; -///# use opentelemetry_sdk::testing::metrics::InMemoryMetricsExporter; +///# use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter; ///# use opentelemetry_sdk::metrics::PeriodicReader; /// ///# #[tokio::main] ///# async fn main() { -/// // Create an InMemoryMetricsExporter -/// let exporter = InMemoryMetricsExporter::default(); +/// // Create an InMemoryMetricExporter +/// let 
exporter = InMemoryMetricExporter::default(); /// /// // Create a MeterProvider and register the exporter /// let meter_provider = metrics::SdkMeterProvider::builder() -/// .with_reader(PeriodicReader::builder(exporter.clone(), runtime::Tokio).build()) +/// .with_reader(PeriodicReader::builder(exporter.clone()).build()) /// .build(); /// /// // Create and record metrics using the MeterProvider -/// let meter = meter_provider.meter(std::borrow::Cow::Borrowed("example")); -/// let counter = meter.u64_counter("my_counter").init(); +/// let meter = meter_provider.meter("example"); +/// let counter = meter.u64_counter("my_counter").build(); /// counter.add(1, &[KeyValue::new("key", "value")]); /// /// meter_provider.force_flush().unwrap(); @@ -56,105 +56,98 @@ use std::sync::{Arc, Mutex}; /// } ///# } /// ``` -pub struct InMemoryMetricsExporter { +pub struct InMemoryMetricExporter { metrics: Arc<Mutex<VecDeque<ResourceMetrics>>>, - temporality_selector: Arc<dyn TemporalitySelector>, + temporality: Temporality, } -impl Clone for InMemoryMetricsExporter { +impl Clone for InMemoryMetricExporter { fn clone(&self) -> Self { - InMemoryMetricsExporter { + InMemoryMetricExporter { metrics: self.metrics.clone(), - temporality_selector: self.temporality_selector.clone(), + temporality: self.temporality, } } } -impl fmt::Debug for InMemoryMetricsExporter { +impl fmt::Debug for InMemoryMetricExporter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("InMemoryMetricsExporter").finish() + f.debug_struct("InMemoryMetricExporter").finish() } } -impl Default for InMemoryMetricsExporter { +impl Default for InMemoryMetricExporter { fn default() -> Self { - InMemoryMetricsExporterBuilder::new().build() + InMemoryMetricExporterBuilder::new().build() } } -/// Builder for [`InMemoryMetricsExporter`]. +/// Builder for [`InMemoryMetricExporter`]. /// # Example /// /// ``` -/// # use opentelemetry_sdk::testing::metrics::{InMemoryMetricsExporter, InMemoryMetricsExporterBuilder}; +/// # use opentelemetry_sdk::testing::metrics::{InMemoryMetricExporter, InMemoryMetricExporterBuilder}; /// -/// let exporter = InMemoryMetricsExporterBuilder::new().build(); +/// let exporter = InMemoryMetricExporterBuilder::new().build(); /// ``` -pub struct InMemoryMetricsExporterBuilder { - temporality_selector: Option<Arc<dyn TemporalitySelector>>, +pub struct InMemoryMetricExporterBuilder { + temporality: Option<Temporality>, } -impl fmt::Debug for InMemoryMetricsExporterBuilder { +impl fmt::Debug for InMemoryMetricExporterBuilder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("InMemoryMetricsExporterBuilder").finish() + f.debug_struct("InMemoryMetricExporterBuilder").finish() } } -impl Default for InMemoryMetricsExporterBuilder { +impl Default for InMemoryMetricExporterBuilder { fn default() -> Self { Self::new() } } -impl InMemoryMetricsExporterBuilder { - /// Creates a new instance of the `InMemoryMetricsExporterBuilder`. +impl InMemoryMetricExporterBuilder { + /// Creates a new instance of the `InMemoryMetricExporterBuilder`. pub fn new() -> Self { - Self { - temporality_selector: None, - } + Self { temporality: None } } - /// Sets the temporality selector for the exporter. - pub fn with_temporality_selector<T>(mut self, temporality_selector: T) -> Self - where - T: TemporalitySelector + Send + Sync + 'static, - { - self.temporality_selector = Some(Arc::new(temporality_selector)); + /// Set the [Temporality] of the exporter. 
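For callers, the trait-object selector is replaced by a plain enum value on the builder. A sketch of the before/after, using the `Temporality::Delta` variant from this SDK:

```
use opentelemetry_sdk::metrics::Temporality;
use opentelemetry_sdk::testing::metrics::InMemoryMetricExporterBuilder;

fn main() {
    // Was: .with_temporality_selector(some_selector) with a boxed trait object.
    // Now: pass the enum directly; leaving it unset falls back to Temporality::default().
    let _exporter = InMemoryMetricExporterBuilder::new()
        .with_temporality(Temporality::Delta)
        .build();
}
```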
+ pub fn with_temporality(mut self, temporality: Temporality) -> Self { + self.temporality = Some(temporality); self } - /// Creates a new instance of the `InMemoryMetricsExporter`. + /// Creates a new instance of the `InMemoryMetricExporter`. /// - pub fn build(self) -> InMemoryMetricsExporter { - InMemoryMetricsExporter { + pub fn build(self) -> InMemoryMetricExporter { + InMemoryMetricExporter { metrics: Arc::new(Mutex::new(VecDeque::new())), - temporality_selector: self - .temporality_selector - .unwrap_or_else(|| Arc::new(DefaultTemporalitySelector::default())), + temporality: self.temporality.unwrap_or_default(), } } } -impl InMemoryMetricsExporter { +impl InMemoryMetricExporter { /// Returns the finished metrics as a vector of `ResourceMetrics`. /// /// # Errors /// - /// Returns a `MetricsError` if the internal lock cannot be acquired. + /// Returns a `MetricError` if the internal lock cannot be acquired. /// /// # Example /// /// ``` - /// # use opentelemetry_sdk::testing::metrics::InMemoryMetricsExporter; + /// # use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter; /// - /// let exporter = InMemoryMetricsExporter::default(); + /// let exporter = InMemoryMetricExporter::default(); /// let finished_metrics = exporter.get_finished_metrics().unwrap(); /// ``` - pub fn get_finished_metrics(&self) -> Result<Vec<ResourceMetrics>> { + pub fn get_finished_metrics(&self) -> MetricResult<Vec<ResourceMetrics>> { self.metrics .lock() .map(|metrics_guard| metrics_guard.iter().map(Self::clone_metrics).collect()) - .map_err(MetricsError::from) + .map_err(MetricError::from) } /// Clears the internal storage of finished metrics. @@ -162,9 +155,9 @@ impl InMemoryMetricsExporter { /// # Example /// /// ``` - /// # use opentelemetry_sdk::testing::metrics::InMemoryMetricsExporter; + /// # use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter; /// - /// let exporter = InMemoryMetricsExporter::default(); + /// let exporter = InMemoryMetricExporter::default(); /// exporter.reset(); /// ``` pub fn reset(&self) { @@ -202,47 +195,65 @@ impl InMemoryMetricsExporter { if let Some(hist) = data.as_any().downcast_ref::<Histogram<i64>>() { Some(Box::new(Histogram { data_points: hist.data_points.clone(), + start_time: hist.start_time, + time: hist.time, temporality: hist.temporality, })) } else if let Some(hist) = data.as_any().downcast_ref::<Histogram<u64>>() { Some(Box::new(Histogram { data_points: hist.data_points.clone(), + start_time: hist.start_time, + time: hist.time, temporality: hist.temporality, })) } else if let Some(hist) = data.as_any().downcast_ref::<Histogram<f64>>() { Some(Box::new(Histogram { data_points: hist.data_points.clone(), + start_time: hist.start_time, + time: hist.time, temporality: hist.temporality, })) } else if let Some(sum) = data.as_any().downcast_ref::<data::Sum<i64>>() { Some(Box::new(data::Sum { data_points: sum.data_points.clone(), + start_time: sum.start_time, + time: sum.time, temporality: sum.temporality, is_monotonic: sum.is_monotonic, })) } else if let Some(sum) = data.as_any().downcast_ref::<data::Sum<u64>>() { Some(Box::new(data::Sum { data_points: sum.data_points.clone(), + start_time: sum.start_time, + time: sum.time, temporality: sum.temporality, is_monotonic: sum.is_monotonic, })) } else if let Some(sum) = data.as_any().downcast_ref::<data::Sum<f64>>() { Some(Box::new(data::Sum { data_points: sum.data_points.clone(), + start_time: sum.start_time, + time: sum.time, temporality: sum.temporality, is_monotonic: sum.is_monotonic, })) } else if let Some(gauge) = data.as_any().downcast_ref::<data::Gauge<i64>>() { Some(Box::new(data::Gauge { data_points: gauge.data_points.clone(), + start_time: 
gauge.start_time, + time: gauge.time, })) } else if let Some(gauge) = data.as_any().downcast_ref::<data::Gauge<u64>>() { Some(Box::new(data::Gauge { data_points: gauge.data_points.clone(), + start_time: gauge.start_time, + time: gauge.time, })) } else if let Some(gauge) = data.as_any().downcast_ref::<data::Gauge<f64>>() { Some(Box::new(data::Gauge { data_points: gauge.data_points.clone(), + start_time: gauge.start_time, + time: gauge.time, })) } else { // unknown data type @@ -251,33 +262,31 @@ impl InMemoryMetricsExporter { } } -impl TemporalitySelector for InMemoryMetricsExporter { - fn temporality(&self, kind: InstrumentKind) -> Temporality { - self.temporality_selector.temporality(kind) - } -} - #[async_trait] -impl PushMetricsExporter for InMemoryMetricsExporter { - async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()> { +impl PushMetricExporter for InMemoryMetricExporter { + async fn export(&self, metrics: &mut ResourceMetrics) -> MetricResult<()> { self.metrics .lock() .map(|mut metrics_guard| { - metrics_guard.push_back(InMemoryMetricsExporter::clone_metrics(metrics)) + metrics_guard.push_back(InMemoryMetricExporter::clone_metrics(metrics)) }) - .map_err(MetricsError::from) + .map_err(MetricError::from) } - async fn force_flush(&self) -> Result<()> { + async fn force_flush(&self) -> MetricResult<()> { Ok(()) // In this implementation, flush does nothing } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { self.metrics .lock() .map(|mut metrics_guard| metrics_guard.clear()) - .map_err(MetricsError::from)?; + .map_err(MetricError::from)?; Ok(()) } + + fn temporality(&self) -> Temporality { + self.temporality + } } diff --git a/opentelemetry-sdk/src/testing/metrics/metric_reader.rs b/opentelemetry-sdk/src/testing/metrics/metric_reader.rs index 2056758a41..c535fb1c93 100644 --- a/opentelemetry-sdk/src/testing/metrics/metric_reader.rs +++ b/opentelemetry-sdk/src/testing/metrics/metric_reader.rs @@ -1,12 +1,9 @@ use std::sync::{Arc, Mutex, Weak}; use crate::metrics::{ - data::{ResourceMetrics, Temporality}, - instrument::InstrumentKind, - pipeline::Pipeline, - reader::{MetricReader, TemporalitySelector}, + data::ResourceMetrics, pipeline::Pipeline, reader::MetricReader, InstrumentKind, }; -use opentelemetry::metrics::Result; +use crate::metrics::{MetricResult, Temporality}; #[derive(Debug, Clone)] pub struct TestMetricReader { @@ -36,15 +33,15 @@ impl Default for TestMetricReader { impl MetricReader for TestMetricReader { fn register_pipeline(&self, _pipeline: Weak<Pipeline>) {} - fn collect(&self, _rm: &mut ResourceMetrics) -> Result<()> { + fn collect(&self, _rm: &mut ResourceMetrics) -> MetricResult<()> { Ok(()) } - fn force_flush(&self) -> Result<()> { + fn force_flush(&self) -> MetricResult<()> { Ok(()) } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { let result = self.force_flush(); { let mut is_shutdown = self.is_shutdown.lock().unwrap(); @@ -52,10 +49,8 @@ impl MetricReader for TestMetricReader { } result } -} -impl TemporalitySelector for TestMetricReader { fn temporality(&self, _kind: InstrumentKind) -> Temporality { - Temporality::Cumulative + Temporality::default() } } diff --git a/opentelemetry-sdk/src/testing/metrics/mod.rs b/opentelemetry-sdk/src/testing/metrics/mod.rs index cac9f58ce4..987588430e 100644 --- a/opentelemetry-sdk/src/testing/metrics/mod.rs +++ b/opentelemetry-sdk/src/testing/metrics/mod.rs @@ -3,7 +3,7 @@ /// The `in_memory_exporter` module provides in-memory metrics exporter.
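End to end, the renamed metrics types compose as below; a sketch under the new runtime-free `PeriodicReader::builder` signature, with arbitrary meter and instrument names:

```
use opentelemetry::metrics::MeterProvider as _;
use opentelemetry::KeyValue;
use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
use opentelemetry_sdk::testing::metrics::InMemoryMetricExporter;

fn main() {
    let exporter = InMemoryMetricExporter::default();
    let provider = SdkMeterProvider::builder()
        // PeriodicReader::builder no longer takes a runtime argument.
        .with_reader(PeriodicReader::builder(exporter.clone()).build())
        .build();

    let counter = provider.meter("example").u64_counter("hits").build();
    counter.add(1, &[KeyValue::new("route", "/")]);

    provider.force_flush().unwrap();
    // Each flush pushes a ResourceMetrics batch into the in-memory store.
    assert!(!exporter.get_finished_metrics().unwrap().is_empty());
}
```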
/// For detailed usage and examples, see `in_memory_exporter`. pub mod in_memory_exporter; -pub use in_memory_exporter::{InMemoryMetricsExporter, InMemoryMetricsExporterBuilder}; +pub use in_memory_exporter::{InMemoryMetricExporter, InMemoryMetricExporterBuilder}; #[doc(hidden)] pub mod metric_reader; diff --git a/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs index 5853558436..0ae261916a 100644 --- a/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/trace/in_memory_exporter.rs @@ -87,7 +87,7 @@ impl InMemorySpanExporterBuilder { pub fn build(&self) -> InMemorySpanExporter { InMemorySpanExporter { spans: Arc::new(Mutex::new(Vec::new())), - resource: Arc::new(Mutex::new(Resource::default())), + resource: Arc::new(Mutex::new(Resource::builder().build())), } } } diff --git a/opentelemetry-sdk/src/testing/trace/span_exporters.rs b/opentelemetry-sdk/src/testing/trace/span_exporters.rs index c92a64f399..e9996e3fc8 100644 --- a/opentelemetry-sdk/src/testing/trace/span_exporters.rs +++ b/opentelemetry-sdk/src/testing/trace/span_exporters.rs @@ -1,15 +1,12 @@ use crate::{ - export::{ - trace::{ExportResult, SpanData, SpanExporter}, - ExportError, - }, + export::trace::{ExportResult, SpanData, SpanExporter}, trace::{SpanEvents, SpanLinks}, - InstrumentationLibrary, }; use futures_util::future::BoxFuture; pub use opentelemetry::testing::trace::TestSpan; -use opentelemetry::trace::{ - SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState, +use opentelemetry::{ + trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState}, + InstrumentationScope, }; use std::fmt::{Display, Formatter}; @@ -32,7 +29,7 @@ pub fn new_test_export_span_data() -> SpanData { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - instrumentation_lib: InstrumentationLibrary::default(), + instrumentation_scope: InstrumentationScope::default(), } } @@ -80,7 +77,7 @@ pub struct TestExportError(String); impl std::error::Error for TestExportError {} -impl ExportError for TestExportError { +impl opentelemetry::trace::ExportError for TestExportError { fn exporter_name(&self) -> &'static str { "test" } diff --git a/opentelemetry-sdk/src/trace/config.rs b/opentelemetry-sdk/src/trace/config.rs index 368f69dacd..92b570d669 100644 --- a/opentelemetry-sdk/src/trace/config.rs +++ b/opentelemetry-sdk/src/trace/config.rs @@ -4,7 +4,7 @@ //! can be set for the default OpenTelemetry limits and Sampler. use crate::trace::{span_limit::SpanLimits, IdGenerator, RandomIdGenerator, Sampler, ShouldSample}; use crate::Resource; -use opentelemetry::global::{handle_error, Error}; +use opentelemetry::otel_warn; use std::borrow::Cow; use std::env; use std::str::FromStr; @@ -34,54 +34,90 @@ pub struct Config { impl Config { /// Specify the sampler to be used. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_sampler(...) instead." + )] pub fn with_sampler(mut self, sampler: T) -> Self { self.sampler = Box::new(sampler); self } /// Specify the id generator to be used. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_id_generator(...) instead." + )] pub fn with_id_generator(mut self, id_generator: T) -> Self { self.id_generator = Box::new(id_generator); self } - /// Specify the number of events to be recorded per span. 
+ /// Specify the maximum number of events that can be recorded per span. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_max_events_per_span(...) instead." + )] pub fn with_max_events_per_span(mut self, max_events: u32) -> Self { self.span_limits.max_events_per_span = max_events; self } - /// Specify the number of attributes to be recorded per span. + /// Specify the maximum number of attributes that can be recorded per span. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_max_attributes_per_span(...) instead." + )] pub fn with_max_attributes_per_span(mut self, max_attributes: u32) -> Self { self.span_limits.max_attributes_per_span = max_attributes; self } - /// Specify the number of events to be recorded per span. + /// Specify the maximum number of links that can be recorded per span. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_max_links_per_span(...) instead." + )] pub fn with_max_links_per_span(mut self, max_links: u32) -> Self { self.span_limits.max_links_per_span = max_links; self } - /// Specify the number of attributes one event can have. + /// Specify the maximum number of attributes one event can have. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_max_attributes_per_event(...) instead." + )] pub fn with_max_attributes_per_event(mut self, max_attributes: u32) -> Self { self.span_limits.max_attributes_per_event = max_attributes; self } - /// Specify the number of attributes one link can have. + /// Specify the maximum number of attributes one link can have. + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_max_attributes_per_link(...) instead." + )] pub fn with_max_attributes_per_link(mut self, max_attributes: u32) -> Self { self.span_limits.max_attributes_per_link = max_attributes; self } /// Specify all limit via the span_limits + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_span_limits(...) instead." + )] pub fn with_span_limits(mut self, span_limits: SpanLimits) -> Self { self.span_limits = span_limits; self } /// Specify the attributes representing the entity that produces telemetry + #[deprecated( + since = "0.27.1", + note = "Config is becoming private. Please use Builder::with_resource(...) instead." + )] pub fn with_resource(mut self, resource: Resource) -> Self { self.resource = Cow::Owned(resource); self @@ -95,7 +131,7 @@ impl Default for Config { sampler: Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))), id_generator: Box::::default(), span_limits: SpanLimits::default(), - resource: Cow::Owned(Resource::default()), + resource: Cow::Owned(Resource::builder().build()), }; if let Some(max_attributes_per_span) = env::var("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT") @@ -125,13 +161,14 @@ impl Default for Config { "always_on" => Box::new(Sampler::AlwaysOn), "always_off" => Box::new(Sampler::AlwaysOff), "traceidratio" => { - let ratio = sampler_arg.and_then(|r| r.parse::().ok()); + let ratio = sampler_arg.as_ref().and_then(|r| r.parse::().ok()); if let Some(r) = ratio { Box::new(Sampler::TraceIdRatioBased(r)) } else { - handle_error( - Error::Other(String::from( - "Missing or invalid OTEL_TRACES_SAMPLER_ARG value. 
Falling back to default: 1.0")) + otel_warn!( + name: "TracerProvider.Config.InvalidSamplerArgument", + message = "OTEL_TRACES_SAMPLER is set to 'traceidratio' but OTEL_TRACES_SAMPLER_ARG environment variable is missing or invalid. OTEL_TRACES_SAMPLER_ARG must be a valid float between 0.0 and 1.0 representing the desired sampling probability (0.0 = no traces sampled, 1.0 = all traces sampled, 0.5 = 50% of traces sampled). Falling back to default ratio: 1.0 (100% sampling)", + otel_traces_sampler_arg = format!("{:?}", sampler_arg) ); Box::new(Sampler::TraceIdRatioBased(1.0)) } @@ -143,43 +180,51 @@ impl Default for Config { Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOff))) } "parentbased_traceidratio" => { - let ratio = sampler_arg.and_then(|r| r.parse::<f64>().ok()); + let ratio = sampler_arg.as_ref().and_then(|r| r.parse::<f64>().ok()); if let Some(r) = ratio { Box::new(Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased( r, )))) } else { - handle_error( - Error::Other(String::from( - "Missing or invalid OTEL_TRACES_SAMPLER_ARG value. Falling back to default: 1.0" - ))); + otel_warn!( + name: "TracerProvider.Config.InvalidSamplerArgument", + message = "OTEL_TRACES_SAMPLER is set to 'parentbased_traceidratio' but OTEL_TRACES_SAMPLER_ARG environment variable is missing or invalid. OTEL_TRACES_SAMPLER_ARG must be a valid float between 0.0 and 1.0 representing the desired sampling probability (0.0 = no traces sampled, 1.0 = all traces sampled, 0.5 = 50% of traces sampled). Falling back to default ratio: 1.0 (100% sampling)", + otel_traces_sampler_arg = format!("{:?}", sampler_arg) + ); Box::new(Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased( 1.0, )))) } } "parentbased_jaeger_remote" => { - handle_error( - Error::Other(String::from( - "Unimplemented parentbased_jaeger_remote sampler. Falling back to default: parentbased_always_on" - ))); + otel_warn!( + name: "TracerProvider.Config.UnsupportedSampler", + message = "OTEL_TRACES_SAMPLER is set to 'parentbased_jaeger_remote' which is not implemented in this SDK version. Using fallback sampler: ParentBased(AlwaysOn). Configure an alternative sampler using OTEL_TRACES_SAMPLER" + ); Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))) } "jaeger_remote" => { - handle_error( - Error::Other(String::from("Unimplemented jaeger_remote sampler. Falling back to default: parentbased_always_on"))); + otel_warn!( + name: "TracerProvider.Config.UnsupportedSampler", + message = "OTEL_TRACES_SAMPLER is set to 'jaeger_remote' which is not implemented in this SDK version. Using fallback sampler: ParentBased(AlwaysOn). Configure an alternative sampler using OTEL_TRACES_SAMPLER" + ); Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))) } "xray" => { - handle_error( - Error::Other(String::from("Unimplemented xray sampler. Falling back to default: parentbased_always_on"))); + otel_warn!( + name: "TracerProvider.Config.UnsupportedSampler", + message = "OTEL_TRACES_SAMPLER is set to 'xray'. AWS X-Ray sampler is not implemented in this SDK version. Using fallback sampler: ParentBased(AlwaysOn). Configure an alternative sampler using OTEL_TRACES_SAMPLER" + ); Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))) } s => { - handle_error( - Error::Other(format!("Unrecognised OTEL_TRACES_SAMPLER value: {}. Falling back to default: parentbased_always_on", - s - ))); + otel_warn!( + name: "TracerProvider.Config.InvalidSamplerType", + message = format!( + "Unrecognized sampler type '{}' in OTEL_TRACES_SAMPLER environment variable. 
Valid values are: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio. Using fallback sampler: ParentBased(AlwaysOn)", + s + ), + ); Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))) } } diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index ccaae00638..43445c4a4c 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -40,15 +40,16 @@ mod runtime_tests; #[cfg(all(test, feature = "testing"))] mod tests { + use super::*; use crate::{ testing::trace::InMemorySpanExporterBuilder, trace::span_limit::{DEFAULT_MAX_EVENT_PER_SPAN, DEFAULT_MAX_LINKS_PER_SPAN}, }; - use opentelemetry::testing::trace::TestSpan; use opentelemetry::trace::{ SamplingDecision, SamplingResult, SpanKind, Status, TraceContextExt, TraceState, }; + use opentelemetry::{testing::trace::TestSpan, InstrumentationScope}; use opentelemetry::{ trace::{ Event, Link, Span, SpanBuilder, SpanContext, SpanId, TraceFlags, TraceId, Tracer, @@ -82,7 +83,7 @@ mod tests { assert_eq!(exported_spans.len(), 1); let span = &exported_spans[0]; assert_eq!(span.name, "span_name_updated"); - assert_eq!(span.instrumentation_lib.name, "test_tracer"); + assert_eq!(span.instrumentation_scope.name(), "test_tracer"); assert_eq!(span.attributes.len(), 1); assert_eq!(span.events.len(), 1); assert_eq!(span.events[0].name, "test-event"); @@ -117,7 +118,7 @@ mod tests { assert_eq!(exported_spans.len(), 1); let span = &exported_spans[0]; assert_eq!(span.name, "span_name"); - assert_eq!(span.instrumentation_lib.name, "test_tracer"); + assert_eq!(span.instrumentation_scope.name(), "test_tracer"); assert_eq!(span.attributes.len(), 1); assert_eq!(span.events.len(), 1); assert_eq!(span.events[0].name, "test-event"); @@ -154,7 +155,7 @@ mod tests { let span = &exported_spans[0]; assert_eq!(span.name, "span_name"); assert_eq!(span.span_kind, SpanKind::Server); - assert_eq!(span.instrumentation_lib.name, "test_tracer"); + assert_eq!(span.instrumentation_scope.name(), "test_tracer"); assert_eq!(span.attributes.len(), 1); assert_eq!(span.events.len(), 1); assert_eq!(span.events[0].name, "test-event"); @@ -239,7 +240,7 @@ mod tests { fn trace_state_for_dropped_sampler() { let exporter = InMemorySpanExporterBuilder::new().build(); let provider = TracerProvider::builder() - .with_config(Config::default().with_sampler(Sampler::AlwaysOff)) + .with_sampler(Sampler::AlwaysOff) .with_span_processor(SimpleSpanProcessor::new(Box::new(exporter.clone()))) .build(); @@ -292,7 +293,7 @@ mod tests { fn trace_state_for_record_only_sampler() { let exporter = InMemorySpanExporterBuilder::new().build(); let provider = TracerProvider::builder() - .with_config(Config::default().with_sampler(TestRecordOnlySampler::default())) + .with_sampler(TestRecordOnlySampler::default()) .with_span_processor(SimpleSpanProcessor::new(Box::new(exporter.clone()))) .build(); @@ -326,37 +327,14 @@ mod tests { #[test] fn tracer_attributes() { let provider = TracerProvider::builder().build(); - let tracer = provider - .tracer_builder("test_tracer") + let scope = InstrumentationScope::builder("basic") .with_attributes(vec![KeyValue::new("test_k", "test_v")]) .build(); - let instrumentation_library = tracer.instrumentation_library(); - let attributes = &instrumentation_library.attributes; - assert_eq!(attributes.len(), 1); - assert_eq!(attributes[0].key, "test_k".into()); - assert_eq!(attributes[0].value, "test_v".into()); - } - #[test] - #[allow(deprecated)] - fn 
versioned_tracer_options() { - let provider = TracerProvider::builder().build(); - let tracer = provider.versioned_tracer( - "test_tracer", - Some(String::from("v1.2.3")), - Some(String::from("https://opentelemetry.io/schema/1.0.0")), - Some(vec![(KeyValue::new("test_k", "test_v"))]), - ); - let instrumentation_library = tracer.instrumentation_library(); - let attributes = &instrumentation_library.attributes; - assert_eq!(instrumentation_library.name, "test_tracer"); - assert_eq!(instrumentation_library.version, Some("v1.2.3".into())); - assert_eq!( - instrumentation_library.schema_url, - Some("https://opentelemetry.io/schema/1.0.0".into()) - ); - assert_eq!(attributes.len(), 1); - assert_eq!(attributes[0].key, "test_k".into()); - assert_eq!(attributes[0].value, "test_v".into()); + let tracer = provider.tracer_with_scope(scope); + let instrumentation_scope = tracer.instrumentation_scope(); + assert!(instrumentation_scope + .attributes() + .eq(&[KeyValue::new("test_k", "test_v")])); } } diff --git a/opentelemetry-sdk/src/trace/provider.rs b/opentelemetry-sdk/src/trace/provider.rs index 9550ce11d2..4820d7e929 100644 --- a/opentelemetry-sdk/src/trace/provider.rs +++ b/opentelemetry-sdk/src/trace/provider.rs @@ -1,71 +1,156 @@ -//! # Trace Provider SDK -//! -//! ## Tracer Creation -//! -//! New [`Tracer`] instances are always created through a [`TracerProvider`]. -//! -//! All configuration objects and extension points (span processors, -//! propagators) are provided by the [`TracerProvider`]. [`Tracer`] instances do -//! not duplicate this data to avoid that different [`Tracer`] instances -//! of the [`TracerProvider`] have different versions of these data. +/// # Trace Provider SDK +/// +/// The `TracerProvider` handles the creation and management of [`Tracer`] instances and coordinates +/// span processing. It serves as the central configuration point for tracing, ensuring consistency +/// across all [`Tracer`] instances it creates. +/// +/// ## Tracer Creation +/// +/// New [`Tracer`] instances are always created through a `TracerProvider`. These `Tracer`s share +/// a common configuration, which includes the [`Resource`], span processors, sampling strategies, +/// and span limits. This avoids the need for each `Tracer` to maintain its own version of these +/// configurations, ensuring uniform behavior across all instances. +/// +/// ## Cloning and Shutdown +/// +/// The `TracerProvider` is designed to be clonable. Cloning a `TracerProvider` creates a +/// new reference to the same provider, not a new instance. Dropping the last reference +/// to the `TracerProvider` will automatically trigger its shutdown. During shutdown, the provider +/// will flush all remaining spans, ensuring they are passed to the configured processors. +/// Users can also manually trigger shutdown using the [`shutdown`](TracerProvider::shutdown) +/// method, which will ensure the same behavior. +/// +/// Once shut down, the `TracerProvider` transitions into a disabled state. In this state, further +/// operations on its associated `Tracer` instances will result in no-ops, ensuring that no spans +/// are processed or exported after shutdown. +/// +/// ## Span Processing and Force Flush +/// +/// The `TracerProvider` manages the lifecycle of span processors, which are responsible for +/// collecting, processing, and exporting spans. The [`force_flush`](TracerProvider::force_flush) method +/// invoked at any time will trigger an immediate flush of all pending spans (if any) to the exporters. 
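The deleted `versioned_tracer` arguments now travel on an `InstrumentationScope`. A sketch of the replacement, assuming the scope builder exposes `with_version` and `with_schema_url` alongside the `with_attributes` call used in the test above:

```
use opentelemetry::trace::TracerProvider as _;
use opentelemetry::{InstrumentationScope, KeyValue};
use opentelemetry_sdk::trace::TracerProvider;

fn main() {
    let provider = TracerProvider::builder().build();

    // Name, version, schema URL and attributes move onto the scope.
    let scope = InstrumentationScope::builder("test_tracer")
        .with_version("v1.2.3")
        .with_schema_url("https://opentelemetry.io/schema/1.0.0")
        .with_attributes(vec![KeyValue::new("test_k", "test_v")])
        .build();

    let _tracer = provider.tracer_with_scope(scope);
}
```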
+/// This will block the user thread till all the spans are passed to exporters. +/// +/// # Examples +/// +/// ``` +/// use opentelemetry::global; +/// use opentelemetry_sdk::trace::TracerProvider; +/// use opentelemetry::trace::Tracer; +/// +/// fn init_tracing() -> TracerProvider { +/// let provider = TracerProvider::default(); +/// +/// // Set the provider to be used globally +/// let _ = global::set_tracer_provider(provider.clone()); +/// +/// provider +/// } +/// +/// fn main() { +/// let provider = init_tracing(); +/// +/// // create tracer.. +/// let tracer = global::tracer("example/client"); +/// +/// // create span... +/// let span = tracer +/// .span_builder("test_span") +/// .start(&tracer); +/// +/// // Explicitly shut down the provider +/// provider.shutdown(); +/// } +/// ``` use crate::runtime::RuntimeChannel; use crate::trace::{ BatchSpanProcessor, Config, RandomIdGenerator, Sampler, SimpleSpanProcessor, SpanLimits, Tracer, }; +use crate::Resource; use crate::{export::trace::SpanExporter, trace::SpanProcessor}; -use crate::{InstrumentationLibrary, Resource}; -use once_cell::sync::{Lazy, OnceCell}; use opentelemetry::trace::TraceError; -use opentelemetry::{global, trace::TraceResult}; +use opentelemetry::InstrumentationScope; +use opentelemetry::{otel_debug, trace::TraceResult}; use std::borrow::Cow; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; -/// Default tracer name if empty string is provided. -const DEFAULT_COMPONENT_NAME: &str = "rust.opentelemetry.io/sdk/tracer"; -static PROVIDER_RESOURCE: OnceCell = OnceCell::new(); +use super::IdGenerator; + +static PROVIDER_RESOURCE: OnceLock = OnceLock::new(); // a no nop tracer provider used as placeholder when the provider is shutdown -static NOOP_TRACER_PROVIDER: Lazy = Lazy::new(|| TracerProvider { - inner: Arc::new(TracerProviderInner { - processors: Vec::new(), - config: Config { - // cannot use default here as the default resource is not empty - sampler: Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))), - id_generator: Box::::default(), - span_limits: SpanLimits::default(), - resource: Cow::Owned(Resource::empty()), - }, - }), - is_shutdown: Arc::new(AtomicBool::new(true)), -}); +// TODO Replace with LazyLock once it is stable +static NOOP_TRACER_PROVIDER: OnceLock = OnceLock::new(); +#[inline] +fn noop_tracer_provider() -> &'static TracerProvider { + NOOP_TRACER_PROVIDER.get_or_init(|| { + TracerProvider { + inner: Arc::new(TracerProviderInner { + processors: Vec::new(), + config: Config { + // cannot use default here as the default resource is not empty + sampler: Box::new(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))), + id_generator: Box::::default(), + span_limits: SpanLimits::default(), + resource: Cow::Owned(Resource::empty()), + }, + is_shutdown: AtomicBool::new(true), + }), + } + }) +} /// TracerProvider inner type #[derive(Debug)] pub(crate) struct TracerProviderInner { processors: Vec>, config: crate::trace::Config, + is_shutdown: AtomicBool, } -impl Drop for TracerProviderInner { - fn drop(&mut self) { - for processor in &mut self.processors { +impl TracerProviderInner { + /// Crate-private shutdown method to be called both from explicit shutdown + /// and from Drop when the last reference is released. 
+ pub(crate) fn shutdown(&self) -> Vec { + let mut errs = vec![]; + for processor in &self.processors { if let Err(err) = processor.shutdown() { - global::handle_error(err); + // Log at debug level because: + // - The error is also returned to the user for handling (if applicable) + // - Or the error occurs during `TracerProviderInner::Drop` as part of telemetry shutdown, + // which is non-actionable by the user + otel_debug!(name: "TracerProvider.Drop.ShutdownError", + error = format!("{err}")); + errs.push(err); } } + errs + } +} + +impl Drop for TracerProviderInner { + fn drop(&mut self) { + if !self.is_shutdown.load(Ordering::Relaxed) { + let _ = self.shutdown(); // errors are handled within shutdown + } else { + otel_debug!( + name: "TracerProvider.Drop.AlreadyShutdown" + ); + } } } /// Creator and registry of named [`Tracer`] instances. /// -/// `TracerProvider` is lightweight container holding pointers to `SpanProcessor` and other components. -/// Cloning and dropping them will not stop the span processing. To stop span processing, users -/// must either call `shutdown` method explicitly, or drop every clone of `TracerProvider`. +/// `TracerProvider` is a container holding pointers to `SpanProcessor` and other components. +/// Cloning a `TracerProvider` instance and dropping it will not stop span processing. To stop span processing, users +/// must either call the `shutdown` method explicitly or allow the last reference to the `TracerProvider` +/// to be dropped. When the last reference is dropped, the shutdown process will be automatically triggered +/// to ensure proper cleanup. #[derive(Clone, Debug)] pub struct TracerProvider { inner: Arc, - is_shutdown: Arc, } impl Default for TracerProvider { @@ -79,7 +164,6 @@ impl TracerProvider { pub(crate) fn new(inner: TracerProviderInner) -> Self { TracerProvider { inner: Arc::new(inner), - is_shutdown: Arc::new(AtomicBool::new(false)), } } @@ -101,7 +185,7 @@ impl TracerProvider { /// true if the provider has been shutdown /// Don't start span or export spans when provider is shutdown pub(crate) fn is_shutdown(&self) -> bool { - self.is_shutdown.load(Ordering::Relaxed) + self.inner.is_shutdown.load(Ordering::Relaxed) } /// Force flush all remaining spans in span processors and return results. @@ -135,10 +219,8 @@ impl TracerProvider { /// /// // create more spans.. /// - /// // dropping provider and shutting down global provider ensure all - /// // remaining spans are exported + /// // dropping provider ensures all remaining spans are exported /// drop(provider); - /// global::shutdown_tracer_provider(); /// } /// ``` pub fn force_flush(&self) -> Vec> { @@ -153,72 +235,47 @@ impl TracerProvider { /// Note that shut down doesn't means the TracerProvider has dropped pub fn shutdown(&self) -> TraceResult<()> { if self + .inner .is_shutdown .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_ok() { // propagate the shutdown signal to processors - // it's up to the processor to properly block new spans after shutdown - let mut errs = vec![]; - for processor in &self.inner.processors { - if let Err(err) = processor.shutdown() { - errs.push(err); - } - } - + let errs = self.inner.shutdown(); if errs.is_empty() { Ok(()) } else { Err(TraceError::Other(format!("{errs:?}").into())) } } else { - Err(TraceError::Other( - "tracer provider already shut down".into(), - )) + Err(TraceError::TracerProviderAlreadyShutdown) } } } +/// Default tracer name if empty string is provided. 
+const DEFAULT_COMPONENT_NAME: &str = "rust.opentelemetry.io/sdk/tracer"; + impl opentelemetry::trace::TracerProvider for TracerProvider { /// This implementation of `TracerProvider` produces `Tracer` instances. type Tracer = Tracer; - /// Create a new versioned `Tracer` instance. - fn versioned_tracer( - &self, - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, - ) -> Self::Tracer { - // Use default value if name is invalid empty string - let name = name.into(); - let component_name = if name.is_empty() { - Cow::Borrowed(DEFAULT_COMPONENT_NAME) - } else { - name - }; - - let mut builder = self.tracer_builder(component_name); + fn tracer(&self, name: impl Into>) -> Self::Tracer { + let mut name = name.into(); - if let Some(v) = version { - builder = builder.with_version(v); - } - if let Some(s) = schema_url { - builder = builder.with_schema_url(s); - } - if let Some(a) = attributes { - builder = builder.with_attributes(a); - } + if name.is_empty() { + name = Cow::Borrowed(DEFAULT_COMPONENT_NAME) + }; - builder.build() + let scope = InstrumentationScope::builder(name).build(); + self.tracer_with_scope(scope) } - fn library_tracer(&self, library: Arc) -> Self::Tracer { - if self.is_shutdown.load(Ordering::Relaxed) { - return Tracer::new(library, NOOP_TRACER_PROVIDER.clone()); + fn tracer_with_scope(&self, scope: InstrumentationScope) -> Self::Tracer { + if self.inner.is_shutdown.load(Ordering::Relaxed) { + return Tracer::new(scope, noop_tracer_provider().clone()); } - Tracer::new(library, self.clone()) + Tracer::new(scope, self.clone()) } } @@ -257,10 +314,77 @@ impl Builder { } /// The sdk [`crate::trace::Config`] that this provider will use. + #[deprecated( + since = "0.27.1", + note = "Config is becoming a private type. Use Builder::with_{config_name}(resource) instead. ex: Builder::with_resource(resource)" + )] pub fn with_config(self, config: crate::trace::Config) -> Self { Builder { config, ..self } } + /// Specify the sampler to be used. + pub fn with_sampler(mut self, sampler: T) -> Self { + self.config.sampler = Box::new(sampler); + self + } + + /// Specify the id generator to be used. + pub fn with_id_generator(mut self, id_generator: T) -> Self { + self.config.id_generator = Box::new(id_generator); + self + } + + /// Specify the number of events to be recorded per span. + pub fn with_max_events_per_span(mut self, max_events: u32) -> Self { + self.config.span_limits.max_events_per_span = max_events; + self + } + + /// Specify the number of attributes to be recorded per span. + pub fn with_max_attributes_per_span(mut self, max_attributes: u32) -> Self { + self.config.span_limits.max_attributes_per_span = max_attributes; + self + } + + /// Specify the number of events to be recorded per span. + pub fn with_max_links_per_span(mut self, max_links: u32) -> Self { + self.config.span_limits.max_links_per_span = max_links; + self + } + + /// Specify the number of attributes one event can have. + pub fn with_max_attributes_per_event(mut self, max_attributes: u32) -> Self { + self.config.span_limits.max_attributes_per_event = max_attributes; + self + } + + /// Specify the number of attributes one link can have. 
+ pub fn with_max_attributes_per_link(mut self, max_attributes: u32) -> Self { + self.config.span_limits.max_attributes_per_link = max_attributes; + self + } + + /// Specify all limit via the span_limits + pub fn with_span_limits(mut self, span_limits: SpanLimits) -> Self { + self.config.span_limits = span_limits; + self + } + + /// Associates a [Resource] with a [TracerProvider]. + /// + /// This [Resource] represents the entity producing telemetry and is associated + /// with all [Tracer]s the [TracerProvider] will create. + /// + /// By default, if this option is not used, the default [Resource] will be used. + /// + /// [Tracer]: opentelemetry::trace::Tracer + pub fn with_resource(self, resource: Resource) -> Self { + Builder { + config: self.config.with_resource(resource), + ..self + } + } + /// Create a new provider from this configuration. pub fn build(self) -> TracerProvider { let mut config = self.config; @@ -272,16 +396,13 @@ impl Builder { // For the uncommon case where there are multiple tracer providers with different resource // configurations, users can optionally provide their own borrowed static resource. if matches!(config.resource, Cow::Owned(_)) { - config.resource = match PROVIDER_RESOURCE.try_insert(config.resource.into_owned()) { - Ok(static_resource) => Cow::Borrowed(static_resource), - Err((prev, new)) => { - if prev == &new { - Cow::Borrowed(prev) - } else { - Cow::Owned(new) + config.resource = + match PROVIDER_RESOURCE.get_or_init(|| config.resource.clone().into_owned()) { + static_resource if *static_resource == *config.resource.as_ref() => { + Cow::Borrowed(static_resource) } - } - } + _ => config.resource, // Use the new resource if different + }; } // Create a new vector to hold the modified processors @@ -292,7 +413,12 @@ impl Builder { p.set_resource(config.resource.as_ref()); } - TracerProvider::new(TracerProviderInner { processors, config }) + let is_shutdown = AtomicBool::new(false); + TracerProvider::new(TracerProviderInner { + processors, + config, + is_shutdown, + }) } } @@ -307,7 +433,7 @@ mod tests { use crate::Resource; use opentelemetry::trace::{TraceError, TraceResult, Tracer, TracerProvider}; use opentelemetry::{Context, Key, KeyValue, Value}; - use std::borrow::Cow; + use std::env; use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use std::sync::Arc; @@ -391,6 +517,7 @@ mod tests { Box::from(TestSpanProcessor::new(false)), ], config: Default::default(), + is_shutdown: AtomicBool::new(false), }); let results = tracer_provider.force_flush(); @@ -440,15 +567,13 @@ mod tests { assert_telemetry_resource(&default_config_provider); }); - // If user provided a resource, use that. + // If user provided config, use that. 
let custom_config_provider = super::TracerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::new(vec![KeyValue::new( - SERVICE_NAME, - "test_service", - )])), - ..Default::default() - }) + .with_resource( + Resource::builder_empty() + .with_service_name("test_service") + .build(), + ) .build(); assert_resource(&custom_config_provider, SERVICE_NAME, Some("test_service")); assert_eq!(custom_config_provider.config().resource.len(), 1); @@ -477,13 +602,14 @@ mod tests { Some("my-custom-key=env-val,k2=value2"), || { let user_provided_resource_config_provider = super::TracerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::default().merge(&mut Resource::new(vec![ - KeyValue::new("my-custom-key", "my-custom-value"), - KeyValue::new("my-custom-key2", "my-custom-value2"), - ]))), - ..Default::default() - }) + .with_resource( + Resource::builder() + .with_attributes([ + KeyValue::new("my-custom-key", "my-custom-value"), + KeyValue::new("my-custom-key2", "my-custom-value2"), + ]) + .build(), + ) .build(); assert_resource( &user_provided_resource_config_provider, @@ -518,10 +644,7 @@ mod tests { // If user provided a resource, it takes priority during collision. let no_service_name = super::TracerProvider::builder() - .with_config(Config { - resource: Cow::Owned(Resource::empty()), - ..Default::default() - }) + .with_resource(Resource::empty()) .build(); assert_eq!(no_service_name.config().resource.len(), 0) @@ -534,6 +657,7 @@ mod tests { let tracer_provider = super::TracerProvider::new(TracerProviderInner { processors: vec![Box::from(processor)], config: Default::default(), + is_shutdown: AtomicBool::new(false), }); let test_tracer_1 = tracer_provider.tracer("test1"); @@ -554,14 +678,128 @@ mod tests { // after shutdown we should get noop tracer let noop_tracer = tracer_provider.tracer("noop"); + // noop tracer cannot start anything let _ = noop_tracer.start("test"); assert!(assert_handle.started_span_count(2)); // noop tracer's tracer provider should be shutdown - assert!(noop_tracer.provider().is_shutdown.load(Ordering::SeqCst)); + assert!(noop_tracer.provider().is_shutdown()); // existing tracer becomes noops after shutdown let _ = test_tracer_1.start("test"); assert!(assert_handle.started_span_count(2)); + + // also existing tracer's tracer provider are in shutdown state + assert!(test_tracer_1.provider().is_shutdown()); + } + + #[derive(Debug)] + struct CountingShutdownProcessor { + shutdown_count: Arc, + } + + impl CountingShutdownProcessor { + fn new(shutdown_count: Arc) -> Self { + CountingShutdownProcessor { shutdown_count } + } + } + + impl SpanProcessor for CountingShutdownProcessor { + fn on_start(&self, _span: &mut Span, _cx: &Context) { + // No operation needed for this processor + } + + fn on_end(&self, _span: SpanData) { + // No operation needed for this processor + } + + fn force_flush(&self) -> TraceResult<()> { + Ok(()) + } + + fn shutdown(&self) -> TraceResult<()> { + self.shutdown_count.fetch_add(1, Ordering::SeqCst); + Ok(()) + } + } + + #[test] + fn drop_test_with_multiple_providers() { + let shutdown_count = Arc::new(AtomicU32::new(0)); + + { + // Create a shared TracerProviderInner and use it across multiple providers + let shared_inner = Arc::new(TracerProviderInner { + processors: vec![Box::new(CountingShutdownProcessor::new( + shutdown_count.clone(), + ))], + config: Config::default(), + is_shutdown: AtomicBool::new(false), + }); + + { + let tracer_provider1 = super::TracerProvider { + inner: shared_inner.clone(), + }; 
+ let tracer_provider2 = super::TracerProvider { + inner: shared_inner.clone(), + }; + + let tracer1 = tracer_provider1.tracer("test-tracer1"); + let tracer2 = tracer_provider2.tracer("test-tracer2"); + + let _span1 = tracer1.start("span1"); + let _span2 = tracer2.start("span2"); + + // TracerProviderInner should not be dropped yet, since both providers and `shared_inner` + // are still holding a reference. + } + // At this point, both `tracer_provider1` and `tracer_provider2` are dropped, + // but `shared_inner` still holds a reference, so `TracerProviderInner` is NOT dropped yet. + assert_eq!(shutdown_count.load(Ordering::SeqCst), 0); + } + // Verify shutdown was called during the drop of the shared TracerProviderInner + assert_eq!(shutdown_count.load(Ordering::SeqCst), 1); + } + + #[test] + fn drop_after_shutdown_test_with_multiple_providers() { + let shutdown_count = Arc::new(AtomicU32::new(0)); + + // Create a shared TracerProviderInner and use it across multiple providers + let shared_inner = Arc::new(TracerProviderInner { + processors: vec![Box::new(CountingShutdownProcessor::new( + shutdown_count.clone(), + ))], + config: Config::default(), + is_shutdown: AtomicBool::new(false), + }); + + // Create a scope to test behavior when providers are dropped + { + let tracer_provider1 = super::TracerProvider { + inner: shared_inner.clone(), + }; + let tracer_provider2 = super::TracerProvider { + inner: shared_inner.clone(), + }; + + // Explicitly shut down the tracer provider + let shutdown_result = tracer_provider1.shutdown(); + assert!(shutdown_result.is_ok()); + + // Verify that shutdown was called exactly once + assert_eq!(shutdown_count.load(Ordering::SeqCst), 1); + + // TracerProvider2 should observe the shutdown state but not trigger another shutdown + let shutdown_result2 = tracer_provider2.shutdown(); + assert!(shutdown_result2.is_err()); + assert_eq!(shutdown_count.load(Ordering::SeqCst), 1); + + // Both tracer providers will be dropped at the end of this scope + } + + // Verify that shutdown was only called once, even after drop + assert_eq!(shutdown_count.load(Ordering::SeqCst), 1); } } diff --git a/opentelemetry-sdk/src/trace/runtime_tests.rs b/opentelemetry-sdk/src/trace/runtime_tests.rs index 610d140b7a..75cb1b4475 100644 --- a/opentelemetry-sdk/src/trace/runtime_tests.rs +++ b/opentelemetry-sdk/src/trace/runtime_tests.rs @@ -1,25 +1,32 @@ // Note that all tests here should be marked as ignore so that it won't be picked up by default We // need to run those tests one by one as the GlobalTracerProvider is a shared object between // threads Use cargo test -- --ignored --test-threads=1 to run those tests. 
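These test changes all follow one pattern: with `global::shutdown_tracer_provider()` gone, shutdown is driven through a retained provider handle. A minimal sketch:

```
use opentelemetry::global;
use opentelemetry_sdk::trace::TracerProvider;

fn main() {
    let provider = TracerProvider::builder().build();
    let _ = global::set_tracer_provider(provider.clone());

    // ... create tracers and spans ...

    // Shut down via the retained handle; dropping the last clone of the
    // provider triggers the same flush-and-shutdown path implicitly.
    provider
        .shutdown()
        .expect("TracerProvider should shutdown properly");
}
```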
+#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::export::trace::{ExportResult, SpanExporter}; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::runtime; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use crate::runtime::RuntimeChannel; +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use futures_util::future::BoxFuture; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use opentelemetry::global::*; #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use opentelemetry::trace::Tracer; +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use std::fmt::Debug; +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use std::sync::atomic::{AtomicUsize, Ordering}; +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] use std::sync::Arc; #[derive(Debug)] +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] struct SpanCountExporter { span_count: Arc<AtomicUsize>, } +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] impl SpanExporter for SpanCountExporter { fn export( &mut self, @@ -59,15 +66,18 @@ fn build_simple_tracer_provider(exporter: SpanCountExporter) -> crate::trace::TracerProvider } #[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] -async fn test_set_provider_in_tokio<R: RuntimeChannel>(runtime: R) -> Arc<AtomicUsize> { +async fn test_set_provider_in_tokio<R: RuntimeChannel>( + runtime: R, +) -> (Arc<AtomicUsize>, crate::trace::TracerProvider) { let exporter = SpanCountExporter::new(); let span_count = exporter.span_count.clone(); - let _ = set_tracer_provider(build_batch_tracer_provider(exporter, runtime)); + let tracer_provider = build_batch_tracer_provider(exporter, runtime); + let _ = set_tracer_provider(tracer_provider.clone()); let tracer = tracer("opentelemetery"); tracer.in_span("test", |_cx| {}); - span_count + (span_count, tracer_provider) } // When using `tokio::spawn` to spawn the worker task in batch processor @@ -90,7 +100,7 @@ async fn test_set_provider_in_tokio<R: RuntimeChannel>(runtime: R) -> Arc<AtomicUsize> 0); } @@ -112,12 +124,15 @@ async fn test_set_provider_single_thread_tokio_with_simple_processor() { let exporter = SpanCountExporter::new(); let span_count = exporter.span_count.clone(); - let _ = set_tracer_provider(build_simple_tracer_provider(exporter)); + let tracer_provider = build_simple_tracer_provider(exporter); + let _ = set_tracer_provider(tracer_provider.clone()); let tracer = tracer("opentelemetry"); tracer.in_span("test", |_cx| {}); - shutdown_tracer_provider(); + tracer_provider + .shutdown() + .expect("TracerProvider should shutdown properly"); assert!(span_count.load(Ordering::SeqCst) > 0); } @@ -127,7 +142,7 @@ async fn test_set_provider_single_thread_tokio_with_simple_processor() { #[ignore = "requires --test-threads=1"] #[cfg(feature = "rt-tokio-current-thread")] async fn test_set_provider_single_thread_tokio() { - let span_count = test_set_provider_in_tokio(runtime::TokioCurrentThread).await; + let (span_count, _) = test_set_provider_in_tokio(runtime::TokioCurrentThread).await; assert_eq!(span_count.load(Ordering::SeqCst), 0) } @@ -136,7 +151,10 @@ async fn test_set_provider_single_thread_tokio() { #[ignore = "requires --test-threads=1"] #[cfg(feature = "rt-tokio-current-thread")] async fn test_set_provider_single_thread_tokio_shutdown() { - let span_count = test_set_provider_in_tokio(runtime::TokioCurrentThread).await; -
shutdown_tracer_provider(); + let (span_count, tracer_provider) = + test_set_provider_in_tokio(runtime::TokioCurrentThread).await; + tracer_provider + .shutdown() + .expect("TracerProvider should shutdown properly"); assert!(span_count.load(Ordering::SeqCst) > 0) } diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs index 436df8f096..62a8dfef02 100644 --- a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/rate_limit.rs @@ -1,4 +1,3 @@ -use opentelemetry::trace::TraceError; use std::time::SystemTime; // leaky bucket based rate limit @@ -9,6 +8,7 @@ pub(crate) struct LeakyBucket { bucket_size: f64, last_time: SystemTime, } +use opentelemetry::otel_debug; impl LeakyBucket { pub(crate) fn new(bucket_size: f64, span_per_sec: f64) -> LeakyBucket { @@ -53,10 +53,12 @@ impl LeakyBucket { false } } - Err(_) => { - opentelemetry::global::handle_error(TraceError::Other( - "jaeger remote sampler gets rewinded timestamp".into(), - )); + Err(err) => { + otel_debug!( + name: "JaegerRemoteSampler.LeakyBucket.ClockAdjustment", + message = "Jaeger remote sampler detected a rewind in system clock", + reason = format!("{:?}", err), + ); true } } diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs index 3e2aa2d3d1..4b28a6910b 100644 --- a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/remote.rs @@ -1,5 +1,4 @@ /// Generate types based on proto - /// ProbabilisticSamplingStrategy samples traces with a fixed probability. #[derive(serde::Serialize, serde::Deserialize, PartialOrd, PartialEq)] #[serde(rename_all = "camelCase")] diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs index 6f942cbd7f..1b7909f760 100644 --- a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampler.rs @@ -5,7 +5,7 @@ use crate::trace::{Sampler, ShouldSample}; use futures_util::{stream, StreamExt as _}; use http::Uri; use opentelemetry::trace::{Link, SamplingResult, SpanKind, TraceError, TraceId}; -use opentelemetry::{global, Context, KeyValue}; +use opentelemetry::{otel_warn, Context, KeyValue}; use opentelemetry_http::HttpClient; use std::str::FromStr; use std::sync::Arc; @@ -203,7 +203,13 @@ impl JaegerRemoteSampler { // send request match Self::request_new_strategy(&client, endpoint.clone()).await { Ok(remote_strategy_resp) => strategy.update(remote_strategy_resp), - Err(err_msg) => global::handle_error(TraceError::Other(err_msg.into())), + Err(err_msg) => { + otel_warn!( + name: "JaegerRemoteSampler.FailedToFetchStrategy", + message= "Failed to fetch the sampling strategy from the remote endpoint. 
The last successfully fetched configuration will be used if available; otherwise, the default sampler will be applied until a successful configuration fetch.", + reason = format!("{}", err_msg), + ); + } }; } else { // shutdown diff --git a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs index b48642d478..0f1fd43679 100644 --- a/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs +++ b/opentelemetry-sdk/src/trace/sampler/jaeger_remote/sampling_strategy.rs @@ -4,9 +4,9 @@ use crate::trace::sampler::jaeger_remote::remote::{ }; use crate::trace::sampler::sample_based_on_probability; use opentelemetry::trace::{ - SamplingDecision, SamplingResult, TraceContextExt, TraceError, TraceId, TraceState, + SamplingDecision, SamplingResult, TraceContextExt, TraceId, TraceState, }; -use opentelemetry::{global, Context}; +use opentelemetry::{otel_warn, Context}; use std::collections::HashMap; use std::fmt::{Debug, Formatter}; use std::sync::Mutex; @@ -107,9 +107,10 @@ impl Inner { } }) .unwrap_or_else(|_err| { - global::handle_error(TraceError::Other( - "jaeger remote sampler mutex poisoned".into(), - )) + otel_warn!( + name: "JaegerRemoteSampler.MutexPoisoned", + message = "Unable to update Jaeger Remote sampling strategy: the sampler's internal mutex is poisoned, likely due to a panic in another thread holding the lock. No further attempts to update the strategy will be made until the application or process restarts, and the last known configuration will continue to be used.", + ); }); } @@ -137,7 +138,13 @@ impl Inner { (_, _, Some(probabilistic)) => { Some(Strategy::Probabilistic(probabilistic.sampling_rate)) } - _ => None, + _ => { + otel_warn!( + name: "JaegerRemoteSampler.InvalidStrategyReceived", + message = "Invalid sampling strategy received from the remote endpoint. Expected one of: OperationSampling, RateLimitingSampling, or ProbabilisticSampling. Continuing to use the previous strategy or default sampler until a successful update.", + ); + None + } } } diff --git a/opentelemetry-sdk/src/trace/span.rs b/opentelemetry-sdk/src/trace/span.rs index ea03d9ab53..25a5df0da1 100644 --- a/opentelemetry-sdk/src/trace/span.rs +++ b/opentelemetry-sdk/src/trace/span.rs @@ -263,7 +263,7 @@ fn build_export_data( events: data.events, links: data.links, status: data.status, - instrumentation_lib: tracer.instrumentation_library().clone(), + instrumentation_scope: tracer.instrumentation_scope().clone(), } } diff --git a/opentelemetry-sdk/src/trace/span_limit.rs b/opentelemetry-sdk/src/trace/span_limit.rs index 77ea183c30..7dedce089a 100644 --- a/opentelemetry-sdk/src/trace/span_limit.rs +++ b/opentelemetry-sdk/src/trace/span_limit.rs @@ -12,7 +12,6 @@ /// /// If the limit has been breached, the attributes, events or links will be dropped based on their /// index in the collection. The ones added to the collection later will be dropped first.
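The span-limit doc comment above describes an index-based dropping policy: once a collection reaches its cap, later additions are discarded. A toy sketch of that policy, assuming the default cap of 128 (matching `DEFAULT_MAX_ATTRIBUTES_PER_SPAN` below); `CappedList` is a hypothetical type that only mirrors the described behavior, not the SDK's internal representation:

```rust
const LIMIT: usize = 128; // e.g. DEFAULT_MAX_ATTRIBUTES_PER_SPAN

struct CappedList<T> {
    items: Vec<T>,
    dropped: usize, // how many additions were discarded after the cap was hit
}

impl<T> CappedList<T> {
    fn new() -> Self {
        Self { items: Vec::new(), dropped: 0 }
    }

    fn push(&mut self, item: T) {
        if self.items.len() < LIMIT {
            self.items.push(item); // kept: still within the limit
        } else {
            self.dropped += 1; // dropped: added after the limit was breached
        }
    }
}

fn main() {
    let mut attributes = CappedList::new();
    for i in 0..200 {
        attributes.push(("key", i));
    }
    assert_eq!(attributes.items.len(), 128); // the first 128 survive
    assert_eq!(attributes.dropped, 72); // the 72 added later are gone
}
```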
- pub(crate) const DEFAULT_MAX_EVENT_PER_SPAN: u32 = 128; pub(crate) const DEFAULT_MAX_ATTRIBUTES_PER_SPAN: u32 = 128; pub(crate) const DEFAULT_MAX_LINKS_PER_SPAN: u32 = 128; diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 6d57375614..5023ca2bc5 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -45,12 +45,13 @@ use futures_util::{ stream::{self, FusedStream, FuturesUnordered}, StreamExt as _, }; -use opentelemetry::global; +use opentelemetry::{otel_debug, otel_error, otel_warn}; use opentelemetry::{ trace::{TraceError, TraceResult}, Context, }; use std::cmp::min; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::{env, fmt, str::FromStr, time::Duration}; @@ -109,7 +110,8 @@ pub struct SimpleSpanProcessor { } impl SimpleSpanProcessor { - pub(crate) fn new(exporter: Box<dyn SpanExporter>) -> Self { + /// Create a new [SimpleSpanProcessor] using the provided exporter. + pub fn new(exporter: Box<dyn SpanExporter>) -> Self { Self { exporter: Mutex::new(exporter), } @@ -133,7 +135,11 @@ impl SpanProcessor for SimpleSpanProcessor { .and_then(|mut exporter| futures_executor::block_on(exporter.export(vec![span]))); if let Err(err) = result { - global::handle_error(err); + // TODO: check error type, and log `error` only if the error is user-actionable, else log `debug` + otel_debug!( + name: "SimpleProcessor.OnEnd.Error", + reason = format!("{:?}", err) + ); } } @@ -222,6 +228,12 @@ impl SpanProcessor for SimpleSpanProcessor { /// [`async-std`]: https://async.rs pub struct BatchSpanProcessor<R: RuntimeChannel> { message_sender: R::Sender, + + // Track dropped spans + dropped_spans_count: AtomicUsize, + + // Track the maximum queue size that was configured for this processor + max_queue_size: usize, } impl<R: RuntimeChannel> fmt::Debug for BatchSpanProcessor<R> { @@ -244,8 +256,14 @@ impl<R: RuntimeChannel> SpanProcessor for BatchSpanProcessor<R> { let result = self.message_sender.try_send(BatchMessage::ExportSpan(span)); - if let Err(err) = result { - global::handle_error(TraceError::Other(err.into())); + // If the queue is full, and we can't buffer a span + if result.is_err() { + // Increment the number of dropped spans. If this is the first time we've had to drop, + // emit a warning. + if self.dropped_spans_count.fetch_add(1, Ordering::Relaxed) == 0 { + otel_warn!(name: "BatchSpanProcessor.SpanDroppingStarted", + message = "Beginning to drop span messages due to full/internal errors. No further log will be emitted for further drops until Shutdown. During Shutdown time, a log will be emitted with exact count of total spans dropped."); + } } } @@ -261,6 +279,17 @@ impl<R: RuntimeChannel> SpanProcessor for BatchSpanProcessor<R> { } fn shutdown(&self) -> TraceResult<()> { + let dropped_spans = self.dropped_spans_count.load(Ordering::Relaxed); + let max_queue_size = self.max_queue_size; + if dropped_spans > 0 { + otel_warn!( + name: "BatchSpanProcessor.Shutdown", + dropped_spans = dropped_spans, + max_queue_size = max_queue_size, + message = "Spans were dropped due to a full or closed queue. The count represents the total count of span records dropped in the lifetime of the BatchSpanProcessor. Consider increasing the queue size and/or decreasing the delay between intervals."
+ ); + } + + let (res_sender, res_receiver) = oneshot::channel(); self.message_sender .try_send(BatchMessage::Shutdown(res_sender)) @@ -312,14 +341,22 @@ impl<R: RuntimeChannel> BatchSpanProcessorInternal<R> { let result = export_task.await; if let Some(channel) = res_channel { + // If a response channel is provided, attempt to send the export result through it. if let Err(result) = channel.send(result) { - global::handle_error(TraceError::from(format!( - "failed to send flush result: {:?}", - result - ))); + otel_debug!( + name: "BatchSpanProcessor.Flush.SendResultError", + reason = format!("{:?}", result) + ); } } else if let Err(err) = result { - global::handle_error(err); + // If no channel is provided and the export operation encountered an error, + // log the error directly here. + // TODO: Consider returning the status instead of logging it. + otel_error!( + name: "BatchSpanProcessor.Flush.ExportError", + reason = format!("{:?}", err), + message = "Failed during the export process" + ); } Ok(()) @@ -353,7 +390,10 @@ impl<R: RuntimeChannel> BatchSpanProcessorInternal<R> { let export_task = self.export(); let task = async move { if let Err(err) = export_task.await { - global::handle_error(err); + otel_error!( + name: "BatchSpanProcessor.Export.Error", + reason = format!("{}", err) + ); } Ok(()) @@ -450,6 +490,8 @@ impl<R: RuntimeChannel> BatchSpanProcessor<R> { let (message_sender, message_receiver) = runtime.batch_message_channel(config.max_queue_size); + let max_queue_size = config.max_queue_size; + let inner_runtime = runtime.clone(); // Spawn worker process via user-defined spawn function. runtime.spawn(Box::pin(async move { @@ -474,7 +516,11 @@ impl<R: RuntimeChannel> BatchSpanProcessor<R> { })); // Return batch processor with link to worker - BatchSpanProcessor { message_sender } + BatchSpanProcessor { + message_sender, + dropped_spans_count: AtomicUsize::new(0), + max_queue_size, + } } /// Create a new batch processor builder @@ -708,7 +754,6 @@ mod tests { OTEL_BSP_MAX_CONCURRENT_EXPORTS_DEFAULT, OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT, }; use crate::trace::{BatchConfig, BatchConfigBuilder, SpanEvents, SpanLinks}; - use async_trait::async_trait; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status}; use std::fmt::Debug; use std::future::Future; @@ -740,7 +785,7 @@ mod tests { events: SpanEvents::default(), links: SpanLinks::default(), status: Status::Unset, - instrumentation_lib: Default::default(), + instrumentation_scope: Default::default(), }; processor.on_end(unsampled); assert!(exporter.get_finished_spans().unwrap().is_empty()); @@ -944,7 +989,6 @@ mod tests { } } - #[async_trait] impl<D, DS> SpanExporter for BlockingExporter<D> where D: Fn(Duration) -> DS + 'static + Send + Sync, diff --git a/opentelemetry-sdk/src/trace/tracer.rs b/opentelemetry-sdk/src/trace/tracer.rs index f3182d388e..b17dd37717 100644 --- a/opentelemetry-sdk/src/trace/tracer.rs +++ b/opentelemetry-sdk/src/trace/tracer.rs @@ -7,25 +7,21 @@ //! and exposes methods for creating and activating new `Spans`. //! //!
Docs: -use crate::{ - trace::{ - provider::TracerProvider, - span::{Span, SpanData}, - IdGenerator, ShouldSample, SpanEvents, SpanLimits, SpanLinks, - }, - InstrumentationLibrary, +use crate::trace::{ + provider::TracerProvider, + span::{Span, SpanData}, + IdGenerator, ShouldSample, SpanEvents, SpanLimits, SpanLinks, }; use opentelemetry::{ trace::{SamplingDecision, SpanBuilder, SpanContext, SpanKind, TraceContextExt, TraceFlags}, - Context, KeyValue, + Context, InstrumentationScope, KeyValue, }; use std::fmt; -use std::sync::Arc; /// `Tracer` implementation to create and manage spans #[derive(Clone)] pub struct Tracer { - instrumentation_lib: Arc<InstrumentationLibrary>, + scope: InstrumentationScope, provider: TracerProvider, } @@ -34,22 +30,16 @@ impl fmt::Debug for Tracer { /// Omitting `provider` here is necessary to avoid cycles. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Tracer") - .field("name", &self.instrumentation_lib.name) - .field("version", &self.instrumentation_lib.version) + .field("name", &self.scope.name()) + .field("version", &self.scope.version()) .finish() } } impl Tracer { /// Create a new tracer (used internally by `TracerProvider`s). - pub(crate) fn new( - instrumentation_lib: Arc<InstrumentationLibrary>, - provider: TracerProvider, - ) -> Self { - Tracer { - instrumentation_lib, - provider, - } + pub(crate) fn new(scope: InstrumentationScope, provider: TracerProvider) -> Self { + Tracer { scope, provider } } /// TracerProvider associated with this tracer. @@ -57,9 +47,9 @@ impl Tracer { &self.provider } - /// Instrumentation library information of this tracer. - pub(crate) fn instrumentation_library(&self) -> &InstrumentationLibrary { - &self.instrumentation_lib + /// Instrumentation scope of this tracer. + pub(crate) fn instrumentation_scope(&self) -> &InstrumentationScope { + &self.scope } fn build_recording_span( @@ -295,7 +285,7 @@ impl opentelemetry::trace::Tracer for Tracer { mod tests { use crate::{ testing::trace::TestSpan, - trace::{Config, Sampler, ShouldSample}, + trace::{Sampler, ShouldSample}, }; use opentelemetry::{ trace::{ @@ -336,9 +326,8 @@ mod tests { fn allow_sampler_to_change_trace_state() { // Setup let sampler = TestSampler {}; - let config = Config::default().with_sampler(sampler); let tracer_provider = crate::trace::TracerProvider::builder() - .with_config(config) + .with_sampler(sampler) .build(); let tracer = tracer_provider.tracer("test"); let trace_state = TraceState::from_key_value(vec![("foo", "bar")]).unwrap(); @@ -361,9 +350,8 @@ mod tests { #[test] fn drop_parent_based_children() { let sampler = Sampler::ParentBased(Box::new(Sampler::AlwaysOn)); - let config = Config::default().with_sampler(sampler); let tracer_provider = crate::trace::TracerProvider::builder() - .with_config(config) + .with_sampler(sampler) .build(); let context = Context::current_with_span(TestSpan(SpanContext::empty_context())); @@ -376,9 +364,8 @@ mod tests { #[test] fn uses_current_context_for_builders_if_unset() { let sampler = Sampler::ParentBased(Box::new(Sampler::AlwaysOn)); - let config = Config::default().with_sampler(sampler); let tracer_provider = crate::trace::TracerProvider::builder() - .with_config(config) + .with_sampler(sampler) .build(); let tracer = tracer_provider.tracer("test"); diff --git a/opentelemetry-semantic-conventions/CHANGELOG.md b/opentelemetry-semantic-conventions/CHANGELOG.md index 3408698acf..e60f3d2446 100644 --- a/opentelemetry-semantic-conventions/CHANGELOG.md +++ b/opentelemetry-semantic-conventions/CHANGELOG.md @@ -2,6 +2,28 @@ ## vNext +-
Update to [v1.29.0](https://github.com/open-telemetry/semantic-conventions/releases/tag/v1.29.0) of the semantic conventions. +- Update to [v0.11.0](https://github.com/open-telemetry/weaver/releases/tag/v0.11.0) of Weaver, the code generator used to produce these conventions. +- Bump MSRV to 1.75.0. + + +## v0.27.0 + +Released 2024-Nov-11 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) +- Update to [v1.28.0](https://github.com/open-telemetry/semantic-conventions/releases/tag/v1.28.0) of the semantic conventions. + +## v0.26.0 +Released 2024-Sep-30 + +### Changed + +- Starting with this version, this crate will use Weaver for the generation of + the semantic conventions. +- **Breaking** Introduced a new feature `semconv_experimental` to enable experimental semantic conventions. + This feature is disabled by default. + ## v0.25.0 ### Changed diff --git a/opentelemetry-semantic-conventions/Cargo.toml b/opentelemetry-semantic-conventions/Cargo.toml index df8850e31d..f150c00881 100644 --- a/opentelemetry-semantic-conventions/Cargo.toml +++ b/opentelemetry-semantic-conventions/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-semantic-conventions" -version = "0.25.0" +version = "0.27.0" description = "Semantic conventions for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-semantic-conventions" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-semantic-conventions" @@ -13,12 +13,16 @@ categories = [ keywords = ["opentelemetry", "tracing", "async"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] +[features] +default = [] +semconv_experimental = [] + [dev-dependencies] opentelemetry = { default-features = false, path = "../opentelemetry" } # for doctests opentelemetry_sdk = { features = ["trace"], path = "../opentelemetry-sdk" } # for doctests diff --git a/opentelemetry-semantic-conventions/README.md b/opentelemetry-semantic-conventions/README.md index 30cac21533..2d3459fe28 100644 --- a/opentelemetry-semantic-conventions/README.md +++ b/opentelemetry-semantic-conventions/README.md @@ -22,3 +22,18 @@ resources to help facilitate interoperability and compatibility with processing and visualization tools. [`opentelemetry`]: https://crates.io/crates/opentelemetry + +*[Supported Rust Versions](#supported-rust-versions)* + +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version. + +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.49, the minimum supported version will not be increased past 1.46, +three minor versions prior. Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy.
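The v0.26.0 entry above introduces the `semconv_experimental` feature gate. A minimal sketch of what that means for a consumer, assuming the dependency is declared with the feature enabled (e.g. `opentelemetry-semantic-conventions = { version = "0.27", features = ["semconv_experimental"] }` in Cargo.toml): stable constants are always present, while experimental ones, such as the AWS attributes later in this diff, only exist when the gate is on.

```rust
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions as semconv;

fn main() {
    // Always available: `client.address` is a stable convention.
    let _stable = KeyValue::new(semconv::trace::CLIENT_ADDRESS, "example.org");

    // Only compiles when opentelemetry-semantic-conventions is built with
    // `semconv_experimental`; without the feature this constant does not exist.
    let _experimental = KeyValue::new(
        semconv::attribute::AWS_ECS_CLUSTER_ARN,
        "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster",
    );
}
```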
\ No newline at end of file diff --git a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh index 280e8d7076..7dd423477b 100755 --- a/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh +++ b/opentelemetry-semantic-conventions/scripts/generate-consts-from-spec.sh @@ -5,8 +5,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" CRATE_DIR="${SCRIPT_DIR}/../" # freeze the spec version and generator version to make generation reproducible -SPEC_VERSION=1.27.0 -SEMCOVGEN_VERSION=0.25.0 +SPEC_VERSION=1.29.0 +WEAVER_VERSION=v0.11.0 cd "$CRATE_DIR" @@ -20,54 +20,24 @@ git fetch origin "v$SPEC_VERSION" git reset --hard FETCH_HEAD cd "$CRATE_DIR" -docker run --rm \ - -v "${CRATE_DIR}/semantic-conventions/model:/source" \ - -v "${CRATE_DIR}/scripts/templates:/templates" \ - -v "${CRATE_DIR}/src:/output" \ - otel/semconvgen:$SEMCOVGEN_VERSION \ - -f /source code \ - --template /templates/semantic_attributes.rs.j2 \ - --output /output/attribute.rs \ - --parameters conventions=attribute - -docker run --rm \ - -v "${CRATE_DIR}/semantic-conventions/model:/source" \ - -v "${CRATE_DIR}/scripts/templates:/templates" \ - -v "${CRATE_DIR}/src:/output" \ - otel/semconvgen:$SEMCOVGEN_VERSION \ - --only span,event \ - -f /source code \ - --template /templates/semantic_attributes.rs.j2 \ - --output /output/trace.rs \ - --parameters conventions=trace - -docker run --rm \ - -v "${CRATE_DIR}/semantic-conventions/model:/source" \ - -v "${CRATE_DIR}/scripts/templates:/templates" \ - -v "${CRATE_DIR}/src:/output" \ - otel/semconvgen:$SEMCOVGEN_VERSION \ - --only resource \ - -f /source code \ - --template /templates/semantic_attributes.rs.j2 \ - --output /output/resource.rs \ - --parameters conventions=resource - -docker run --rm \ - -v "${CRATE_DIR}/semantic-conventions/model:/source" \ - -v "${CRATE_DIR}/scripts/templates:/templates" \ - -v "${CRATE_DIR}/src:/output" \ - otel/semconvgen:$SEMCOVGEN_VERSION \ - -f /source code \ - --template /templates/semantic_metrics.rs.j2 \ - --output /output/metric.rs - SED=(sed -i) if [[ "$(uname)" = "Darwin" ]]; then SED=(sed -i "") fi # Keep `SCHEMA_URL` key in sync with spec version -"${SED[@]}" "s/\(opentelemetry.io\/schemas\/\)[^\"]*\"/\1$SPEC_VERSION\"/" src/lib.rs +"${SED[@]}" "s/\(opentelemetry.io\/schemas\/\)[^\"]*\"/\1$SPEC_VERSION\"/" scripts/templates/registry/rust/weaver.yaml + +docker run --rm \ + --mount type=bind,source=$CRATE_DIR/semantic-conventions/model,target=/home/weaver/source,readonly \ + --mount type=bind,source=$CRATE_DIR/scripts/templates,target=/home/weaver/templates,readonly \ + --mount type=bind,source=$CRATE_DIR/src,target=/home/weaver/target \ + otel/weaver:$WEAVER_VERSION \ + registry generate \ + --registry=/home/weaver/source \ + --templates=/home/weaver/templates \ + rust \ + /home/weaver/target/ # handle doc generation failures "${SED[@]}" 's/\[2\]\.$//' src/attribute.rs # remove trailing [2] from few of the doc comments diff --git a/opentelemetry-semantic-conventions/scripts/templates/header_attribute.rs b/opentelemetry-semantic-conventions/scripts/templates/header_attribute.rs deleted file mode 100644 index 903036665d..0000000000 --- a/opentelemetry-semantic-conventions/scripts/templates/header_attribute.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! # Semantic Attributes -//! -//! The entire set of semantic attributes (or [conventions](https://opentelemetry.io/docs/concepts/semantic-conventions/)) defined by the project. 
The resource, metric, and trace modules reference these attributes. diff --git a/opentelemetry-semantic-conventions/scripts/templates/header_metric.rs b/opentelemetry-semantic-conventions/scripts/templates/header_metric.rs deleted file mode 100755 index 1d7a455b8f..0000000000 --- a/opentelemetry-semantic-conventions/scripts/templates/header_metric.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! # Metric Semantic Conventions -//! -//! The [metric semantic conventions] define a set of standardized attributes to -//! be used in `Meter`s. -//! -//! [metric semantic conventions]: https://github.com/open-telemetry/semantic-conventions/tree/main/model/metric -//! -//! ## Usage -//! -//! ```rust -//! use opentelemetry::{global, KeyValue}; -//! use opentelemetry_semantic_conventions as semconv; -//! -//! // Assumes we already have an initialized `MeterProvider` -//! // See: https://github.com/open-telemetry/opentelemetry-rust/blob/main/examples/metrics-basic/src/main.rs -//! // for an example -//! let meter = global::meter("mylibraryname"); -//! let histogram = meter -//! .u64_histogram(semconv::metric::HTTP_SERVER_REQUEST_DURATION) -//! .with_unit("By") -//! .with_description("Duration of HTTP server requests.") -//! .init(); -//! ``` diff --git a/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs b/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs deleted file mode 100644 index ac7046e008..0000000000 --- a/opentelemetry-semantic-conventions/scripts/templates/header_resource.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! # Resource Semantic Conventions -//! -//! The [resource semantic conventions] define a set of standardized attributes -//! to be used in `Resource`s. -//! -//! [resource semantic conventions]: https://github.com/open-telemetry/semantic-conventions/tree/main/model/resource -//! -//! ## Usage -//! -//! ```rust -//! use opentelemetry::KeyValue; -//! use opentelemetry_sdk::{trace::{config, TracerProvider}, Resource}; -//! use opentelemetry_semantic_conventions as semconv; -//! -//! let _tracer = TracerProvider::builder() -//! .with_config(config().with_resource(Resource::new(vec![ -//! KeyValue::new(semconv::resource::SERVICE_NAME, "my-service"), -//! KeyValue::new(semconv::resource::SERVICE_NAMESPACE, "my-namespace"), -//! ]))) -//! .build(); -//! ``` diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/attribute.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/attribute.rs.j2 new file mode 100644 index 0000000000..d81f6739f8 --- /dev/null +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/attribute.rs.j2 @@ -0,0 +1,26 @@ +{%- import 'macros.j2' as attr_macros -%} +// DO NOT EDIT, this is an auto-generated file +// +// If you want to update the file: +// - Edit the template at scripts/templates/registry/rust/attributes.rs.j2 +// - Run the script at scripts/generate-consts-from-spec.sh + +//! # Semantic Attributes +//! +//! The entire set of semantic attributes (or [conventions](https://opentelemetry.io/docs/concepts/semantic-conventions/)) defined by the project. The resource, metric, and trace modules reference these attributes. + +{% for root_ns in ctx %} + {% for attr in root_ns.attributes | rejectattr("name", "in", params.excluded_attributes) %} +{# Escape any `<...>` tags to `[...]` to avoid Rustdoc warnings and HTML parsing issues. 
#} +{% set safe_note = attr.note | replace('<', '[') | replace('>', ']') %} +{{ [attr.brief, concat_if("\n\n## Notes\n\n", safe_note), attr_macros.examples(attr)] | comment }} + {% if attr is experimental %} +#[cfg(feature = "semconv_experimental")] + {% endif %} + {% if attr is deprecated %} +#[deprecated(note="{{ attr.deprecated.strip(" \n\"") }}")] + {% endif %} +pub const {{ attr.name | screaming_snake_case }}: &str = "{{ attr.name }}"; + + {% endfor %} +{% endfor %} \ No newline at end of file diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/lib.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/lib.rs.j2 new file mode 100644 index 0000000000..d2793ca79d --- /dev/null +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/lib.rs.j2 @@ -0,0 +1,25 @@ +//! OpenTelemetry semantic conventions are agreed standardized naming patterns +//! for OpenTelemetry things. This crate aims to be the centralized place to +//! interact with these conventions. +#![warn( + future_incompatible, + missing_debug_implementations, + missing_docs, + nonstandard_style, + rust_2018_idioms, + unreachable_pub, + unused +)] +#![cfg_attr(test, deny(warnings))] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo.svg" +)] + +pub mod attribute; +pub mod metric; +pub mod resource; +pub mod trace; + +/// The schema URL that matches the version of the semantic conventions that +/// this crate defines. +pub const SCHEMA_URL: &str = "{{ params.schema_url }}"; diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/macros.j2 b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/macros.j2 new file mode 100644 index 0000000000..c661ef2f1e --- /dev/null +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/macros.j2 @@ -0,0 +1,13 @@ +{%- macro examples(entity) -%} + {% if entity.examples %} +# Examples + + {% if entity.examples is sequence %} + {% for example in entity.examples %} + - `{{ example | pprint }}` + {% endfor %} + {% else %} + - `{{ entity.examples | pprint }}` + {% endif %} + {% endif %} +{% endmacro %} \ No newline at end of file diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/metric.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/metric.rs.j2 new file mode 100644 index 0000000000..6ba1ca2100 --- /dev/null +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/metric.rs.j2 @@ -0,0 +1,79 @@ +{%- import 'macros.j2' as metric_macros -%} +// DO NOT EDIT, this is an auto-generated file +// +// If you want to update the file: +// - Edit the template at scripts/templates/registry/rust/metric.rs.j2 +// - Run the script at scripts/generate-consts-from-spec.sh + +//! # Metric Semantic Conventions +//! +//! The [metric semantic conventions] define a set of standardized attributes to +//! be used in `Meter`s. +//! +//! [metric semantic conventions]: https://github.com/open-telemetry/semantic-conventions/tree/main/model/metric +//! +//! ## Usage +//! +//! ```rust +//! use opentelemetry::{global, KeyValue}; +//! use opentelemetry_semantic_conventions as semconv; +//! +//! // Assumes we already have an initialized `MeterProvider` +//! // See: https://github.com/open-telemetry/opentelemetry-rust/blob/main/examples/metrics-basic/src/main.rs +//! // for an example +//! let meter = global::meter("mylibraryname"); +//! let histogram = meter +//! 
.u64_histogram(semconv::metric::HTTP_SERVER_REQUEST_DURATION) +//! .with_unit("By") +//! .with_description("Duration of HTTP server requests.") +//! .build(); +//! ``` + +{% for root_ns in ctx %} + {% for metric in root_ns.metrics %} +{{ ["## Description\n\n", metric.brief, concat_if("\n\n## Notes\n\n", metric.note), metric_macros.examples(metric)] | comment }} +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `{{ metric.instrument }}` | +/// | Unit: | `{{ metric.unit }}` | +/// | Status: | `{{ metric.stability | capitalize }}` | + {% if metric.attributes %} +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | + {% endif %} + {% for attribute in metric.attributes | rejectattr("name", "in", params.excluded_attributes) | sort(attribute="name") %} + {% if attribute.requirement_level %} + {% if attribute.requirement_level.conditionally_required %} + {% set req_level = "Conditionally_required" %} + {% set req_message = attribute.requirement_level.conditionally_required %} + {% else %} + {% set req_level = (attribute.requirement_level | capitalize) %} + {% set req_message = attribute.requirement_level_msg %} + {% endif %} + {% else %} + {% set req_level = "Unspecified" %} + {% set req_message = '' %} + {% endif %} +/// | [`crate::attribute::{{ attribute.name | screaming_snake_case }}`] | `{{ req_level }}`{{ (': ' + req_message.replace('\n', ' ') if req_message else '') }} + {% endfor %} + {% if metric.examples %} +/// +/// ## Examples +/// + {% for example in metric.examples %} +/// - `{{ example }}` + {% endfor %} + {% endif %} + {% if metric is experimental %} +#[cfg(feature = "semconv_experimental")] + {% endif %} + {% if metric is deprecated %} +#[deprecated(note="{{ metric.deprecated.strip(" \n\"") }}")] + {% endif %} +pub const {{ metric.metric_name | screaming_snake_case }}: &str = "{{ metric.metric_name }}"; + + {% endfor %} +{% endfor %} \ No newline at end of file diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/resource.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/resource.rs.j2 new file mode 100644 index 0000000000..3ffcd91808 --- /dev/null +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/resource.rs.j2 @@ -0,0 +1,36 @@ +{%- import 'macros.j2' as attr_macros -%} +// DO NOT EDIT, this is an auto-generated file +// +// If you want to update the file: +// - Edit the template at scripts/templates/registry/rust/resource.rs.j2 +// - Run the script at scripts/generate-consts-from-spec.sh + +//! # Resource Semantic Conventions +//! +//! The [resource semantic conventions] define a set of standardized attributes +//! to be used in `Resource`s. +//! +//! [resource semantic conventions]: https://github.com/open-telemetry/semantic-conventions/tree/main/model/resource +//! +//! ## Usage +//! +//! ```rust +//! use opentelemetry::KeyValue; +//! use opentelemetry_sdk::{trace::{config, TracerProvider}, Resource}; +//! use opentelemetry_semantic_conventions as semconv; +//! +//! let _tracer = TracerProvider::builder() +//! .with_config(config().with_resource(Resource::builder_empty().with_service_name("my-service").build())) +//! .build(); +//! 
``` + +{% for attr in ctx | rejectattr("name", "in", params.excluded_attributes) %} + {% if attr is experimental %} +#[cfg(feature = "semconv_experimental")] + {% endif %} + {% if attr is deprecated %} +#[allow(deprecated)] + {% endif %} +pub use crate::attribute::{{ attr.name | screaming_snake_case }}; + +{% endfor %} diff --git a/opentelemetry-semantic-conventions/scripts/templates/header_trace.rs b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/trace.rs.j2 similarity index 54% rename from opentelemetry-semantic-conventions/scripts/templates/header_trace.rs rename to opentelemetry-semantic-conventions/scripts/templates/registry/rust/trace.rs.j2 index 6aa5390ce5..e9e338ecc6 100644 --- a/opentelemetry-semantic-conventions/scripts/templates/header_trace.rs +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/trace.rs.j2 @@ -1,23 +1,41 @@ -//! # Trace Semantic Conventions -//! -//! The [trace semantic conventions] define a set of standardized attributes to -//! be used in `Span`s. -//! -//! [trace semantic conventions]: https://github.com/open-telemetry/semantic-conventions/tree/main/model/trace -//! -//! ## Usage -//! -//! ```rust -//! use opentelemetry::KeyValue; -//! use opentelemetry::{global, trace::Tracer as _}; -//! use opentelemetry_semantic_conventions as semconv; -//! -//! let tracer = global::tracer("my-component"); -//! let _span = tracer -//! .span_builder("span-name") -//! .with_attributes(vec![ -//! KeyValue::new(semconv::trace::CLIENT_ADDRESS, "example.org"), -//! KeyValue::new(semconv::trace::CLIENT_PORT, 80i64), -//! ]) -//! .start(&tracer); -//! ``` +{%- import 'macros.j2' as attr_macros -%} +// DO NOT EDIT, this is an auto-generated file +// +// If you want to update the file: +// - Edit the template at scripts/templates/registry/rust/attributes.rs.j2 +// - Run the script at scripts/generate-consts-from-spec.sh + +//! # Trace Semantic Conventions +//! +//! The [trace semantic conventions] define a set of standardized attributes to +//! be used in `Span`s. +//! +//! [trace semantic conventions]: https://github.com/open-telemetry/semantic-conventions/tree/main/model/trace +//! +//! ## Usage +//! +//! ```rust +//! use opentelemetry::KeyValue; +//! use opentelemetry::{global, trace::Tracer as _}; +//! use opentelemetry_semantic_conventions as semconv; +//! +//! let tracer = global::tracer("my-component"); +//! let _span = tracer +//! .span_builder("span-name") +//! .with_attributes([ +//! KeyValue::new(semconv::trace::CLIENT_ADDRESS, "example.org"), +//! KeyValue::new(semconv::trace::CLIENT_PORT, 80i64), +//! ]) +//! .start(&tracer); +//! 
``` + +{% for attr in ctx | rejectattr("name", "in", params.excluded_attributes) %} + {% if attr is experimental %} +#[cfg(feature = "semconv_experimental")] + {% endif %} + {% if attr is deprecated %} +#[allow(deprecated)] + {% endif %} +pub use crate::attribute::{{ attr.name | screaming_snake_case }}; + +{% endfor %} \ No newline at end of file diff --git a/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml new file mode 100644 index 0000000000..e1ec32584b --- /dev/null +++ b/opentelemetry-semantic-conventions/scripts/templates/registry/rust/weaver.yaml @@ -0,0 +1,46 @@ +# Whitespace control settings to simplify the definition of templates +whitespace_control: + trim_blocks: true + lstrip_blocks: true + +# Configuration for the comment formatting +comment_formats: + rust: + format: markdown + prefix: "/// " + trim: true + remove_trailing_dots: true + escape_square_brackets: true +default_comment_format: rust + +params: + schema_url: "https://opentelemetry.io/schemas/1.29.0" + exclude_root_namespace: [] + excluded_attributes: ["messaging.client_id"] + +templates: + - pattern: attribute.rs.j2 + filter: semconv_grouped_attributes($params) + application_mode: single + - pattern: metric.rs.j2 + filter: semconv_grouped_metrics($params) + application_mode: single + - pattern: resource.rs.j2 + filter: > + semconv_signal("resource"; $params) + | map(.attributes[]) + | unique_by(.name) + | sort_by(.name) + | map({name, brief, examples, deprecated, requirement_level, stability, type}) + application_mode: single + - pattern: trace.rs.j2 + filter: > + semconv_signal("span"; $params) + semconv_signal("event"; $params) + | map(.attributes[]) + | unique_by(.name) + | sort_by(.name) + | map({name, brief, examples, deprecated, requirement_level, stability, type}) + application_mode: single + - pattern: lib.rs.j2 + filter: . 
+ application_mode: single \ No newline at end of file diff --git a/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 deleted file mode 100644 index c71ceba10d..0000000000 --- a/opentelemetry-semantic-conventions/scripts/templates/semantic_attributes.rs.j2 +++ /dev/null @@ -1,41 +0,0 @@ -// DO NOT EDIT, this is an auto-generated file -// -// If you want to update the file: -// - Edit the template at scripts{{template}} -// - Run the script at scripts/generate-consts-from-spec.sh - -{% include 'header_' + conventions + '.rs' %} - -{%- for attribute in attributes %} -{%- set x=attribute.__setattr__("fqn_const_name", (attribute.fqn | to_const_name)) %} -{%- endfor %} - -{%- for name, attrs in (attributes | groupby('fqn_const_name')) %} -{%- set attribute = (attrs | selectattr('deprecated', 'none') | first) %} -{%- set attribute = attribute if attribute else (attrs | first) %} -{%- if conventions != 'attribute' %} -{%- if not attribute.deprecated %} -pub use crate::attribute::{{ attribute.fqn_const_name }}; -{%- endif %} -{%- else %} -/// {% filter escape %}{{attribute.brief | to_doc_brief}}.{% endfilter %} -{%- if attribute.note %} -/// -{%- for line in attribute.note.split('\n') %} -/// {% filter escape %}{{line}}{% endfilter %} -{%- endfor %} -{%- endif %} -{%- if attribute.examples %} -/// -/// # Examples -/// -{%- for example in attribute.examples %} -/// - `{{example}}` -{%- endfor %} -{%- endif %} -{%- if attribute.deprecated %} -#[deprecated] -{%- endif %} -pub const {{ attribute.fqn_const_name }}: &str = "{{attribute.fqn}}"; -{%- endif %} -{%- endfor %} diff --git a/opentelemetry-semantic-conventions/scripts/templates/semantic_metrics.rs.j2 b/opentelemetry-semantic-conventions/scripts/templates/semantic_metrics.rs.j2 deleted file mode 100644 index 703385bf26..0000000000 --- a/opentelemetry-semantic-conventions/scripts/templates/semantic_metrics.rs.j2 +++ /dev/null @@ -1,57 +0,0 @@ -// DO NOT EDIT, this is an auto-generated file -// -// If you want to update the file: -// - Edit the template at scripts{{template}} -// - Run the script at scripts/generate-consts-from-spec.sh - -{% include 'header_metric.rs' %} - -{%- for metric in metrics %} -/// ## Description -/// {% filter escape %}{{ metric.brief | to_doc_brief }}.{% endfilter %} -{%- if metric.note %} -/// -{%- for line in metric.note.split('\n') %} -/// {% filter escape %}{{ line }}{% endfilter %} -{%- endfor %} -{%- endif %} -/// ## Metadata -/// | | | -/// |:-|:- -/// | Instrument: | `{{ metric.instrument }}` | -/// | Unit: | `{{ metric.unit }}` | -/// | Status: | `{{ ((metric.stability | string()).split('.')[1].replace('_', ' ')) | capitalize }}` | -{%- if metric.attributes %} -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -{%- endif %} -{%- for attribute in metric.attributes %} -{%- if attribute.ref %} -{%- set ref = (attributes | selectattr('fqn', 'equalto', attribute.ref) | first) %} -{%- if ref %} -{%- if attribute.requirement_level %} -{%- set req_level = ((attribute.requirement_level | string()).split('.')[1].replace('_', ' ')) | capitalize %} -{%- set req_message = attribute.requirement_level_msg %} -{%- else %} -{%- set req_level = "Unspecified" %} -{%- set req_message = '' %} -{%- endif %} -/// | [`crate::attribute::{{ ref.fqn | to_const_name }}`] | `{{ req_level }}`{{ (': ' + req_message if req_message else '') }} -{%- endif %} -{%- endif %} -{%- endfor %} -{%- if metric.examples %} -/// 
-/// # Examples -/// -{%- for example in metric.examples %} -/// - `{{ example }}` -{%- endfor %} -{%- endif %} -{%- if (metric.deprecated) %} -#[deprecated] -{%- endif %} -pub const {{ metric.metric_name | to_const_name }}: &str = "{{ metric.metric_name }}"; -{%- endfor %} diff --git a/opentelemetry-semantic-conventions/src/attribute.rs b/opentelemetry-semantic-conventions/src/attribute.rs index fedcbed50b..91b70da95a 100644 --- a/opentelemetry-semantic-conventions/src/attribute.rs +++ b/opentelemetry-semantic-conventions/src/attribute.rs @@ -1,46 +1,70 @@ // DO NOT EDIT, this is an auto-generated file // // If you want to update the file: -// - Edit the template at scripts/templates/semantic_attributes.rs.j2 +// - Edit the template at scripts/templates/registry/rust/attributes.rs.j2 // - Run the script at scripts/generate-consts-from-spec.sh //! # Semantic Attributes //! //! The entire set of semantic attributes (or [conventions](https://opentelemetry.io/docs/concepts/semantic-conventions/)) defined by the project. The resource, metric, and trace modules reference these attributes. + /// Uniquely identifies the framework API revision offered by a version (`os.version`) of the android operating system. More information can be found [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels). /// +/// ## Notes +/// /// # Examples /// -/// - `33` -/// - `32` +/// - `"33"` +/// - `"32"` +#[cfg(feature = "semconv_experimental")] pub const ANDROID_OS_API_LEVEL: &str = "android.os.api_level"; + /// Deprecated use the `device.app.lifecycle` event definition including `android.state` as a payload field instead. /// -/// The Android lifecycle states are defined in [Activity lifecycle callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), and from which the `OS identifiers` are derived. +/// ## Notes +/// +/// The Android lifecycle states are defined in [Activity lifecycle callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), and from which the `OS identifiers` are derived +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `device.app.lifecycle`.")] pub const ANDROID_STATE: &str = "android.state"; + /// The provenance filename of the built attestation which directly relates to the build artifact filename. This filename SHOULD accompany the artifact at publish time. See the [SLSA Relationship](https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations) specification for more information. /// +/// ## Notes +/// /// # Examples /// -/// - `golang-binary-amd64-v0.1.0.attestation` -/// - `docker-image-amd64-v0.1.0.intoto.json1` -/// - `release-1.tar.gz.attestation` -/// - `file-name-package.tar.gz.intoto.json1` +/// - `"golang-binary-amd64-v0.1.0.attestation"` +/// - `"docker-image-amd64-v0.1.0.intoto.json1"` +/// - `"release-1.tar.gz.attestation"` +/// - `"file-name-package.tar.gz.intoto.json1"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_ATTESTATION_FILENAME: &str = "artifact.attestation.filename"; + /// The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), of the built attestation. Some envelopes in the software attestation space also refer to this as the [digest](https://github.com/in-toto/attestation/blob/main/spec/README.md#in-toto-attestation-framework-spec). 
/// +/// ## Notes +/// /// # Examples /// -/// - `1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408` +/// - `"1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_ATTESTATION_HASH: &str = "artifact.attestation.hash"; + /// The id of the build [software attestation](https://slsa.dev/attestation-model). /// +/// ## Notes +/// /// # Examples /// -/// - `123` +/// - `"123"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_ATTESTATION_ID: &str = "artifact.attestation.id"; + /// The human readable file name of the artifact, typically generated during build and release processes. Often includes the package name and version in the file name. /// +/// ## Notes +/// /// This file name can also act as the [Package Name](https://slsa.dev/spec/v1.0/terminology#package-model) /// in cases where the package ecosystem maps accordingly. /// Additionally, the artifact [can be published](https://slsa.dev/spec/v1.0/terminology#software-supply-chain) @@ -48,13 +72,17 @@ pub const ARTIFACT_ATTESTATION_ID: &str = "artifact.attestation.id"; /// /// # Examples /// -/// - `golang-binary-amd64-v0.1.0` -/// - `docker-image-amd64-v0.1.0` -/// - `release-1.tar.gz` -/// - `file-name-package.tar.gz` +/// - `"golang-binary-amd64-v0.1.0"` +/// - `"docker-image-amd64-v0.1.0"` +/// - `"release-1.tar.gz"` +/// - `"file-name-package.tar.gz"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_FILENAME: &str = "artifact.filename"; + /// The full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), often found in checksum.txt on a release of the artifact and used to verify package integrity. /// +/// ## Notes +/// /// The specific algorithm used to create the cryptographic hash value is /// not defined. In situations where an artifact has multiple /// cryptographic hashes, it is up to the implementer to choose which @@ -66,307 +94,510 @@ pub const ARTIFACT_FILENAME: &str = "artifact.filename"; /// /// # Examples /// -/// - `9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9` +/// - `"9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_HASH: &str = "artifact.hash"; + /// The [Package URL](https://github.com/package-url/purl-spec) of the [package artifact](https://slsa.dev/spec/v1.0/terminology#package-model) provides a standard way to identify and locate the packaged artifact. /// +/// ## Notes +/// /// # Examples /// -/// - `pkg:github/package-url/purl-spec@1209109710924` -/// - `pkg:npm/foo@12.12.3` +/// - `"pkg:github/package-url/purl-spec@1209109710924"` +/// - `"pkg:npm/foo@12.12.3"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_PURL: &str = "artifact.purl"; + /// The version of the artifact. /// +/// ## Notes +/// /// # Examples /// -/// - `v0.1.0` -/// - `1.2.1` -/// - `122691-build` +/// - `"v0.1.0"` +/// - `"1.2.1"` +/// - `"122691-build"` +#[cfg(feature = "semconv_experimental")] pub const ARTIFACT_VERSION: &str = "artifact.version"; -/// ASP.NET Core exception middleware handling result. 
+ +/// ASP.NET Core exception middleware handling result +/// +/// ## Notes /// /// # Examples /// -/// - `handled` -/// - `unhandled` +/// - `"handled"` +/// - `"unhandled"` pub const ASPNETCORE_DIAGNOSTICS_EXCEPTION_RESULT: &str = "aspnetcore.diagnostics.exception.result"; + /// Full type name of the [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) implementation that handled the exception. /// +/// ## Notes +/// /// # Examples /// -/// - `Contoso.MyHandler` +/// - `"Contoso.MyHandler"` pub const ASPNETCORE_DIAGNOSTICS_HANDLER_TYPE: &str = "aspnetcore.diagnostics.handler.type"; + /// Rate limiting policy name. /// +/// ## Notes +/// /// # Examples /// -/// - `fixed` -/// - `sliding` -/// - `token` +/// - `"fixed"` +/// - `"sliding"` +/// - `"token"` pub const ASPNETCORE_RATE_LIMITING_POLICY: &str = "aspnetcore.rate_limiting.policy"; -/// Rate-limiting result, shows whether the lease was acquired or contains a rejection reason. + +/// Rate-limiting result, shows whether the lease was acquired or contains a rejection reason +/// +/// ## Notes /// /// # Examples /// -/// - `acquired` -/// - `request_canceled` +/// - `"acquired"` +/// - `"request_canceled"` pub const ASPNETCORE_RATE_LIMITING_RESULT: &str = "aspnetcore.rate_limiting.result"; + /// Flag indicating if request was handled by the application pipeline. /// +/// ## Notes +/// /// # Examples /// -/// - `True` +/// - `true` pub const ASPNETCORE_REQUEST_IS_UNHANDLED: &str = "aspnetcore.request.is_unhandled"; + /// A value that indicates whether the matched route is a fallback route. /// +/// ## Notes +/// /// # Examples /// -/// - `True` +/// - `true` pub const ASPNETCORE_ROUTING_IS_FALLBACK: &str = "aspnetcore.routing.is_fallback"; -/// Match result - success or failure. + +/// Match result - success or failure +/// +/// ## Notes /// /// # Examples /// -/// - `success` -/// - `failure` +/// - `"success"` +/// - `"failure"` pub const ASPNETCORE_ROUTING_MATCH_STATUS: &str = "aspnetcore.routing.match_status"; + /// The JSON-serialized value of each item in the `AttributeDefinitions` request field. /// +/// ## Notes +/// /// # Examples /// -/// - `{ "AttributeName": "string", "AttributeType": "string" }` +/// - `[ +/// "{ \"AttributeName\": \"string\", \"AttributeType\": \"string\" }", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS: &str = "aws.dynamodb.attribute_definitions"; + /// The value of the `AttributesToGet` request parameter. /// +/// ## Notes +/// /// # Examples /// -/// - `lives` -/// - `id` +/// - `[ +/// "lives", +/// "id", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_ATTRIBUTES_TO_GET: &str = "aws.dynamodb.attributes_to_get"; + /// The value of the `ConsistentRead` request parameter. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_CONSISTENT_READ: &str = "aws.dynamodb.consistent_read"; + /// The JSON-serialized value of each item in the `ConsumedCapacity` response field. 
/// +/// ## Notes +/// /// # Examples /// -/// - `{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": "string", "WriteCapacityUnits": number }` +/// - `[ +/// "{ \"CapacityUnits\": number, \"GlobalSecondaryIndexes\": { \"string\" : { \"CapacityUnits\": number, \"ReadCapacityUnits\": number, \"WriteCapacityUnits\": number } }, \"LocalSecondaryIndexes\": { \"string\" : { \"CapacityUnits\": number, \"ReadCapacityUnits\": number, \"WriteCapacityUnits\": number } }, \"ReadCapacityUnits\": number, \"Table\": { \"CapacityUnits\": number, \"ReadCapacityUnits\": number, \"WriteCapacityUnits\": number }, \"TableName\": \"string\", \"WriteCapacityUnits\": number }", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_CONSUMED_CAPACITY: &str = "aws.dynamodb.consumed_capacity"; + /// The value of the `Count` response parameter. /// +/// ## Notes +/// /// # Examples /// /// - `10` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_COUNT: &str = "aws.dynamodb.count"; + /// The value of the `ExclusiveStartTableName` request parameter. /// +/// ## Notes +/// /// # Examples /// -/// - `Users` -/// - `CatsTable` +/// - `"Users"` +/// - `"CatsTable"` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_EXCLUSIVE_START_TABLE: &str = "aws.dynamodb.exclusive_start_table"; + /// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` request field. /// +/// ## Notes +/// /// # Examples /// -/// - `{ "Create": { "IndexName": "string", "KeySchema": [ { "AttributeName": "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": number } }` +/// - `[ +/// "{ \"Create\": { \"IndexName\": \"string\", \"KeySchema\": [ { \"AttributeName\": \"string\", \"KeyType\": \"string\" } ], \"Projection\": { \"NonKeyAttributes\": [ \"string\" ], \"ProjectionType\": \"string\" }, \"ProvisionedThroughput\": { \"ReadCapacityUnits\": number, \"WriteCapacityUnits\": number } }", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES: &str = "aws.dynamodb.global_secondary_index_updates"; -/// The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field. 
+ +/// The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request field +/// +/// ## Notes /// /// # Examples /// -/// - `{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": number } }` +/// - `[ +/// "{ \"IndexName\": \"string\", \"KeySchema\": [ { \"AttributeName\": \"string\", \"KeyType\": \"string\" } ], \"Projection\": { \"NonKeyAttributes\": [ \"string\" ], \"ProjectionType\": \"string\" }, \"ProvisionedThroughput\": { \"ReadCapacityUnits\": number, \"WriteCapacityUnits\": number } }", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES: &str = "aws.dynamodb.global_secondary_indexes"; + /// The value of the `IndexName` request parameter. /// +/// ## Notes +/// /// # Examples /// -/// - `name_to_group` +/// - `"name_to_group"` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_INDEX_NAME: &str = "aws.dynamodb.index_name"; + /// The JSON-serialized value of the `ItemCollectionMetrics` response field. /// +/// ## Notes +/// /// # Examples /// -/// - `{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }` +/// - `"{ \"string\" : [ { \"ItemCollectionKey\": { \"string\" : { \"B\": blob, \"BOOL\": boolean, \"BS\": [ blob ], \"L\": [ \"AttributeValue\" ], \"M\": { \"string\" : \"AttributeValue\" }, \"N\": \"string\", \"NS\": [ \"string\" ], \"NULL\": boolean, \"S\": \"string\", \"SS\": [ \"string\" ] } }, \"SizeEstimateRangeGB\": [ number ] } ] }"` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_ITEM_COLLECTION_METRICS: &str = "aws.dynamodb.item_collection_metrics"; + /// The value of the `Limit` request parameter. /// +/// ## Notes +/// /// # Examples /// /// - `10` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_LIMIT: &str = "aws.dynamodb.limit"; + /// The JSON-serialized value of each item of the `LocalSecondaryIndexes` request field. /// +/// ## Notes +/// /// # Examples /// -/// - `{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }` +/// - `[ +/// "{ \"IndexArn\": \"string\", \"IndexName\": \"string\", \"IndexSizeBytes\": number, \"ItemCount\": number, \"KeySchema\": [ { \"AttributeName\": \"string\", \"KeyType\": \"string\" } ], \"Projection\": { \"NonKeyAttributes\": [ \"string\" ], \"ProjectionType\": \"string\" } }", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES: &str = "aws.dynamodb.local_secondary_indexes"; + /// The value of the `ProjectionExpression` request parameter. 
/// +/// ## Notes +/// /// # Examples /// -/// - `Title` -/// - `Title, Price, Color` -/// - `Title, Description, RelatedItems, ProductReviews` +/// - `"Title"` +/// - `"Title, Price, Color"` +/// - `"Title, Description, RelatedItems, ProductReviews"` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_PROJECTION: &str = "aws.dynamodb.projection"; + /// The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. /// +/// ## Notes +/// /// # Examples /// /// - `1.0` /// - `2.0` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_PROVISIONED_READ_CAPACITY: &str = "aws.dynamodb.provisioned_read_capacity"; + /// The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. /// +/// ## Notes +/// /// # Examples /// /// - `1.0` /// - `2.0` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY: &str = "aws.dynamodb.provisioned_write_capacity"; + /// The value of the `ScanIndexForward` request parameter. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_SCAN_FORWARD: &str = "aws.dynamodb.scan_forward"; + /// The value of the `ScannedCount` response parameter. /// +/// ## Notes +/// /// # Examples /// /// - `50` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_SCANNED_COUNT: &str = "aws.dynamodb.scanned_count"; + /// The value of the `Segment` request parameter. /// +/// ## Notes +/// /// # Examples /// /// - `10` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_SEGMENT: &str = "aws.dynamodb.segment"; + /// The value of the `Select` request parameter. /// +/// ## Notes +/// /// # Examples /// -/// - `ALL_ATTRIBUTES` -/// - `COUNT` +/// - `"ALL_ATTRIBUTES"` +/// - `"COUNT"` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_SELECT: &str = "aws.dynamodb.select"; + /// The number of items in the `TableNames` response parameter. /// +/// ## Notes +/// /// # Examples /// /// - `20` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_TABLE_COUNT: &str = "aws.dynamodb.table_count"; + /// The keys in the `RequestItems` object field. /// +/// ## Notes +/// /// # Examples /// -/// - `Users` -/// - `Cats` +/// - `[ +/// "Users", +/// "Cats", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_TABLE_NAMES: &str = "aws.dynamodb.table_names"; + /// The value of the `TotalSegments` request parameter. /// +/// ## Notes +/// /// # Examples /// /// - `100` +#[cfg(feature = "semconv_experimental")] pub const AWS_DYNAMODB_TOTAL_SEGMENTS: &str = "aws.dynamodb.total_segments"; + /// The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). /// +/// ## Notes +/// /// # Examples /// -/// - `arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster` +/// - `"arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"` +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_CLUSTER_ARN: &str = "aws.ecs.cluster.arn"; + /// The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
/// +/// ## Notes +/// /// # Examples /// -/// - `arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9` +/// - `"arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"` +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_CONTAINER_ARN: &str = "aws.ecs.container.arn"; + /// The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_LAUNCHTYPE: &str = "aws.ecs.launchtype"; + /// The ARN of a running [ECS task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). /// +/// ## Notes +/// /// # Examples /// -/// - `arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b` -/// - `arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd` +/// - `"arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b"` +/// - `"arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"` +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_TASK_ARN: &str = "aws.ecs.task.arn"; + /// The family name of the [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) used to create the ECS task. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry-family` +/// - `"opentelemetry-family"` +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_TASK_FAMILY: &str = "aws.ecs.task.family"; + /// The ID of a running ECS task. The ID MUST be extracted from `task.arn`. /// +/// ## Notes +/// /// # Examples /// -/// - `10838bed-421f-43ef-870a-f43feacbbb5b` -/// - `23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd` +/// - `"10838bed-421f-43ef-870a-f43feacbbb5b"` +/// - `"23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"` +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_TASK_ID: &str = "aws.ecs.task.id"; + /// The revision for the task definition used to create the ECS task. /// +/// ## Notes +/// /// # Examples /// -/// - `8` -/// - `26` +/// - `"8"` +/// - `"26"` +#[cfg(feature = "semconv_experimental")] pub const AWS_ECS_TASK_REVISION: &str = "aws.ecs.task.revision"; + /// The ARN of an EKS cluster. /// +/// ## Notes +/// /// # Examples /// -/// - `arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster` +/// - `"arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"` +#[cfg(feature = "semconv_experimental")] pub const AWS_EKS_CLUSTER_ARN: &str = "aws.eks.cluster.arn"; + /// The full invoked ARN as provided on the `Context` passed to the function (`Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` applicable). /// +/// ## Notes +/// /// This may be different from `cloud.resource_id` if an alias is involved. /// /// # Examples /// -/// - `arn:aws:lambda:us-east-1:123456:function:myfunction:myalias` +/// - `"arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"` +#[cfg(feature = "semconv_experimental")] pub const AWS_LAMBDA_INVOKED_ARN: &str = "aws.lambda.invoked_arn"; + /// The Amazon Resource Name(s) (ARN) of the AWS log group(s). /// +/// ## Notes +/// /// See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
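Several of the surrounding keys (`aws.log.group.arns`, `aws.log.group.names`, `aws.log.stream.names`) are array-valued, which is why their examples render as bracketed lists. One hedged way to record such a value with the `opentelemetry` value types; the literals are the placeholder examples from the docs.

```rust
// Sketch: recording the array-valued `aws.log.group.names` attribute.
// `Array::String` holds `StringValue`s, which convert from `&'static str`.
// Import path for the constant is assumed.
use opentelemetry::{Array, KeyValue, Value};
use opentelemetry_semantic_conventions::attribute::AWS_LOG_GROUP_NAMES;

fn log_group_names_attribute() -> KeyValue {
    KeyValue::new(
        AWS_LOG_GROUP_NAMES,
        Value::Array(Array::String(vec![
            "/aws/lambda/my-function".into(),
            "opentelemetry-service".into(),
        ])),
    )
}
```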
/// /// # Examples /// -/// - `arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*` +/// - `[ +/// "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_LOG_GROUP_ARNS: &str = "aws.log.group.arns"; + /// The name(s) of the AWS log group(s) an application is writing to. /// +/// ## Notes +/// /// Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group. /// /// # Examples /// -/// - `/aws/lambda/my-function` -/// - `opentelemetry-service` +/// - `[ +/// "/aws/lambda/my-function", +/// "opentelemetry-service", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_LOG_GROUP_NAMES: &str = "aws.log.group.names"; + /// The ARN(s) of the AWS log stream(s). /// +/// ## Notes +/// /// See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream. /// /// # Examples /// -/// - `arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b` +/// - `[ +/// "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_LOG_STREAM_ARNS: &str = "aws.log.stream.arns"; + /// The name(s) of the AWS log stream(s) an application is writing to. /// +/// ## Notes +/// /// # Examples /// -/// - `logs/main/10838bed-421f-43ef-870a-f43feacbbb5b` +/// - `[ +/// "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const AWS_LOG_STREAM_NAMES: &str = "aws.log.stream.names"; + /// The AWS request ID as returned in the response headers `x-amz-request-id` or `x-amz-requestid`. /// +/// ## Notes +/// /// # Examples /// -/// - `79b9da39-b7ae-508a-a6bc-864b2829c622` -/// - `C9ER4AJX75574TDJ` +/// - `"79b9da39-b7ae-508a-a6bc-864b2829c622"` +/// - `"C9ER4AJX75574TDJ"` +#[cfg(feature = "semconv_experimental")] pub const AWS_REQUEST_ID: &str = "aws.request_id"; + /// The S3 bucket name the request refers to. Corresponds to the `--bucket` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. /// +/// ## Notes +/// /// The `bucket` attribute is applicable to all S3 operations that reference a bucket, i.e. that require the bucket name as a mandatory parameter. /// This applies to almost all S3 operations except `list-buckets`. /// /// # Examples /// -/// - `some-bucket-name` +/// - `"some-bucket-name"` +#[cfg(feature = "semconv_experimental")] pub const AWS_S3_BUCKET: &str = "aws.s3.bucket"; + /// The source object (in the form `bucket`/`key`) for the copy operation. /// +/// ## Notes +/// /// The `copy_source` attribute applies to S3 copy operations and corresponds to the `--copy-source` parameter /// of the [copy-object operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). 
/// This applies in particular to the following operations: @@ -376,20 +607,28 @@ pub const AWS_S3_BUCKET: &str = "aws.s3.bucket"; /// /// # Examples /// -/// - `someFile.yml` +/// - `"someFile.yml"` +#[cfg(feature = "semconv_experimental")] pub const AWS_S3_COPY_SOURCE: &str = "aws.s3.copy_source"; + /// The delete request container that specifies the objects to be deleted. /// +/// ## Notes +/// /// The `delete` attribute is only applicable to the [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) operation. /// The `delete` attribute corresponds to the `--delete` parameter of the /// [delete-objects operation within the S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). /// /// # Examples /// -/// - `Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean` +/// - `"Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"` +#[cfg(feature = "semconv_experimental")] pub const AWS_S3_DELETE: &str = "aws.s3.delete"; + /// The S3 object key the request refers to. Corresponds to the `--key` parameter of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) operations. /// +/// ## Notes +/// /// The `key` attribute is applicable to all object-related S3 operations, i.e. that require the object key as a mandatory parameter. /// This applies in particular to the following operations: /// @@ -409,10 +648,14 @@ pub const AWS_S3_DELETE: &str = "aws.s3.delete"; /// /// # Examples /// -/// - `someFile.yml` +/// - `"someFile.yml"` +#[cfg(feature = "semconv_experimental")] pub const AWS_S3_KEY: &str = "aws.s3.key"; + /// The part number of the part being uploaded in a multipart-upload operation. This is a positive integer between 1 and 10,000. /// +/// ## Notes +/// /// The `part_number` attribute is only applicable to the [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) /// and [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) operations. /// The `part_number` attribute corresponds to the `--part-number` parameter of the @@ -421,9 +664,13 @@ pub const AWS_S3_KEY: &str = "aws.s3.key"; /// # Examples /// /// - `3456` +#[cfg(feature = "semconv_experimental")] pub const AWS_S3_PART_NUMBER: &str = "aws.s3.part_number"; + /// Upload ID that identifies the multipart upload. /// +/// ## Notes +/// /// The `upload_id` attribute applies to S3 multipart-upload operations and corresponds to the `--upload-id` parameter /// of the [S3 API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) multipart operations. /// This applies in particular to the following operations: @@ -436,143 +683,231 @@ pub const AWS_S3_PART_NUMBER: &str = "aws.s3.part_number"; /// /// # Examples /// -/// - `dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ` +/// - `"dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"` +#[cfg(feature = "semconv_experimental")] pub const AWS_S3_UPLOAD_ID: &str = "aws.s3.upload_id"; -/// The unique identifier of the service request. It's generated by the Azure service and returned with the response. + +/// [Azure Resource Provider Namespace](https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers) as recognized by the client. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Microsoft.Storage"` +/// - `"Microsoft.KeyVault"` +/// - `"Microsoft.ServiceBus"` +#[cfg(feature = "semconv_experimental")] +pub const AZ_NAMESPACE: &str = "az.namespace"; + +/// The unique identifier of the service request. It's generated by the Azure service and returned with the response. +/// +/// ## Notes /// /// # Examples /// -/// - `00000000-0000-0000-0000-000000000000` +/// - `"00000000-0000-0000-0000-000000000000"` +#[cfg(feature = "semconv_experimental")] pub const AZ_SERVICE_REQUEST_ID: &str = "az.service_request_id"; -/// Array of brand name and version separated by a space. + +/// Array of brand name and version separated by a space +/// +/// ## Notes /// /// This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.brands`). /// /// # Examples /// -/// - ` Not A;Brand 99` -/// - `Chromium 99` -/// - `Chrome 99` +/// - `[ +/// " Not A;Brand 99", +/// "Chromium 99", +/// "Chrome 99", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const BROWSER_BRANDS: &str = "browser.brands"; -/// Preferred language of the user using the browser. + +/// Preferred language of the user using the browser +/// +/// ## Notes /// /// This value is intended to be taken from the Navigator API `navigator.language`. /// /// # Examples /// -/// - `en` -/// - `en-US` -/// - `fr` -/// - `fr-FR` +/// - `"en"` +/// - `"en-US"` +/// - `"fr"` +/// - `"fr-FR"` +#[cfg(feature = "semconv_experimental")] pub const BROWSER_LANGUAGE: &str = "browser.language"; -/// A boolean that is true if the browser is running on a mobile device. + +/// A boolean that is true if the browser is running on a mobile device +/// +/// ## Notes /// -/// This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset. +/// This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be left unset +#[cfg(feature = "semconv_experimental")] pub const BROWSER_MOBILE: &str = "browser.mobile"; -/// The platform on which the browser is running. + +/// The platform on which the browser is running +/// +/// ## Notes /// /// This value is intended to be taken from the [UA client hints API](https://wicg.github.io/ua-client-hints/#interface) (`navigator.userAgentData.platform`). If unavailable, the legacy `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD be left unset in order for the values to be consistent. /// The list of possible values is defined in the [W3C User-Agent Client Hints specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). Note that some (but not all) of these values can overlap with values in the [`os.type` and `os.name` attributes](./os.md). However, for consistency, the values in the `browser.platform` attribute should capture the exact value that the user agent provides. /// /// # Examples /// -/// - `Windows` -/// - `macOS` -/// - `Android` +/// - `"Windows"` +/// - `"macOS"` +/// - `"Android"` +#[cfg(feature = "semconv_experimental")] pub const BROWSER_PLATFORM: &str = "browser.platform"; + /// The human readable name of the pipeline within a CI/CD system. 
/// +/// ## Notes +/// /// # Examples /// -/// - `Build and Test` -/// - `Lint` -/// - `Deploy Go Project` -/// - `deploy_to_environment` +/// - `"Build and Test"` +/// - `"Lint"` +/// - `"Deploy Go Project"` +/// - `"deploy_to_environment"` +#[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_NAME: &str = "cicd.pipeline.name"; + /// The unique identifier of a pipeline run within a CI/CD system. /// +/// ## Notes +/// /// # Examples /// -/// - `120912` +/// - `"120912"` +#[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_RUN_ID: &str = "cicd.pipeline.run.id"; -/// The human readable name of a task within a pipeline. Task here most closely aligns with a [computing process](https://en.wikipedia.org/wiki/Pipeline_(computing)) in a pipeline. Other terms for tasks include commands, steps, and procedures. + +/// The human readable name of a task within a pipeline. Task here most closely aligns with a [computing process](https://wikipedia.org/wiki/Pipeline_(computing)) in a pipeline. Other terms for tasks include commands, steps, and procedures. +/// +/// ## Notes /// /// # Examples /// -/// - `Run GoLang Linter` -/// - `Go Build` -/// - `go-test` -/// - `deploy_binary` +/// - `"Run GoLang Linter"` +/// - `"Go Build"` +/// - `"go-test"` +/// - `"deploy_binary"` +#[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_TASK_NAME: &str = "cicd.pipeline.task.name"; + /// The unique identifier of a task run within a pipeline. /// +/// ## Notes +/// /// # Examples /// -/// - `12097` +/// - `"12097"` +#[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_TASK_RUN_ID: &str = "cicd.pipeline.task.run.id"; -/// The [URL](https://en.wikipedia.org/wiki/URL) of the pipeline run providing the complete address in order to locate and identify the pipeline run. + +/// The [URL](https://wikipedia.org/wiki/URL) of the pipeline run providing the complete address in order to locate and identify the pipeline run. +/// +/// ## Notes /// /// # Examples /// -/// - `https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075` +/// - `"https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"` +#[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_TASK_RUN_URL_FULL: &str = "cicd.pipeline.task.run.url.full"; + /// The type of the task within a pipeline. /// +/// ## Notes +/// /// # Examples /// -/// - `build` -/// - `test` -/// - `deploy` +/// - `"build"` +/// - `"test"` +/// - `"deploy"` +#[cfg(feature = "semconv_experimental")] pub const CICD_PIPELINE_TASK_TYPE: &str = "cicd.pipeline.task.type"; + /// Client address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. /// -/// When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent the client address behind any intermediaries, for example proxies, if it's available. +/// ## Notes +/// +/// When observed from the server side, and when communicating through an intermediary, `client.address` SHOULD represent the client address behind any intermediaries, for example proxies, if it's available. /// /// # Examples /// -/// - `client.example.com` -/// - `10.1.2.80` -/// - `/tmp/my.sock` +/// - `"client.example.com"` +/// - `"10.1.2.80"` +/// - `"/tmp/my.sock"` pub const CLIENT_ADDRESS: &str = "client.address"; + /// Client port number. 
/// -/// When observed from the server side, and when communicating through an intermediary, `client.port` SHOULD represent the client port behind any intermediaries, for example proxies, if it's available. +/// ## Notes +/// +/// When observed from the server side, and when communicating through an intermediary, `client.port` SHOULD represent the client port behind any intermediaries, for example proxies, if it's available. /// /// # Examples /// /// - `65123` pub const CLIENT_PORT: &str = "client.port"; + /// The cloud account ID the resource is assigned to. /// +/// ## Notes +/// /// # Examples /// -/// - `111111111111` -/// - `opentelemetry` +/// - `"111111111111"` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const CLOUD_ACCOUNT_ID: &str = "cloud.account.id"; + /// Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running. /// -/// Availability zones are called "zones" on Alibaba Cloud and Google Cloud. +/// ## Notes +/// +/// Availability zones are called "zones" on Alibaba Cloud and Google Cloud. /// /// # Examples /// -/// - `us-east-1c` +/// - `"us-east-1c"` +#[cfg(feature = "semconv_experimental")] pub const CLOUD_AVAILABILITY_ZONE: &str = "cloud.availability_zone"; + /// The cloud platform in use. /// -/// The prefix of the service SHOULD match the one specified in `cloud.provider`. +/// ## Notes +/// +/// The prefix of the service SHOULD match the one specified in `cloud.provider` +#[cfg(feature = "semconv_experimental")] pub const CLOUD_PLATFORM: &str = "cloud.platform"; + /// Name of the cloud provider. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const CLOUD_PROVIDER: &str = "cloud.provider"; + /// The geographical region the resource is running. /// -/// Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://www.tencentcloud.com/document/product/213/6091). +/// ## Notes +/// +/// Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://www.tencentcloud.com/document/product/213/6091). /// /// # Examples /// -/// - `us-central1` -/// - `us-east-1` +/// - `"us-central1"` +/// - `"us-east-1"` +#[cfg(feature = "semconv_experimental")] pub const CLOUD_REGION: &str = "cloud.region"; -/// The [Fully Qualified Azure Resource ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) the log is emitted for. + +/// Cloud provider-specific native identifier of the monitored cloud resource (e.g. 
an [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) on AWS, a [fully qualified resource ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on Azure, a [full resource name](https://cloud.google.com/apis/design/resource_names#full_resource_name) on GCP) +/// +/// ## Notes /// /// On some cloud providers, it may not be possible to determine the full ID at startup, /// so it may be necessary to set `cloud.resource_id` as a span attribute instead. @@ -580,595 +915,1303 @@ pub const CLOUD_REGION: &str = "cloud.region"; /// The exact value to use for `cloud.resource_id` depends on the cloud provider. /// The following well-known definitions MUST be used if you set this attribute and they apply: /// -/// * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). -/// Take care not to use the "invoked ARN" directly but replace any +/// - **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). +/// Take care not to use the "invoked ARN" directly but replace any /// [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) /// with the resolved function version, as the same runtime instance may be invocable with /// multiple different aliases. -/// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) -/// * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) of the invoked function, +/// - **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names) +/// - **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) of the invoked function, /// *not* the function app, having the form -/// `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. +/// `/subscriptions/[SUBSCRIPTION_GUID]/resourceGroups/[RG]/providers/Microsoft.Web/sites/[FUNCAPP]/functions/[FUNC]`. /// This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share /// a TracerProvider. /// /// # Examples /// -/// - `arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function` -/// - `//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID` -/// - `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` +/// - `"arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"` +/// - `"//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID"` +/// - `"/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/"` +#[cfg(feature = "semconv_experimental")] pub const CLOUD_RESOURCE_ID: &str = "cloud.resource_id"; + /// The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) uniquely identifies the event. /// +/// ## Notes +/// /// # Examples /// -/// - `123e4567-e89b-12d3-a456-426614174000` -/// - `0001` +/// - `"123e4567-e89b-12d3-a456-426614174000"` +/// - `"0001"` +#[cfg(feature = "semconv_experimental")] pub const CLOUDEVENTS_EVENT_ID: &str = "cloudevents.event_id"; + /// The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) identifies the context in which an event happened. 
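Returning to the `cloud.resource_id` rules a few entries above: for AWS Lambda, the alias suffix of the invoked ARN has to be swapped for the resolved function version. The following is only a string-level sketch of that substitution; `lambda_resource_id` is a hypothetical helper, and looking up `resolved_version` from the Lambda context is out of scope here.

```rust
// Hedged sketch of the alias-to-version substitution described for
// `cloud.resource_id` on AWS Lambda. Assumes `invoked_arn` ends in an
// alias suffix; `resolved_version` must be obtained from the function
// context elsewhere.
fn lambda_resource_id(invoked_arn: &str, resolved_version: &str) -> String {
    // "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function:myalias"
    //   -> "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function:<version>"
    let base = invoked_arn.rsplitn(2, ':').nth(1).unwrap_or(invoked_arn);
    format!("{base}:{resolved_version}")
}
```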
/// +/// ## Notes +/// /// # Examples /// -/// - `https://github.com/cloudevents` -/// - `/cloudevents/spec/pull/123` -/// - `my-service` +/// - `"https://github.com/cloudevents"` +/// - `"/cloudevents/spec/pull/123"` +/// - `"my-service"` +#[cfg(feature = "semconv_experimental")] pub const CLOUDEVENTS_EVENT_SOURCE: &str = "cloudevents.event_source"; + /// The [version of the CloudEvents specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. /// +/// ## Notes +/// /// # Examples /// -/// - `1.0` +/// - `"1.0"` +#[cfg(feature = "semconv_experimental")] pub const CLOUDEVENTS_EVENT_SPEC_VERSION: &str = "cloudevents.event_spec_version"; + /// The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) of the event in the context of the event producer (identified by source). /// +/// ## Notes +/// /// # Examples /// -/// - `mynewfile.jpg` +/// - `"mynewfile.jpg"` +#[cfg(feature = "semconv_experimental")] pub const CLOUDEVENTS_EVENT_SUBJECT: &str = "cloudevents.event_subject"; + /// The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) contains a value describing the type of event related to the originating occurrence. /// +/// ## Notes +/// /// # Examples /// -/// - `com.github.pull_request.opened` -/// - `com.example.object.deleted.v2` +/// - `"com.github.pull_request.opened"` +/// - `"com.example.object.deleted.v2"` +#[cfg(feature = "semconv_experimental")] pub const CLOUDEVENTS_EVENT_TYPE: &str = "cloudevents.event_type"; -/// The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. + +/// The guid of the application. /// -/// # Examples +/// ## Notes /// -/// - `16` -pub const CODE_COLUMN: &str = "code.column"; -/// The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.application_id`. This is the same value as +/// reported by `cf app [app-name] --guid`. /// /// # Examples /// -/// - `/usr/local/MyApplication/content_root/app/index.php` -pub const CODE_FILEPATH: &str = "code.filepath"; -/// The method or function name, or equivalent (usually rightmost part of the code unit's name). +/// - `"218fc5a9-a5f1-4b54-aa05-46717d0ab26d"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_APP_ID: &str = "cloudfoundry.app.id"; + +/// The index of the application instance. 0 when just one instance is active. /// -/// # Examples +/// ## Notes /// -/// - `serveRequest` -pub const CODE_FUNCTION: &str = "code.function"; -/// The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. +/// CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). +/// It is used for logs and metrics emitted by CloudFoundry. It is +/// supposed to contain the application instance index for applications +/// deployed on the runtime. +/// +/// Application instrumentation should use the value from environment +/// variable `CF_INSTANCE_INDEX`. /// /// # Examples /// -/// - `42` -pub const CODE_LINENO: &str = "code.lineno"; -/// The "namespace" within which `code.function` is defined. 
Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. +/// - `"0"` +/// - `"1"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_APP_INSTANCE_ID: &str = "cloudfoundry.app.instance.id"; + +/// The name of the application. /// -/// # Examples +/// ## Notes /// -/// - `com.example.MyHttpService` -pub const CODE_NAMESPACE: &str = "code.namespace"; -/// A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.application_name`. This is the same value +/// as reported by `cf apps`. /// /// # Examples /// -/// - `at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at com.example.GenerateTrace.main(GenerateTrace.java:5)` -pub const CODE_STACKTRACE: &str = "code.stacktrace"; -/// The command used to run the container (i.e. the command name). +/// - `"my-app-name"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_APP_NAME: &str = "cloudfoundry.app.name"; + +/// The guid of the CloudFoundry org the application is running in. /// -/// If using embedded credentials or sensitive data, it is recommended to remove them to prevent potential leakage. +/// ## Notes +/// +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.org_id`. This is the same value as +/// reported by `cf org [org-name] --guid`. /// /// # Examples /// -/// - `otelcontribcol` -pub const CONTAINER_COMMAND: &str = "container.command"; -/// All the command arguments (including the command/executable itself) run by the container. +/// - `"218fc5a9-a5f1-4b54-aa05-46717d0ab26d"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_ORG_ID: &str = "cloudfoundry.org.id"; + +/// The name of the CloudFoundry organization the app is running in. /// -/// # Examples +/// ## Notes /// -/// - `otelcontribcol, --config, config.yaml` -pub const CONTAINER_COMMAND_ARGS: &str = "container.command_args"; -/// The full command run by the container as a single string representing the full command. +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.org_name`. This is the same value as +/// reported by `cf orgs`. /// /// # Examples /// -/// - `otelcontribcol --config config.yaml` -pub const CONTAINER_COMMAND_LINE: &str = "container.command_line"; -/// Deprecated, use `cpu.mode` instead. +/// - `"my-org-name"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_ORG_NAME: &str = "cloudfoundry.org.name"; + +/// The UID identifying the process. /// -/// # Examples +/// ## Notes /// -/// - `user` -/// - `kernel` -#[deprecated] -pub const CONTAINER_CPU_STATE: &str = "container.cpu.state"; -/// Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated. +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to +/// `VCAP_APPLICATION.app_id` for applications deployed to the runtime. +/// For system components, this could be the actual PID. 
/// /// # Examples /// -/// - `a3bf90e006b2` -pub const CONTAINER_ID: &str = "container.id"; -/// Runtime specific image identifier. Usually a hash algorithm followed by a UUID. -/// -/// Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) endpoint. -/// K8s defines a link to the container registry repository with digest `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. -/// The ID is assigned by the container runtime and can vary in different environments. Consider using `oci.manifest.digest` if it is important to identify the same image in different environments/runtimes. +/// - `"218fc5a9-a5f1-4b54-aa05-46717d0ab26d"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_PROCESS_ID: &str = "cloudfoundry.process.id"; + +/// The type of process. /// -/// # Examples +/// ## Notes /// -/// - `sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f` -pub const CONTAINER_IMAGE_ID: &str = "container.image.id"; -/// Name of the image the container was built on. +/// CloudFoundry applications can consist of multiple jobs. Usually the +/// main process will be of type `web`. There can be additional background +/// tasks or side-cars with different process types. /// /// # Examples /// -/// - `gcr.io/opentelemetry/operator` -pub const CONTAINER_IMAGE_NAME: &str = "container.image.name"; -/// Repo digests of the container image as provided by the container runtime. +/// - `"web"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_PROCESS_TYPE: &str = "cloudfoundry.process.type"; + +/// The guid of the CloudFoundry space the application is running in. /// -/// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) and [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) report those under the `RepoDigests` field. +/// ## Notes +/// +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.space_id`. This is the same value as +/// reported by `cf space [space-name] --guid`. /// /// # Examples /// -/// - `example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb` -/// - `internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578` -pub const CONTAINER_IMAGE_REPO_DIGESTS: &str = "container.image.repo_digests"; -/// Container image tags. An example can be found in [Docker Image Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). Should be only the `<tag>` section of the full name for example from `registry.example.com/my-org/my-image:<tag>`. +/// - `"218fc5a9-a5f1-4b54-aa05-46717d0ab26d"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_SPACE_ID: &str = "cloudfoundry.space.id"; + +/// The name of the CloudFoundry space the application is running in. /// -/// # Examples +/// ## Notes /// -/// - `v1.27.1` -/// - `3.5.7-0` -pub const CONTAINER_IMAGE_TAGS: &str = "container.image.tags"; -/// Container name used by container runtime. +/// Application instrumentation should use the value from environment +/// variable `VCAP_APPLICATION.space_name`. This is the same value as +/// reported by `cf spaces`. 
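The CloudFoundry keys in this stretch are resource-level attributes, so they typically end up on the SDK `Resource` rather than on individual spans. A hedged sketch; real values would be parsed out of `VCAP_APPLICATION` as the notes describe, which is elided here in favor of the documented example literals.

```rust
// Sketch: a CloudFoundry `Resource` built from the attributes above.
// Values are the placeholder examples from the docs; import path for
// the constants is assumed.
use opentelemetry::KeyValue;
use opentelemetry_sdk::Resource;
use opentelemetry_semantic_conventions::attribute::{
    CLOUDFOUNDRY_APP_ID, CLOUDFOUNDRY_APP_NAME, CLOUDFOUNDRY_SPACE_NAME,
};

fn cloudfoundry_resource() -> Resource {
    Resource::new([
        KeyValue::new(CLOUDFOUNDRY_APP_ID, "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"),
        KeyValue::new(CLOUDFOUNDRY_APP_NAME, "my-app-name"),
        KeyValue::new(CLOUDFOUNDRY_SPACE_NAME, "my-space-name"),
    ])
}
```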
/// /// # Examples /// -/// - `opentelemetry-autoconf` -pub const CONTAINER_NAME: &str = "container.name"; -/// The container runtime managing this container. +/// - `"my-space-name"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_SPACE_NAME: &str = "cloudfoundry.space.name"; + +/// A guid or another name describing the event source. /// -/// # Examples +/// ## Notes /// -/// - `docker` -/// - `containerd` -/// - `rkt` -pub const CONTAINER_RUNTIME: &str = "container.runtime"; -/// The CPU mode for this data point. A container's CPU metric SHOULD be characterized _either_ by data points with no `mode` labels, _or only_ data points with `mode` labels. +/// CloudFoundry defines the `source_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). +/// It is used for logs and metrics emitted by CloudFoundry. It is +/// supposed to contain the component name, e.g. "gorouter", for +/// CloudFoundry components. /// -/// Following states SHOULD be used: `user`, `system`, `kernel` +/// When system components are instrumented, values from the +/// [Bosh spec](https://bosh.io/docs/jobs/#properties-spec) +/// should be used. The `system.id` should be set to +/// `spec.deployment/spec.name`. /// /// # Examples /// -/// - `user` -/// - `system` -pub const CPU_MODE: &str = "cpu.mode"; -/// The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). -pub const DB_CASSANDRA_CONSISTENCY_LEVEL: &str = "db.cassandra.consistency_level"; -/// The data center of the coordinating node for a query. +/// - `"cf/gorouter"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_SYSTEM_ID: &str = "cloudfoundry.system.id"; + +/// A guid describing the concrete instance of the event source. /// -/// # Examples +/// ## Notes /// -/// - `us-west-2` -pub const DB_CASSANDRA_COORDINATOR_DC: &str = "db.cassandra.coordinator.dc"; -/// The ID of the coordinating node for a query. +/// CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope](https://github.com/cloudfoundry/loggregator-api#v2-envelope). +/// It is used for logs and metrics emitted by CloudFoundry. It is +/// supposed to contain the vm id for CloudFoundry components. +/// +/// When system components are instrumented, values from the +/// [Bosh spec](https://bosh.io/docs/jobs/#properties-spec) +/// should be used. The `system.instance.id` should be set to `spec.id`. /// /// # Examples /// -/// - `be13faa2-8574-4d71-926d-27f16cf8a7af` -pub const DB_CASSANDRA_COORDINATOR_ID: &str = "db.cassandra.coordinator.id"; -/// Whether or not the query is idempotent. -pub const DB_CASSANDRA_IDEMPOTENCE: &str = "db.cassandra.idempotence"; -/// The fetch size used for paging, i.e. how many rows will be returned at once. +/// - `"218fc5a9-a5f1-4b54-aa05-46717d0ab26d"` +#[cfg(feature = "semconv_experimental")] +pub const CLOUDFOUNDRY_SYSTEM_INSTANCE_ID: &str = "cloudfoundry.system.instance.id"; + +/// The column number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. +/// +/// ## Notes /// /// # Examples /// -/// - `5000` -pub const DB_CASSANDRA_PAGE_SIZE: &str = "db.cassandra.page_size"; -/// The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively. 
+/// - `16` +#[cfg(feature = "semconv_experimental")] +pub const CODE_COLUMN: &str = "code.column"; + +/// The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). +/// +/// ## Notes /// /// # Examples /// -/// - `0` +/// - `"/usr/local/MyApplication/content_root/app/index.php"` +#[cfg(feature = "semconv_experimental")] +pub const CODE_FILEPATH: &str = "code.filepath"; + +/// The method or function name, or equivalent (usually rightmost part of the code unit's name). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"serveRequest"` +#[cfg(feature = "semconv_experimental")] +pub const CODE_FUNCTION: &str = "code.function"; + +/// The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `42` +#[cfg(feature = "semconv_experimental")] +pub const CODE_LINENO: &str = "code.lineno"; + +/// The "namespace" within which `code.function` is defined. Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"com.example.MyHttpService"` +#[cfg(feature = "semconv_experimental")] +pub const CODE_NAMESPACE: &str = "code.namespace"; + +/// A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at com.example.GenerateTrace.main(GenerateTrace.java:5)\n"` +#[cfg(feature = "semconv_experimental")] +pub const CODE_STACKTRACE: &str = "code.stacktrace"; + +/// The command used to run the container (i.e. the command name). +/// +/// ## Notes +/// +/// If using embedded credentials or sensitive data, it is recommended to remove them to prevent potential leakage. +/// +/// # Examples +/// +/// - `"otelcontribcol"` +#[cfg(feature = "semconv_experimental")] +pub const CONTAINER_COMMAND: &str = "container.command"; + +/// All the command arguments (including the command/executable itself) run by the container. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `[ +/// "otelcontribcol", +/// "--config", +/// "config.yaml", +/// ]` +#[cfg(feature = "semconv_experimental")] +pub const CONTAINER_COMMAND_ARGS: &str = "container.command_args"; + +/// The full command run by the container as a single string representing the full command. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"otelcontribcol --config config.yaml"` +#[cfg(feature = "semconv_experimental")] +pub const CONTAINER_COMMAND_LINE: &str = "container.command_line"; + +/// Deprecated, use `cpu.mode` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"user"` +/// - `"kernel"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cpu.mode`")] +pub const CONTAINER_CPU_STATE: &str = "container.cpu.state"; + +/// The name of the CSI ([Container Storage Interface](https://github.com/container-storage-interface/spec)) plugin used by the volume. +/// +/// ## Notes +/// +/// This can sometimes be referred to as a "driver" in CSI implementations. This should represent the `name` field of the GetPluginInfo RPC. 
+///
+/// # Examples
+///
+/// - `"pd.csi.storage.gke.io"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_CSI_PLUGIN_NAME: &str = "container.csi.plugin.name";
+
+/// The unique volume ID returned by the CSI ([Container Storage Interface](https://github.com/container-storage-interface/spec)) plugin.
+///
+/// ## Notes
+///
+/// This can sometimes be referred to as a "volume handle" in CSI implementations. This should represent the `Volume.volume_id` field in CSI spec.
+///
+/// # Examples
+///
+/// - `"projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_CSI_VOLUME_ID: &str = "container.csi.volume.id";
+
+/// Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/containers/run/#container-identification). The UUID might be abbreviated.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"a3bf90e006b2"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_ID: &str = "container.id";
+
+/// Runtime specific image identifier. Usually a hash algorithm followed by a UUID.
+///
+/// ## Notes
+///
+/// Docker defines a sha256 of the image id; `container.image.id` corresponds to the `Image` field from the Docker container inspect [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) endpoint.
+/// K8s defines a link to the container registry repository with digest `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+/// The ID is assigned by the container runtime and can vary in different environments. Consider using `oci.manifest.digest` if it is important to identify the same image in different environments/runtimes.
+///
+/// # Examples
+///
+/// - `"sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_IMAGE_ID: &str = "container.image.id";
+
+/// Name of the image the container was built on.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"gcr.io/opentelemetry/operator"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_IMAGE_NAME: &str = "container.image.name";
+
+/// Repo digests of the container image as provided by the container runtime.
+///
+/// ## Notes
+///
+/// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) and [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) report those under the `RepoDigests` field.
+///
+/// # Examples
+///
+/// - `[
+/// "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+/// "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578",
+/// ]`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_IMAGE_REPO_DIGESTS: &str = "container.image.repo_digests";
+
+/// Container image tags. An example can be found in [Docker Image Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). Should be only the `[tag]` section of the full name for example from `registry.example.com/my-org/my-image:[tag]`.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `[
+/// "v1.27.1",
+/// "3.5.7-0",
+/// ]`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_IMAGE_TAGS: &str = "container.image.tags";
+
+/// Container labels, `[key]` being the label name, the value being the label value.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"container.label.app=nginx"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_LABEL: &str = "container.label";
+
+/// Deprecated, use `container.label` instead.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"container.label.app=nginx"`
+#[cfg(feature = "semconv_experimental")]
+#[deprecated(note = "Replaced by `container.label`.")]
+pub const CONTAINER_LABELS: &str = "container.labels";
+
+/// Container name used by container runtime.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"opentelemetry-autoconf"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_NAME: &str = "container.name";
+
+/// The container runtime managing this container.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"docker"`
+/// - `"containerd"`
+/// - `"rkt"`
+#[cfg(feature = "semconv_experimental")]
+pub const CONTAINER_RUNTIME: &str = "container.runtime";
+
+/// The mode of the CPU
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"user"`
+/// - `"system"`
+#[cfg(feature = "semconv_experimental")]
+pub const CPU_MODE: &str = "cpu.mode";
+
+/// The consistency level of the query. Based on consistency values from [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+///
+/// ## Notes
+#[cfg(feature = "semconv_experimental")]
+pub const DB_CASSANDRA_CONSISTENCY_LEVEL: &str = "db.cassandra.consistency_level";
+
+/// The data center of the coordinating node for a query.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"us-west-2"`
+#[cfg(feature = "semconv_experimental")]
+pub const DB_CASSANDRA_COORDINATOR_DC: &str = "db.cassandra.coordinator.dc";
+
+/// The ID of the coordinating node for a query.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `"be13faa2-8574-4d71-926d-27f16cf8a7af"`
+#[cfg(feature = "semconv_experimental")]
+pub const DB_CASSANDRA_COORDINATOR_ID: &str = "db.cassandra.coordinator.id";
+
+/// Whether or not the query is idempotent.
+///
+/// ## Notes
+#[cfg(feature = "semconv_experimental")]
+pub const DB_CASSANDRA_IDEMPOTENCE: &str = "db.cassandra.idempotence";
+
+/// The fetch size used for paging, i.e. how many rows will be returned at once.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `5000`
+#[cfg(feature = "semconv_experimental")]
+pub const DB_CASSANDRA_PAGE_SIZE: &str = "db.cassandra.page_size";
+
+/// The number of times a query was speculatively executed. Not set or `0` if the query was not executed speculatively.
+///
+/// ## Notes
+///
+/// # Examples
+///
+/// - `0`
 /// - `2`
+#[cfg(feature = "semconv_experimental")]
 pub const DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT: &str = "db.cassandra.speculative_execution_count";
+
 /// Deprecated, use `db.collection.name` instead.
 ///
+/// ## Notes
+///
 /// # Examples
 ///
-/// - `mytable`
-#[deprecated]
+/// - `"mytable"`
+#[cfg(feature = "semconv_experimental")]
+#[deprecated(note = "Replaced by `db.collection.name`.")]
 pub const DB_CASSANDRA_TABLE: &str = "db.cassandra.table";
-/// The name of the connection pool;
In case the connection pool implementation doesn't provide a name, instrumentation SHOULD use a combination of parameters that would make the name unique, for example, combining attributes `server.address`, `server.port`, and `db.namespace`, formatted as `server.address:server.port/db.namespace`. Instrumentations that generate connection pool name following different patterns SHOULD document it. + +/// The name of the connection pool; unique within the instrumented application. In case the connection pool implementation doesn't provide a name, instrumentation SHOULD use a combination of parameters that would make the name unique, for example, combining attributes `server.address`, `server.port`, and `db.namespace`, formatted as `server.address:server.port/db.namespace`. Instrumentations that generate connection pool name following different patterns SHOULD document it. +/// +/// ## Notes /// /// # Examples /// -/// - `myDataSource` +/// - `"myDataSource"` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_POOL_NAME: &str = "db.client.connection.pool.name"; -/// The state of a connection in the pool. + +/// The state of a connection in the pool +/// +/// ## Notes /// /// # Examples /// -/// - `idle` +/// - `"idle"` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_STATE: &str = "db.client.connection.state"; + /// Deprecated, use `db.client.connection.pool.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `myDataSource` -#[deprecated] +/// - `"myDataSource"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.pool.name`.")] pub const DB_CLIENT_CONNECTIONS_POOL_NAME: &str = "db.client.connections.pool.name"; + /// Deprecated, use `db.client.connection.state` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `idle` -#[deprecated] +/// - `"idle"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.state`.")] pub const DB_CLIENT_CONNECTIONS_STATE: &str = "db.client.connections.state"; + /// The name of a collection (table, container) within the database. /// +/// ## Notes +/// /// It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. -/// If the collection name is parsed from the query text, it SHOULD be the first collection name found in the query and it SHOULD match the value provided in the query text including any schema and database name prefix. -/// For batch operations, if the individual operations are known to have the same collection name then that collection name SHOULD be used, otherwise `db.collection.name` SHOULD NOT be captured. +/// +/// The collection name SHOULD NOT be extracted from `db.query.text`, +/// unless the query format is known to only ever have a single collection name present. +/// +/// For batch operations, if the individual operations are known to have the same collection name +/// then that collection name SHOULD be used. +/// +/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// -/// - `public.users` -/// - `customers` +/// - `"public.users"` +/// - `"customers"` +#[cfg(feature = "semconv_experimental")] pub const DB_COLLECTION_NAME: &str = "db.collection.name"; + /// Deprecated, use `server.address`, `server.port` attributes instead. 
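The `#[deprecated]` attributes in this diff now carry explicit `note = "Replaced by ..."` messages, so rustc's warning names the successor key directly. At a call site the migration is mechanical, sketched here with a placeholder value.

```rust
// Sketch: migrating off the deprecated Cassandra-specific key onto the
// generic `db.collection.name`. The old constant still compiles but
// warns unless explicitly allowed. Import path is assumed.
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute::DB_COLLECTION_NAME;

#[allow(deprecated)]
fn before() -> KeyValue {
    use opentelemetry_semantic_conventions::attribute::DB_CASSANDRA_TABLE;
    KeyValue::new(DB_CASSANDRA_TABLE, "mytable") // warns without the allow
}

fn after() -> KeyValue {
    KeyValue::new(DB_COLLECTION_NAME, "mytable")
}
```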
/// +/// ## Notes +/// /// # Examples /// -/// - `Server=(localdb)\v11.0;Integrated Security=true;` -#[deprecated] +/// - `"Server=(localdb)\\v11.0;Integrated Security=true;"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `server.address` and `server.port`.")] pub const DB_CONNECTION_STRING: &str = "db.connection_string"; + /// Unique Cosmos client instance id. /// +/// ## Notes +/// /// # Examples /// -/// - `3ba4827d-4422-483f-b59f-85b74211c11d` +/// - `"3ba4827d-4422-483f-b59f-85b74211c11d"` +#[cfg(feature = "semconv_experimental")] pub const DB_COSMOSDB_CLIENT_ID: &str = "db.cosmosdb.client_id"; + /// Cosmos client connection mode. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const DB_COSMOSDB_CONNECTION_MODE: &str = "db.cosmosdb.connection_mode"; + +/// Account or request [consistency level](https://learn.microsoft.com/azure/cosmos-db/consistency-levels). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Eventual"` +/// - `"ConsistentPrefix"` +/// - `"BoundedStaleness"` +/// - `"Strong"` +/// - `"Session"` +#[cfg(feature = "semconv_experimental")] +pub const DB_COSMOSDB_CONSISTENCY_LEVEL: &str = "db.cosmosdb.consistency_level"; + /// Deprecated, use `db.collection.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `mytable` -#[deprecated] +/// - `"mytable"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.collection.name`.")] pub const DB_COSMOSDB_CONTAINER: &str = "db.cosmosdb.container"; -/// CosmosDB Operation Type. + +/// Deprecated, no replacement at this time. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "No replacement at this time.")] pub const DB_COSMOSDB_OPERATION_TYPE: &str = "db.cosmosdb.operation_type"; -/// RU consumed for that operation. + +/// List of regions contacted during operation in the order that they were contacted. If there is more than one region listed, it indicates that the operation was performed on multiple regions i.e. cross-regional call. +/// +/// ## Notes +/// +/// Region name matches the format of `displayName` in [Azure Location API](https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location) +/// +/// # Examples +/// +/// - `[ +/// "North Central US", +/// "Australia East", +/// "Australia Southeast", +/// ]` +#[cfg(feature = "semconv_experimental")] +pub const DB_COSMOSDB_REGIONS_CONTACTED: &str = "db.cosmosdb.regions_contacted"; + +/// Request units consumed for the operation. +/// +/// ## Notes /// /// # Examples /// /// - `46.18` /// - `1.0` +#[cfg(feature = "semconv_experimental")] pub const DB_COSMOSDB_REQUEST_CHARGE: &str = "db.cosmosdb.request_charge"; + /// Request payload size in bytes. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const DB_COSMOSDB_REQUEST_CONTENT_LENGTH: &str = "db.cosmosdb.request_content_length"; -/// Cosmos DB status code. + +/// Deprecated, use `db.response.status_code` instead. +/// +/// ## Notes /// /// # Examples /// /// - `200` /// - `201` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.response.status_code`.")] pub const DB_COSMOSDB_STATUS_CODE: &str = "db.cosmosdb.status_code"; + /// Cosmos DB sub status code. /// +/// ## Notes +/// /// # Examples /// /// - `1000` /// - `1002` +#[cfg(feature = "semconv_experimental")] pub const DB_COSMOSDB_SUB_STATUS_CODE: &str = "db.cosmosdb.sub_status_code"; + /// Deprecated, use `db.namespace` instead. 
/// +/// ## Notes +/// /// # Examples /// -/// - `e9106fc68e3044f0b1475b04bf4ffd5f` -#[deprecated] +/// - `"e9106fc68e3044f0b1475b04bf4ffd5f"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.namespace`.")] pub const DB_ELASTICSEARCH_CLUSTER_NAME: &str = "db.elasticsearch.cluster.name"; + /// Represents the human-readable identifier of the node/instance to which a request was routed. /// +/// ## Notes +/// /// # Examples /// -/// - `instance-0000000001` +/// - `"instance-0000000001"` +#[cfg(feature = "semconv_experimental")] pub const DB_ELASTICSEARCH_NODE_NAME: &str = "db.elasticsearch.node.name"; + +/// A dynamic value in the url path. +/// +/// ## Notes +/// +/// Many Elasticsearch url paths allow dynamic values. These SHOULD be recorded in span attributes in the format `db.elasticsearch.path_parts.[key]`, where `[key]` is the url path part name. The implementation SHOULD reference the [elasticsearch schema](https://raw.githubusercontent.com/elastic/elasticsearch-specification/main/output/schema/schema.json) in order to map the path part values to their names. +/// +/// # Examples +/// +/// - `"db.elasticsearch.path_parts.index=test-index"` +/// - `"db.elasticsearch.path_parts.doc_id=123"` +#[cfg(feature = "semconv_experimental")] +pub const DB_ELASTICSEARCH_PATH_PARTS: &str = "db.elasticsearch.path_parts"; + /// Deprecated, no general replacement at this time. For Elasticsearch, use `db.elasticsearch.node.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `mysql-e26b99z.example.com` -#[deprecated] +/// - `"mysql-e26b99z.example.com"` +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Deprecated, no general replacement at this time. For Elasticsearch, use `db.elasticsearch.node.name` instead." +)] pub const DB_INSTANCE_ID: &str = "db.instance.id"; + /// Removed, no replacement at this time. /// +/// ## Notes +/// /// # Examples /// -/// - `org.postgresql.Driver` -/// - `com.microsoft.sqlserver.jdbc.SQLServerDriver` -#[deprecated] +/// - `"org.postgresql.Driver"` +/// - `"com.microsoft.sqlserver.jdbc.SQLServerDriver"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Removed as not used.")] pub const DB_JDBC_DRIVER_CLASSNAME: &str = "db.jdbc.driver_classname"; + /// Deprecated, use `db.collection.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `mytable` -#[deprecated] +/// - `"mytable"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.collection.name`.")] pub const DB_MONGODB_COLLECTION: &str = "db.mongodb.collection"; + /// Deprecated, SQL Server instance is now populated as a part of `db.namespace` attribute. /// +/// ## Notes +/// /// # Examples /// -/// - `MSSQLSERVER` -#[deprecated] +/// - `"MSSQLSERVER"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, no replacement at this time.")] pub const DB_MSSQL_INSTANCE_NAME: &str = "db.mssql.instance_name"; + /// Deprecated, use `db.namespace` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `customers` -/// - `main` -#[deprecated] +/// - `"customers"` +/// - `"main"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.namespace`.")] pub const DB_NAME: &str = "db.name"; + /// The name of the database, fully qualified within the server address and port. 
/// -/// If a database system has multiple namespace components, they SHOULD be concatenated (potentially using database system specific conventions) from most general to most specific namespace component, and more specific namespaces SHOULD NOT be captured without the more general namespaces, to ensure that "startswith" queries for the more general namespaces will be valid. +/// ## Notes +/// +/// If a database system has multiple namespace components, they SHOULD be concatenated (potentially using database system specific conventions) from most general to most specific namespace component, and more specific namespaces SHOULD NOT be captured without the more general namespaces, to ensure that "startswith" queries for the more general namespaces will be valid. /// Semantic conventions for individual database systems SHOULD document what `db.namespace` means in the context of that system. /// It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. +/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// -/// - `customers` -/// - `test.users` +/// - `"customers"` +/// - `"test.users"` +#[cfg(feature = "semconv_experimental")] pub const DB_NAMESPACE: &str = "db.namespace"; + /// Deprecated, use `db.operation.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `findAndModify` -/// - `HMSET` -/// - `SELECT` -#[deprecated] +/// - `"findAndModify"` +/// - `"HMSET"` +/// - `"SELECT"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.operation.name`.")] pub const DB_OPERATION: &str = "db.operation"; -/// The number of queries included in a [batch operation](/docs/database/database-spans.md#batch-operations). + +/// The number of queries included in a batch operation. +/// +/// ## Notes /// /// Operations are only considered batches when they contain two or more operations, and so `db.operation.batch.size` SHOULD never be `1`. +/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// /// - `2` /// - `3` /// - `4` +#[cfg(feature = "semconv_experimental")] pub const DB_OPERATION_BATCH_SIZE: &str = "db.operation.batch.size"; + /// The name of the operation or command being executed. /// -/// It is RECOMMENDED to capture the value as provided by the application without attempting to do any case normalization. -/// If the operation name is parsed from the query text, it SHOULD be the first operation name found in the query. -/// For batch operations, if the individual operations are known to have the same operation name then that operation name SHOULD be used prepended by `BATCH `, otherwise `db.operation.name` SHOULD be `BATCH` or some other database system specific term if more applicable. +/// ## Notes +/// +/// It is RECOMMENDED to capture the value as provided by the application +/// without attempting to do any case normalization. +/// +/// The operation name SHOULD NOT be extracted from `db.query.text`, +/// unless the query format is known to only ever have a single operation name present. +/// +/// For batch operations, if the individual operations are known to have the same operation name +/// then that operation name SHOULD be used prepended by `BATCH `, +/// otherwise `db.operation.name` SHOULD be `BATCH` or some other database +/// system specific term if more applicable. +/// +/// This attribute has stability level RELEASE CANDIDATE. 
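To make the ordering rule for `db.namespace` concrete, a small sketch, with the component names assumed for illustration:

```rust
fn main() {
    // Assumed namespace components, ordered most general to most specific.
    let components = ["mydatabase", "myschema"];

    // "mydatabase.myschema": a "startswith" query on the general part stays valid.
    let db_namespace = components.join(".");
    assert_eq!(db_namespace, "mydatabase.myschema");
}
```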
/// /// # Examples /// -/// - `findAndModify` -/// - `HMSET` -/// - `SELECT` +/// - `"findAndModify"` +/// - `"HMSET"` +/// - `"SELECT"` +#[cfg(feature = "semconv_experimental")] pub const DB_OPERATION_NAME: &str = "db.operation.name"; + +/// A database operation parameter, with `[key]` being the parameter name, and the attribute value being a string representation of the parameter value. +/// +/// ## Notes +/// +/// If a parameter has no name and instead is referenced only by index, then `[key]` SHOULD be the 0-based index. +/// If `db.query.text` is also captured, then `db.operation.parameter.[key]` SHOULD match up with the parameterized placeholders present in `db.query.text`. +/// This attribute has stability level RELEASE CANDIDATE. +/// +/// # Examples +/// +/// - `"someval"` +/// - `"55"` +#[cfg(feature = "semconv_experimental")] +pub const DB_OPERATION_PARAMETER: &str = "db.operation.parameter"; + +/// A query parameter used in `db.query.text`, with `[key]` being the parameter name, and the attribute value being a string representation of the parameter value. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"someval"` +/// - `"55"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.operation.parameter`.")] +pub const DB_QUERY_PARAMETER: &str = "db.query.parameter"; + +/// Low cardinality representation of a database query text. +/// +/// ## Notes +/// +/// `db.query.summary` provides a static summary of the query text. It describes a class of database queries and is useful as a grouping key, especially when analyzing telemetry for database calls involving complex queries. +/// Summary may be available to the instrumentation through instrumentation hooks or other means. If it is not available, instrumentations that support query parsing SHOULD generate a summary following the [Generating query summary](../../docs/database/database-spans.md#generating-a-summary-of-the-query-text) section. +/// This attribute has stability level RELEASE CANDIDATE. +/// +/// # Examples +/// +/// - `"SELECT wuser_table"` +/// - `"INSERT shipping_details SELECT orders"` +/// - `"get user by id"` +#[cfg(feature = "semconv_experimental")] +pub const DB_QUERY_SUMMARY: &str = "db.query.summary"; + /// The database query being executed. /// +/// ## Notes +/// /// For sanitization see [Sanitization of `db.query.text`](../../docs/database/database-spans.md#sanitization-of-dbquerytext). /// For batch operations, if the individual operations are known to have the same query text then that query text SHOULD be used, otherwise all of the individual query texts SHOULD be concatenated with separator `; ` or some other database system specific separator if more applicable. /// Even though parameterized query text can potentially have sensitive data, by using a parameterized query the user is giving a strong signal that any sensitive data will be passed as parameter values, and the benefit to observability of capturing the static part of the query text by default outweighs the risk. +/// This attribute has stability level RELEASE CANDIDATE. /// /// # Examples /// -/// - `SELECT * FROM wuser_table where username = ?` -/// - `SET mykey "WuValue"` +/// - `"SELECT * FROM wuser_table where username = ?"` +/// - `"SET mykey ?"` +#[cfg(feature = "semconv_experimental")] pub const DB_QUERY_TEXT: &str = "db.query.text"; + /// Deprecated, use `db.namespace` instead.
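Putting the batch and parameter-template rules together, a sketch of annotating a three-statement batch, with all literal values assumed:

```rust
use opentelemetry::KeyValue;

fn main() {
    let attributes = [
        // All operations in the batch share one name, prepended by `BATCH `.
        KeyValue::new("db.operation.name", "BATCH INSERT"),
        KeyValue::new("db.operation.batch.size", 3),
        KeyValue::new("db.query.text", "INSERT INTO orders (id) VALUES (?)"),
        // Templated key: an unnamed parameter uses its 0-based index as `[key]`.
        KeyValue::new("db.operation.parameter.0", "42"),
    ];
    println!("{attributes:?}");
}
```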
/// +/// ## Notes +/// /// # Examples /// /// - `0` /// - `1` /// - `15` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.namespace`.")] pub const DB_REDIS_DATABASE_INDEX: &str = "db.redis.database_index"; + +/// Number of rows returned by the operation. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `10` +/// - `30` +/// - `1000` +#[cfg(feature = "semconv_experimental")] +pub const DB_RESPONSE_RETURNED_ROWS: &str = "db.response.returned_rows"; + +/// Database response status code. +/// +/// ## Notes +/// +/// The status code returned by the database. Usually it represents an error code, but may also represent partial success, warning, or differentiate between various types of successful outcomes. +/// Semantic conventions for individual database systems SHOULD document what `db.response.status_code` means in the context of that system. +/// This attribute has stability level RELEASE CANDIDATE. +/// +/// # Examples +/// +/// - `"102"` +/// - `"ORA-17002"` +/// - `"08P01"` +/// - `"404"` +#[cfg(feature = "semconv_experimental")] +pub const DB_RESPONSE_STATUS_CODE: &str = "db.response.status_code"; + /// Deprecated, use `db.collection.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `mytable` -#[deprecated] +/// - `"mytable"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.collection.name`.")] pub const DB_SQL_TABLE: &str = "db.sql.table"; + /// The database statement being executed. /// +/// ## Notes +/// /// # Examples /// -/// - `SELECT * FROM wuser_table` -/// - `SET mykey "WuValue"` -#[deprecated] +/// - `"SELECT * FROM wuser_table"` +/// - `"SET mykey \"WuValue\""` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.query.text`.")] pub const DB_STATEMENT: &str = "db.statement"; + /// The database management system (DBMS) product as identified by the client instrumentation. /// -/// The actual DBMS may differ from the one identified by the client. For example, when using PostgreSQL client libraries to connect to a CockroachDB, the `db.system` is set to `postgresql` based on the instrumentation's best knowledge. +/// ## Notes +/// +/// The actual DBMS may differ from the one identified by the client. For example, when using PostgreSQL client libraries to connect to a CockroachDB, the `db.system` is set to `postgresql` based on the instrumentation's best knowledge. +/// This attribute has stability level RELEASE CANDIDATE. +#[cfg(feature = "semconv_experimental")] pub const DB_SYSTEM: &str = "db.system"; + /// Deprecated, no replacement at this time. /// +/// ## Notes +/// /// # Examples /// -/// - `readonly_user` -/// - `reporting_user` -#[deprecated] +/// - `"readonly_user"` +/// - `"reporting_user"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "No replacement at this time.")] pub const DB_USER: &str = "db.user"; -/// 'Deprecated, use `deployment.environment.name` instead.'. + +/// Deprecated, use `deployment.environment.name` instead. +/// +/// ## Notes /// /// # Examples /// -/// - `staging` -/// - `production` -#[deprecated] +/// - `"staging"` +/// - `"production"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, use `deployment.environment.name` instead.")] pub const DEPLOYMENT_ENVIRONMENT: &str = "deployment.environment"; + /// Name of the [deployment environment](https://wikipedia.org/wiki/Deployment_environment) (aka deployment tier).
/// +/// ## Notes +/// /// `deployment.environment.name` does not affect the uniqueness constraints defined through /// the `service.namespace`, `service.name` and `service.instance.id` resource attributes. /// This implies that resources carrying the following attribute combinations MUST be /// considered to be identifying the same service: /// -/// * `service.name=frontend`, `deployment.environment.name=production` -/// * `service.name=frontend`, `deployment.environment.name=staging`. +/// - `service.name=frontend`, `deployment.environment.name=production` +/// - `service.name=frontend`, `deployment.environment.name=staging`. /// /// # Examples /// -/// - `staging` -/// - `production` +/// - `"staging"` +/// - `"production"` +#[cfg(feature = "semconv_experimental")] pub const DEPLOYMENT_ENVIRONMENT_NAME: &str = "deployment.environment.name"; + /// The id of the deployment. /// +/// ## Notes +/// /// # Examples /// -/// - `1208` +/// - `"1208"` +#[cfg(feature = "semconv_experimental")] pub const DEPLOYMENT_ID: &str = "deployment.id"; + /// The name of the deployment. /// +/// ## Notes +/// /// # Examples /// -/// - `deploy my app` -/// - `deploy-frontend` +/// - `"deploy my app"` +/// - `"deploy-frontend"` +#[cfg(feature = "semconv_experimental")] pub const DEPLOYMENT_NAME: &str = "deployment.name"; + /// The status of the deployment. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const DEPLOYMENT_STATUS: &str = "deployment.status"; + /// Destination address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. /// -/// When observed from the source side, and when communicating through an intermediary, `destination.address` SHOULD represent the destination address behind any intermediaries, for example proxies, if it's available. +/// ## Notes +/// +/// When observed from the source side, and when communicating through an intermediary, `destination.address` SHOULD represent the destination address behind any intermediaries, for example proxies, if it's available. /// /// # Examples /// -/// - `destination.example.com` -/// - `10.1.2.80` -/// - `/tmp/my.sock` +/// - `"destination.example.com"` +/// - `"10.1.2.80"` +/// - `"/tmp/my.sock"` +#[cfg(feature = "semconv_experimental")] pub const DESTINATION_ADDRESS: &str = "destination.address"; -/// Destination port number. + +/// Destination port number +/// +/// ## Notes /// /// # Examples /// /// - `3389` /// - `2888` +#[cfg(feature = "semconv_experimental")] pub const DESTINATION_PORT: &str = "destination.port"; -/// A unique identifier representing the device. + +/// A unique identifier representing the device +/// +/// ## Notes /// /// The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence. 
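The uniqueness rule for `deployment.environment.name` is easiest to see on a resource. A sketch, assuming an `opentelemetry_sdk` version that exposes `Resource::new`:

```rust
use opentelemetry::KeyValue;
use opentelemetry_sdk::Resource;

fn main() {
    // Both resources identify the SAME service: deployment.environment.name
    // does not participate in service identity.
    let production = Resource::new([
        KeyValue::new("service.name", "frontend"),
        KeyValue::new("deployment.environment.name", "production"),
    ]);
    let staging = Resource::new([
        KeyValue::new("service.name", "frontend"),
        KeyValue::new("deployment.environment.name", "staging"),
    ]);
    println!("{production:?} {staging:?}");
}
```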
/// /// # Examples /// -/// - `2ab2916d-a51f-4ac8-80ee-45ac31a28092` +/// - `"2ab2916d-a51f-4ac8-80ee-45ac31a28092"` +#[cfg(feature = "semconv_experimental")] pub const DEVICE_ID: &str = "device.id"; -/// The name of the device manufacturer. + +/// The name of the device manufacturer +/// +/// ## Notes /// /// The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple`. /// /// # Examples /// -/// - `Apple` -/// - `Samsung` +/// - `"Apple"` +/// - `"Samsung"` +#[cfg(feature = "semconv_experimental")] pub const DEVICE_MANUFACTURER: &str = "device.manufacturer"; -/// The model identifier for the device. + +/// The model identifier for the device /// -/// It's recommended this value represents a machine-readable version of the model identifier rather than the market or consumer-friendly name of the device. +/// ## Notes +/// +/// It's recommended this value represents a machine-readable version of the model identifier rather than the market or consumer-friendly name of the device. /// /// # Examples /// -/// - `iPhone3,4` -/// - `SM-G920F` +/// - `"iPhone3,4"` +/// - `"SM-G920F"` +#[cfg(feature = "semconv_experimental")] pub const DEVICE_MODEL_IDENTIFIER: &str = "device.model.identifier"; -/// The marketing name for the device model. + +/// The marketing name for the device model /// -/// It's recommended this value represents a human-readable version of the device model rather than a machine-readable alternative. +/// ## Notes +/// +/// It's recommended this value represents a human-readable version of the device model rather than a machine-readable alternative. /// /// # Examples /// -/// - `iPhone 6s Plus` -/// - `Samsung Galaxy S6` +/// - `"iPhone 6s Plus"` +/// - `"Samsung Galaxy S6"` +#[cfg(feature = "semconv_experimental")] pub const DEVICE_MODEL_NAME: &str = "device.model.name"; + /// The disk IO operation direction. /// +/// ## Notes +/// /// # Examples /// -/// - `read` +/// - `"read"` +#[cfg(feature = "semconv_experimental")] pub const DISK_IO_DIRECTION: &str = "disk.io.direction"; + /// The name being queried. /// +/// ## Notes +/// /// If the name field contains non-printable characters (below 32 or above 126), those characters should be represented as escaped base 10 integers (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and line feeds should be converted to \t, \r, and \n respectively. /// /// # Examples /// -/// - `www.example.com` -/// - `dot.net` +/// - `"www.example.com"` +/// - `"opentelemetry.io"` +#[cfg(feature = "semconv_experimental")] pub const DNS_QUESTION_NAME: &str = "dns.question.name"; + +/// Name of the garbage collector managed heap generation. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"gen0"` +/// - `"gen1"` +/// - `"gen2"` +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_HEAP_GENERATION: &str = "dotnet.gc.heap.generation"; + /// Deprecated, use `user.id` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `username` -#[deprecated] +/// - `"username"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `user.id` attribute.")] pub const ENDUSER_ID: &str = "enduser.id"; + /// Deprecated, use `user.roles` instead. 
/// +/// ## Notes +/// /// # Examples /// -/// - `admin` -#[deprecated] +/// - `"admin"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `user.roles` attribute.")] pub const ENDUSER_ROLE: &str = "enduser.role"; + /// Deprecated, no replacement at this time. /// +/// ## Notes +/// /// # Examples /// -/// - `read:message, write:files` -#[deprecated] +/// - `"read:message, write:files"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Removed.")] pub const ENDUSER_SCOPE: &str = "enduser.scope"; + /// Describes a class of error the operation ended with. /// -/// The `error.type` SHOULD match the error code returned by the database or the client library, the canonical name of exception that occurred, or another low-cardinality error identifier. Instrumentations SHOULD document the list of errors they report. +/// ## Notes +/// +/// The `error.type` SHOULD be predictable, and SHOULD have low cardinality. +/// +/// When `error.type` is set to a type (e.g., an exception type), its +/// canonical class name identifying the type within the artifact SHOULD be used. +/// +/// Instrumentations SHOULD document the list of errors they report. +/// +/// The cardinality of `error.type` within one instrumentation library SHOULD be low. +/// Telemetry consumers that aggregate data from multiple instrumentation libraries and applications +/// should be prepared for `error.type` to have high cardinality at query time when no +/// additional filters are applied. +/// +/// If the operation has completed successfully, instrumentations SHOULD NOT set `error.type`. +/// +/// If a specific domain defines its own set of error identifiers (such as HTTP or gRPC status codes), +/// it's RECOMMENDED to: +/// +/// - Use a domain-specific attribute +/// - Set `error.type` to capture all errors, regardless of whether they are defined within the domain-specific set or not. /// /// # Examples /// -/// - `timeout` -/// - `java.net.UnknownHostException` -/// - `server_certificate_invalid` -/// - `500` +/// - `"timeout"` +/// - `"java.net.UnknownHostException"` +/// - `"server_certificate_invalid"` +/// - `"500"` pub const ERROR_TYPE: &str = "error.type"; + /// Identifies the class / type of event. /// +/// ## Notes +/// /// Event names are subject to the same rules as [attribute names](/docs/general/attribute-naming.md). Notably, event names are namespaced to avoid collisions and provide a clean separation of semantics for events in separate domains like browser, mobile, and kubernetes. /// /// # Examples /// -/// - `browser.mouse.click` -/// - `device.app.lifecycle` +/// - `"browser.mouse.click"` +/// - `"device.app.lifecycle"` +#[cfg(feature = "semconv_experimental")] pub const EVENT_NAME: &str = "event.name"; + /// SHOULD be set to true if the exception event is recorded at a point where it is known that the exception is escaping the scope of the span. /// +/// ## Notes +/// /// An exception is considered to have escaped (or left) the scope of a span, -/// if that span is ended while the exception is still logically "in flight". -/// This may be actually "in flight" in some languages (e.g. if the exception -/// is passed to a Context manager's `__exit__` method in Python) but will +/// if that span is ended while the exception is still logically "in flight". +/// This may be actually "in flight" in some languages (e.g. 
if the exception +/// is passed to a Context manager's `__exit__` method in Python) but will /// usually be caught at the point of recording the exception in most languages. /// /// It is usually not possible to determine at the point where an exception is thrown @@ -1180,102 +2223,162 @@ pub const EVENT_NAME: &str = "event.name"; /// It follows that an exception may still escape the scope of the span /// even if the `exception.escaped` attribute was not set or set to false, /// since the event might have been recorded at a time where it was not -/// clear whether the exception will escape. +/// clear whether the exception will escape pub const EXCEPTION_ESCAPED: &str = "exception.escaped"; + /// The exception message. /// +/// ## Notes +/// /// # Examples /// -/// - `Division by zero` -/// - `Can't convert 'int' object to str implicitly` +/// - `"Division by zero"` +/// - `"Can't convert 'int' object to str implicitly"` pub const EXCEPTION_MESSAGE: &str = "exception.message"; + /// A stacktrace as a string in the natural representation for the language runtime. The representation is to be determined and documented by each language SIG. /// +/// ## Notes +/// /// # Examples /// -/// - `Exception in thread "main" java.lang.RuntimeException: Test exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at com.example.GenerateTrace.main(GenerateTrace.java:5)` +/// - `"Exception in thread \"main\" java.lang.RuntimeException: Test exception\\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at com.example.GenerateTrace.main(GenerateTrace.java:5)\n"` pub const EXCEPTION_STACKTRACE: &str = "exception.stacktrace"; + /// The type of the exception (its fully-qualified class name, if applicable). The dynamic type of the exception should be preferred over the static type in languages that support it. /// +/// ## Notes +/// /// # Examples /// -/// - `java.net.ConnectException` -/// - `OSError` +/// - `"java.net.ConnectException"` +/// - `"OSError"` pub const EXCEPTION_TYPE: &str = "exception.type"; + /// A boolean that is true if the serverless function is executed for the first time (aka cold-start). +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const FAAS_COLDSTART: &str = "faas.coldstart"; + /// A string containing the schedule period as [Cron Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). /// +/// ## Notes +/// /// # Examples /// -/// - `0/5 * * * ? *` +/// - `"0/5 * * * ? *"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_CRON: &str = "faas.cron"; + /// The name of the source on which the triggering operation was performed. For example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database name. /// +/// ## Notes +/// /// # Examples /// -/// - `myBucketName` -/// - `myDbName` +/// - `"myBucketName"` +/// - `"myDbName"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_DOCUMENT_COLLECTION: &str = "faas.document.collection"; + /// The document name/table subjected to the operation. For example, in Cloud Storage or S3 is the name of the file, and in Cosmos DB the table name. 
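The `error.type` and `exception.*` guidance above combines naturally when recording a failure. A sketch using a plain `std::io::Error`; the helper name is hypothetical:

```rust
use opentelemetry::KeyValue;

// Classify the failure with a predictable, low-cardinality `error.type`,
// keeping the free-form detail in `exception.message`.
fn error_attributes(err: &std::io::Error) -> Vec<KeyValue> {
    vec![
        KeyValue::new("error.type", "std::io::Error"),
        KeyValue::new("exception.type", "std::io::Error"),
        KeyValue::new("exception.message", err.to_string()),
    ]
}

fn main() {
    let err = std::io::Error::new(std::io::ErrorKind::Other, "connection reset");
    println!("{:?}", error_attributes(&err));
}
```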
/// +/// ## Notes +/// /// # Examples /// -/// - `myFile.txt` -/// - `myTableName` +/// - `"myFile.txt"` +/// - `"myTableName"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_DOCUMENT_NAME: &str = "faas.document.name"; + /// Describes the type of the operation that was performed on the data. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const FAAS_DOCUMENT_OPERATION: &str = "faas.document.operation"; + /// A string containing the time when the data was accessed in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). /// +/// ## Notes +/// /// # Examples /// -/// - `2020-01-23T13:47:06Z` +/// - `"2020-01-23T13:47:06Z"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_DOCUMENT_TIME: &str = "faas.document.time"; + /// The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version. /// -/// * **AWS Lambda:** Use the (full) log stream name. +/// ## Notes +/// +/// - **AWS Lambda:** Use the (full) log stream name. /// /// # Examples /// -/// - `2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de` +/// - `"2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_INSTANCE: &str = "faas.instance"; + /// The invocation ID of the current function invocation. /// +/// ## Notes +/// /// # Examples /// -/// - `af9d5aa4-a685-4c5f-a22b-444f80b3cc28` +/// - `"af9d5aa4-a685-4c5f-a22b-444f80b3cc28"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_INVOCATION_ID: &str = "faas.invocation_id"; + /// The name of the invoked function. /// +/// ## Notes +/// /// SHOULD be equal to the `faas.name` resource attribute of the invoked function. /// /// # Examples /// -/// - `my-function` +/// - `"my-function"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_INVOKED_NAME: &str = "faas.invoked_name"; + /// The cloud provider of the invoked function. /// -/// SHOULD be equal to the `cloud.provider` resource attribute of the invoked function. +/// ## Notes +/// +/// SHOULD be equal to the `cloud.provider` resource attribute of the invoked function +#[cfg(feature = "semconv_experimental")] pub const FAAS_INVOKED_PROVIDER: &str = "faas.invoked_provider"; + /// The cloud region of the invoked function. /// +/// ## Notes +/// /// SHOULD be equal to the `cloud.region` resource attribute of the invoked function. /// /// # Examples /// -/// - `eu-central-1` +/// - `"eu-central-1"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_INVOKED_REGION: &str = "faas.invoked_region"; + /// The amount of memory available to the serverless function converted to Bytes. /// -/// It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must be multiplied by 1,048,576). +/// ## Notes +/// +/// It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must be multiplied by 1,048,576). /// /// # Examples /// /// - `134217728` +#[cfg(feature = "semconv_experimental")] pub const FAAS_MAX_MEMORY: &str = "faas.max_memory"; + /// The name of the single function that this runtime instance executes. 
/// +/// ## Notes +/// /// This is the name of the function as configured/deployed on the FaaS /// platform and is usually different from the name of the callback /// function (which may be stored in the @@ -1286,7 +2389,7 @@ pub const FAAS_MAX_MEMORY: &str = "faas.max_memory"; /// definition of function name MUST be used for this attribute /// (and consequently the span name) for the listed cloud providers/products: /// -/// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name +/// - **Azure:** The full name `[FUNCAPP]/[FUNC]`, i.e., function app name /// followed by a forward slash followed by the function name (this form /// can also be seen in the resource JSON for the function). /// This means that a span attribute MUST be used, as an Azure function @@ -1295,473 +2398,1080 @@ pub const FAAS_MAX_MEMORY: &str = "faas.max_memory"; /// /// # Examples /// -/// - `my-function` -/// - `myazurefunctionapp/some-function-name` +/// - `"my-function"` +/// - `"myazurefunctionapp/some-function-name"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_NAME: &str = "faas.name"; + /// A string containing the function invocation time in the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). /// +/// ## Notes +/// /// # Examples /// -/// - `2020-01-23T13:47:06Z` +/// - `"2020-01-23T13:47:06Z"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_TIME: &str = "faas.time"; + /// Type of the trigger which caused this function invocation. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const FAAS_TRIGGER: &str = "faas.trigger"; + /// The immutable version of the function being executed. /// +/// ## Notes +/// /// Depending on the cloud provider and platform, use: /// -/// * **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) +/// - **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) /// (an integer represented as a decimal string). -/// * **Google Cloud Run (Services):** The [revision](https://cloud.google.com/run/docs/managing/revisions) +/// - **Google Cloud Run (Services):** The [revision](https://cloud.google.com/run/docs/managing/revisions) /// (i.e., the function name plus the revision suffix). -/// * **Google Cloud Functions:** The value of the +/// - **Google Cloud Functions:** The value of the /// [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). -/// * **Azure Functions:** Not applicable. Do not set this attribute. +/// - **Azure Functions:** Not applicable. Do not set this attribute. /// /// # Examples /// -/// - `26` -/// - `pinkfroid-00002` +/// - `"26"` +/// - `"pinkfroid-00002"` +#[cfg(feature = "semconv_experimental")] pub const FAAS_VERSION: &str = "faas.version"; -/// The unique identifier of the feature flag. + +/// The unique identifier for the flag evaluation context. For example, the targeting key. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"5157782b-2203-4c80-a857-dbbd5e7761db"` +#[cfg(feature = "semconv_experimental")] +pub const FEATURE_FLAG_CONTEXT_ID: &str = "feature_flag.context.id"; + +/// A message explaining the nature of an error occurring during flag evaluation. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Flag `header-color` expected type `string` but found type `number`"` +#[cfg(feature = "semconv_experimental")] +pub const FEATURE_FLAG_EVALUATION_ERROR_MESSAGE: &str = "feature_flag.evaluation.error.message"; + +/// The reason code which shows how a feature flag value was determined. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"static"` +/// - `"targeting_match"` +/// - `"error"` +/// - `"default"` +#[cfg(feature = "semconv_experimental")] +pub const FEATURE_FLAG_EVALUATION_REASON: &str = "feature_flag.evaluation.reason"; + +/// The lookup key of the feature flag. +/// +/// ## Notes /// /// # Examples /// -/// - `logo-color` +/// - `"logo-color"` +#[cfg(feature = "semconv_experimental")] pub const FEATURE_FLAG_KEY: &str = "feature_flag.key"; -/// The name of the service provider that performs the flag evaluation. + +/// Identifies the feature flag provider. +/// +/// ## Notes /// /// # Examples /// -/// - `Flag Manager` +/// - `"Flag Manager"` +#[cfg(feature = "semconv_experimental")] pub const FEATURE_FLAG_PROVIDER_NAME: &str = "feature_flag.provider_name"; -/// SHOULD be a semantic identifier for a value. If one is unavailable, a stringified version of the value can be used. + +/// The identifier of the [flag set](https://openfeature.dev/specification/glossary/#flag-set) to which the feature flag belongs. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"proj-1"` +/// - `"ab98sgs"` +/// - `"service1/dev"` +#[cfg(feature = "semconv_experimental")] +pub const FEATURE_FLAG_SET_ID: &str = "feature_flag.set.id"; + +/// A semantic identifier for an evaluated flag value. +/// +/// ## Notes /// /// A semantic identifier, commonly referred to as a variant, provides a means /// for referring to a value without including the value itself. This can /// provide additional context for understanding the meaning behind a value. /// For example, the variant `red` may be used for the value `#c05543`. /// -/// A stringified version of the value can be used in situations where a -/// semantic identifier is unavailable. String representation of the value -/// should be determined by the implementer. -/// /// # Examples /// -/// - `red` -/// - `true` -/// - `on` +/// - `"red"` +/// - `"true"` +/// - `"on"` +#[cfg(feature = "semconv_experimental")] pub const FEATURE_FLAG_VARIANT: &str = "feature_flag.variant"; + +/// The version of the ruleset used during the evaluation. This may be any stable value which uniquely identifies the ruleset. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"1"` +/// - `"01ABCDEF"` +#[cfg(feature = "semconv_experimental")] +pub const FEATURE_FLAG_VERSION: &str = "feature_flag.version"; + +/// Time when the file was last accessed, in ISO 8601 format. +/// +/// ## Notes +/// +/// This attribute might not be supported by some file systems — NFS, FAT32, in embedded OS, etc. +/// +/// # Examples +/// +/// - `"2021-01-01T12:00:00Z"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_ACCESSED: &str = "file.accessed"; + +/// Array of file attributes. +/// +/// ## Notes +/// +/// Attribute names depend on the OS or file system. Here’s a non-exhaustive list of values expected for this attribute: `archive`, `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, `write`.
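Taken together, the `feature_flag.*` attributes describe a single evaluation. A sketch with assumed literal values:

```rust
use opentelemetry::KeyValue;

fn main() {
    let attributes = [
        KeyValue::new("feature_flag.key", "logo-color"),
        KeyValue::new("feature_flag.provider_name", "Flag Manager"),
        // Record the variant, not the raw value: `red` may stand for `#c05543`.
        KeyValue::new("feature_flag.variant", "red"),
        KeyValue::new("feature_flag.evaluation.reason", "targeting_match"),
    ];
    println!("{attributes:?}");
}
```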
+/// +/// # Examples +/// +/// - `[ +/// "readonly", +/// "hidden", +/// ]` +#[cfg(feature = "semconv_experimental")] +pub const FILE_ATTRIBUTES: &str = "file.attributes"; + +/// Time when the file attributes or metadata was last changed, in ISO 8601 format. +/// +/// ## Notes +/// +/// `file.changed` captures the time when any of the file's properties or attributes (including the content) are changed, while `file.modified` captures the timestamp when the file content is modified. +/// +/// # Examples +/// +/// - `"2021-01-01T12:00:00Z"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_CHANGED: &str = "file.changed"; + +/// Time when the file was created, in ISO 8601 format. +/// +/// ## Notes +/// +/// This attribute might not be supported by some file systems — NFS, FAT32, in embedded OS, etc. +/// +/// # Examples +/// +/// - `"2021-01-01T12:00:00Z"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_CREATED: &str = "file.created"; + /// Directory where the file is located. It should include the drive letter, when appropriate. /// +/// ## Notes +/// /// # Examples /// -/// - `/home/user` -/// - `C:\Program Files\MyApp` +/// - `"/home/user"` +/// - `"C:\\Program Files\\MyApp"` +#[cfg(feature = "semconv_experimental")] pub const FILE_DIRECTORY: &str = "file.directory"; + /// File extension, excluding the leading dot. /// -/// When the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). +/// ## Notes +/// +/// When the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). /// /// # Examples /// -/// - `png` -/// - `gz` +/// - `"png"` +/// - `"gz"` +#[cfg(feature = "semconv_experimental")] pub const FILE_EXTENSION: &str = "file.extension"; + +/// Name of the fork. A fork is additional data associated with a filesystem object. +/// +/// ## Notes +/// +/// On Linux, a resource fork is used to store additional data with a filesystem object. A file always has at least one fork for the data portion, and additional forks may exist. +/// On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default data stream for a file is just called $DATA. Zone.Identifier is commonly used by Windows to track contents downloaded from the Internet. An ADS is typically of the form: C:\path\to\filename.extension:some_fork_name, and some_fork_name is the value that should populate `fork_name`. `filename.extension` should populate `file.name`, and `extension` should populate `file.extension`. The full path, `file.path`, will include the fork name. +/// +/// # Examples +/// +/// - `"Zone.Identifer"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_FORK_NAME: &str = "file.fork_name"; + +/// Primary Group ID (GID) of the file. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"1000"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_GROUP_ID: &str = "file.group.id"; + +/// Primary group name of the file. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"users"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_GROUP_NAME: &str = "file.group.name"; + +/// Inode representing the file in the filesystem. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"256383"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_INODE: &str = "file.inode"; + +/// Mode of the file in octal representation. 
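The alternate-data-stream split described under `file.fork_name` can be derived mechanically from the full path. An illustrative sketch only, not a robust parser; the path value is assumed:

```rust
fn main() {
    // Assumed full path, including an NTFS Alternate Data Stream.
    let full_path = r"C:\path\to\filename.extension:Zone.Identifier";

    // The fork name follows the LAST colon (the drive colon comes first).
    let (path, fork_name) = full_path.rsplit_once(':').unwrap();
    let file_name = path.rsplit('\\').next().unwrap();
    let extension = file_name.rsplit('.').next().unwrap();

    assert_eq!(fork_name, "Zone.Identifier"); // file.fork_name
    assert_eq!(file_name, "filename.extension"); // file.name
    assert_eq!(extension, "extension"); // file.extension
    // file.path keeps the whole string: `full_path`.
}
```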
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"0640"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_MODE: &str = "file.mode"; + +/// Time when the file content was last modified, in ISO 8601 format. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"2021-01-01T12:00:00Z"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_MODIFIED: &str = "file.modified"; + /// Name of the file including the extension, without the directory. /// +/// ## Notes +/// /// # Examples /// -/// - `example.png` +/// - `"example.png"` +#[cfg(feature = "semconv_experimental")] pub const FILE_NAME: &str = "file.name"; + +/// The user ID (UID) or security identifier (SID) of the file owner. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"1000"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_OWNER_ID: &str = "file.owner.id"; + +/// Username of the file owner. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"root"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_OWNER_NAME: &str = "file.owner.name"; + /// Full path to the file, including the file name. It should include the drive letter, when appropriate. /// +/// ## Notes +/// /// # Examples /// -/// - `/home/alice/example.png` -/// - `C:\Program Files\MyApp\myapp.exe` +/// - `"/home/alice/example.png"` +/// - `"C:\\Program Files\\MyApp\\myapp.exe"` +#[cfg(feature = "semconv_experimental")] pub const FILE_PATH: &str = "file.path"; + /// File size in bytes. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const FILE_SIZE: &str = "file.size"; + +/// Path to the target of a symbolic link. +/// +/// ## Notes +/// +/// This attribute is only applicable to symbolic links. +/// +/// # Examples +/// +/// - `"/usr/bin/python3"` +#[cfg(feature = "semconv_experimental")] +pub const FILE_SYMBOLIC_LINK_TARGET_PATH: &str = "file.symbolic_link.target_path"; + /// Identifies the Google Cloud service for which the official client library is intended. /// -/// Intended to be a stable identifier for Google Cloud client libraries that is uniform across implementation languages. The value should be derived from the canonical service domain for the service; for example, 'foo.googleapis.com' should result in a value of 'foo'. +/// ## Notes +/// +/// Intended to be a stable identifier for Google Cloud client libraries that is uniform across implementation languages. The value should be derived from the canonical service domain for the service; for example, 'foo.googleapis.com' should result in a value of 'foo'. /// /// # Examples /// -/// - `appengine` -/// - `run` -/// - `firestore` -/// - `alloydb` -/// - `spanner` +/// - `"appengine"` +/// - `"run"` +/// - `"firestore"` +/// - `"alloydb"` +/// - `"spanner"` +#[cfg(feature = "semconv_experimental")] pub const GCP_CLIENT_SERVICE: &str = "gcp.client.service"; + /// The name of the Cloud Run [execution](https://cloud.google.com/run/docs/managing/job-executions) being run for the Job, as set by the [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. /// +/// ## Notes +/// /// # Examples /// -/// - `job-name-xxxx` -/// - `sample-job-mdw84` +/// - `"job-name-xxxx"` +/// - `"sample-job-mdw84"` +#[cfg(feature = "semconv_experimental")] pub const GCP_CLOUD_RUN_JOB_EXECUTION: &str = "gcp.cloud_run.job.execution"; + /// The index for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) environment variable. 
/// +/// ## Notes +/// /// # Examples /// /// - `0` /// - `1` +#[cfg(feature = "semconv_experimental")] pub const GCP_CLOUD_RUN_JOB_TASK_INDEX: &str = "gcp.cloud_run.job.task_index"; + /// The hostname of a GCE instance. This is the full value of the default or [custom hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). /// +/// ## Notes +/// /// # Examples /// -/// - `my-host1234.example.com` -/// - `sample-vm.us-west1-b.c.my-project.internal` +/// - `"my-host1234.example.com"` +/// - `"sample-vm.us-west1-b.c.my-project.internal"` +#[cfg(feature = "semconv_experimental")] pub const GCP_GCE_INSTANCE_HOSTNAME: &str = "gcp.gce.instance.hostname"; + /// The instance name of a GCE instance. This is the value provided by `host.name`, the visible name of the instance in the Cloud Console UI, and the prefix for the default hostname of the instance as defined by the [default internal DNS name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). /// +/// ## Notes +/// /// # Examples /// -/// - `instance-1` -/// - `my-vm-name` +/// - `"instance-1"` +/// - `"my-vm-name"` +#[cfg(feature = "semconv_experimental")] pub const GCP_GCE_INSTANCE_NAME: &str = "gcp.gce.instance.name"; -/// The full response received from the GenAI model. + +/// Deprecated, use Event API to report completions contents. /// -/// It's RECOMMENDED to format completions as JSON string matching [OpenAI messages format](https://platform.openai.com/docs/guides/text-generation) +/// ## Notes /// /// # Examples /// -/// - `[{'role': 'assistant', 'content': 'The capital of France is Paris.'}]` +/// - `"[{'role': 'assistant', 'content': 'The capital of France is Paris.'}]"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Removed, no replacement at this time.")] pub const GEN_AI_COMPLETION: &str = "gen_ai.completion"; + +/// The response format that is requested. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"json"` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: &str = "gen_ai.openai.request.response_format"; + +/// Requests with same seed value more likely to return same result. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `100` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_OPENAI_REQUEST_SEED: &str = "gen_ai.openai.request.seed"; + +/// The service tier requested. May be a specific tier, default, or auto. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"auto"` +/// - `"default"` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_OPENAI_REQUEST_SERVICE_TIER: &str = "gen_ai.openai.request.service_tier"; + +/// The service tier used for the response. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"scale"` +/// - `"default"` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: &str = "gen_ai.openai.response.service_tier"; + +/// A fingerprint to track any eventual change in the Generative AI environment. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"fp_44709d6fcb"` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT: &str = + "gen_ai.openai.response.system_fingerprint"; + /// The name of the operation being performed. /// -/// If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. 
If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value. +/// ## Notes +/// +/// If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_OPERATION_NAME: &str = "gen_ai.operation.name"; -/// The full prompt sent to the GenAI model. + +/// Deprecated, use Event API to report prompt contents. /// -/// It's RECOMMENDED to format prompts as JSON string matching [OpenAI messages format](https://platform.openai.com/docs/guides/text-generation) +/// ## Notes /// /// # Examples /// -/// - `[{'role': 'user', 'content': 'What is the capital of France?'}]` +/// - `"[{'role': 'user', 'content': 'What is the capital of France?'}]"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Removed, no replacement at this time.")] pub const GEN_AI_PROMPT: &str = "gen_ai.prompt"; + +/// The encoding formats requested in an embeddings operation, if specified. +/// +/// ## Notes +/// +/// In some GenAI systems the encoding formats are called embedding types. Also, some GenAI systems only accept a single format per request. +/// +/// # Examples +/// +/// - `[ +/// "base64", +/// ]` +/// - `[ +/// "float", +/// "binary", +/// ]` +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_REQUEST_ENCODING_FORMATS: &str = "gen_ai.request.encoding_formats"; + /// The frequency penalty setting for the GenAI request. /// +/// ## Notes +/// /// # Examples /// /// - `0.1` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_FREQUENCY_PENALTY: &str = "gen_ai.request.frequency_penalty"; + /// The maximum number of tokens the model generates for a request. /// +/// ## Notes +/// /// # Examples /// /// - `100` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_MAX_TOKENS: &str = "gen_ai.request.max_tokens"; + /// The name of the GenAI model a request is being made to. /// +/// ## Notes +/// /// # Examples /// -/// - `gpt-4` +/// - `"gpt-4"` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_MODEL: &str = "gen_ai.request.model"; + /// The presence penalty setting for the GenAI request. /// +/// ## Notes +/// /// # Examples /// /// - `0.1` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_PRESENCE_PENALTY: &str = "gen_ai.request.presence_penalty"; + /// List of sequences that the model will use to stop generating further tokens. /// +/// ## Notes +/// /// # Examples /// -/// - `forest` -/// - `lived` +/// - `[ +/// "forest", +/// "lived", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_STOP_SEQUENCES: &str = "gen_ai.request.stop_sequences"; + /// The temperature setting for the GenAI request. /// +/// ## Notes +/// /// # Examples /// /// - `0.0` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_TEMPERATURE: &str = "gen_ai.request.temperature"; + /// The top_k sampling setting for the GenAI request. /// +/// ## Notes +/// /// # Examples /// /// - `1.0` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_TOP_K: &str = "gen_ai.request.top_k"; + /// The top_p sampling setting for the GenAI request. 
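As a set, the `gen_ai.request.*` attributes describe one model call. A sketch with assumed values:

```rust
use opentelemetry::KeyValue;

fn main() {
    let attributes = [
        KeyValue::new("gen_ai.operation.name", "chat"),
        KeyValue::new("gen_ai.request.model", "gpt-4"),
        KeyValue::new("gen_ai.request.max_tokens", 100),
        KeyValue::new("gen_ai.request.temperature", 0.0),
        KeyValue::new("gen_ai.request.top_p", 1.0),
    ];
    println!("{attributes:?}");
}
```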
/// +/// ## Notes +/// /// # Examples /// /// - `1.0` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_REQUEST_TOP_P: &str = "gen_ai.request.top_p"; + /// Array of reasons the model stopped generating tokens, corresponding to each generation received. /// +/// ## Notes +/// /// # Examples /// -/// - `stop` +/// - `[ +/// "stop", +/// ]` +/// - `[ +/// "stop", +/// "length", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_RESPONSE_FINISH_REASONS: &str = "gen_ai.response.finish_reasons"; + /// The unique identifier for the completion. /// +/// ## Notes +/// /// # Examples /// -/// - `chatcmpl-123` +/// - `"chatcmpl-123"` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_RESPONSE_ID: &str = "gen_ai.response.id"; + /// The name of the model that generated the response. /// +/// ## Notes +/// /// # Examples /// -/// - `gpt-4-0613` +/// - `"gpt-4-0613"` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_RESPONSE_MODEL: &str = "gen_ai.response.model"; + /// The Generative AI product as identified by the client or server instrumentation. /// +/// ## Notes +/// /// The `gen_ai.system` describes a family of GenAI models with specific model identified /// by `gen_ai.request.model` and `gen_ai.response.model` attributes. /// /// The actual GenAI product may differ from the one identified by the client. /// For example, when using OpenAI client libraries to communicate with Mistral, the `gen_ai.system` -/// is set to `openai` based on the instrumentation's best knowledge. +/// is set to `openai` based on the instrumentation's best knowledge. /// /// For custom model, a custom friendly name SHOULD be used. /// If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER`. /// /// # Examples /// -/// - `openai` +/// - `"openai"` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_SYSTEM: &str = "gen_ai.system"; + /// The type of token being counted. /// +/// ## Notes +/// /// # Examples /// -/// - `input` -/// - `output` +/// - `"input"` +/// - `"output"` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_TOKEN_TYPE: &str = "gen_ai.token.type"; + /// Deprecated, use `gen_ai.usage.output_tokens` instead. /// +/// ## Notes +/// /// # Examples /// /// - `42` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `gen_ai.usage.output_tokens` attribute.")] pub const GEN_AI_USAGE_COMPLETION_TOKENS: &str = "gen_ai.usage.completion_tokens"; + /// The number of tokens used in the GenAI input (prompt). /// +/// ## Notes +/// /// # Examples /// /// - `100` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_USAGE_INPUT_TOKENS: &str = "gen_ai.usage.input_tokens"; + /// The number of tokens used in the GenAI response (completion). /// +/// ## Notes +/// /// # Examples /// /// - `180` +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_USAGE_OUTPUT_TOKENS: &str = "gen_ai.usage.output_tokens"; + /// Deprecated, use `gen_ai.usage.input_tokens` instead. /// +/// ## Notes +/// /// # Examples /// /// - `42` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `gen_ai.usage.input_tokens` attribute.")] pub const GEN_AI_USAGE_PROMPT_TOKENS: &str = "gen_ai.usage.prompt_tokens"; + +/// Two-letter code representing continent’s name. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const GEO_CONTINENT_CODE: &str = "geo.continent.code"; + +/// Two-letter ISO Country Code ([ISO 3166-1 alpha2](https://wikipedia.org/wiki/ISO_3166-1#Codes)). 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"CA"` +#[cfg(feature = "semconv_experimental")] +pub const GEO_COUNTRY_ISO_CODE: &str = "geo.country.iso_code"; + +/// Locality name. Represents the name of a city, town, village, or similar populated place. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Montreal"` +/// - `"Berlin"` +#[cfg(feature = "semconv_experimental")] +pub const GEO_LOCALITY_NAME: &str = "geo.locality.name"; + +/// Latitude of the geo location in [WGS84](https://wikipedia.org/wiki/World_Geodetic_System#WGS84). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `45.505918` +#[cfg(feature = "semconv_experimental")] +pub const GEO_LOCATION_LAT: &str = "geo.location.lat"; + +/// Longitude of the geo location in [WGS84](https://wikipedia.org/wiki/World_Geodetic_System#WGS84). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `-73.61483` +#[cfg(feature = "semconv_experimental")] +pub const GEO_LOCATION_LON: &str = "geo.location.lon"; + +/// Postal code associated with the location. Values appropriate for this field may also be known as a postcode or ZIP code and will vary widely from country to country. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"94040"` +#[cfg(feature = "semconv_experimental")] +pub const GEO_POSTAL_CODE: &str = "geo.postal_code"; + +/// Region ISO code ([ISO 3166-2](https://wikipedia.org/wiki/ISO_3166-2)). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"CA-QC"` +#[cfg(feature = "semconv_experimental")] +pub const GEO_REGION_ISO_CODE: &str = "geo.region.iso_code"; + /// The type of memory. /// +/// ## Notes +/// /// # Examples /// -/// - `other` -/// - `stack` +/// - `"other"` +/// - `"stack"` +#[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_TYPE: &str = "go.memory.type"; + /// The GraphQL document being executed. /// +/// ## Notes +/// /// The value may be sanitized to exclude sensitive information. /// /// # Examples /// -/// - `query findBookById { bookById(id: ?) { name } }` +/// - `"query findBookById { bookById(id: ?) { name } }"` +#[cfg(feature = "semconv_experimental")] pub const GRAPHQL_DOCUMENT: &str = "graphql.document"; + /// The name of the operation being executed. /// +/// ## Notes +/// /// # Examples /// -/// - `findBookById` +/// - `"findBookById"` +#[cfg(feature = "semconv_experimental")] pub const GRAPHQL_OPERATION_NAME: &str = "graphql.operation.name"; + /// The type of the operation being executed. /// +/// ## Notes +/// /// # Examples /// -/// - `query` -/// - `mutation` -/// - `subscription` +/// - `"query"` +/// - `"mutation"` +/// - `"subscription"` +#[cfg(feature = "semconv_experimental")] pub const GRAPHQL_OPERATION_TYPE: &str = "graphql.operation.type"; -/// Unique identifier for the application. + +/// Unique identifier for the application +/// +/// ## Notes /// /// # Examples /// -/// - `2daa2797-e42b-4624-9322-ec3f968df4da` +/// - `"2daa2797-e42b-4624-9322-ec3f968df4da"` +#[cfg(feature = "semconv_experimental")] pub const HEROKU_APP_ID: &str = "heroku.app.id"; -/// Commit hash for the current release. + +/// Commit hash for the current release +/// +/// ## Notes /// /// # Examples /// -/// - `e6134959463efd8966b20e75b913cafe3f5ec` +/// - `"e6134959463efd8966b20e75b913cafe3f5ec"` +#[cfg(feature = "semconv_experimental")] pub const HEROKU_RELEASE_COMMIT: &str = "heroku.release.commit"; -/// Time and date the release was created. 
+ +/// Time and date the release was created +/// +/// ## Notes /// /// # Examples /// -/// - `2022-10-23T18:00:42Z` +/// - `"2022-10-23T18:00:42Z"` +#[cfg(feature = "semconv_experimental")] pub const HEROKU_RELEASE_CREATION_TIMESTAMP: &str = "heroku.release.creation_timestamp"; + /// The CPU architecture the host system is running on. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const HOST_ARCH: &str = "host.arch"; + /// The amount of level 2 memory cache available to the processor (in Bytes). /// +/// ## Notes +/// /// # Examples /// /// - `12288000` +#[cfg(feature = "semconv_experimental")] pub const HOST_CPU_CACHE_L2_SIZE: &str = "host.cpu.cache.l2.size"; + /// Family or generation of the CPU. /// +/// ## Notes +/// /// # Examples /// -/// - `6` -/// - `PA-RISC 1.1e` +/// - `"6"` +/// - `"PA-RISC 1.1e"` +#[cfg(feature = "semconv_experimental")] pub const HOST_CPU_FAMILY: &str = "host.cpu.family"; + /// Model identifier. It provides more granular information about the CPU, distinguishing it from other CPUs within the same family. /// +/// ## Notes +/// /// # Examples /// -/// - `6` -/// - `9000/778/B180L` +/// - `"6"` +/// - `"9000/778/B180L"` +#[cfg(feature = "semconv_experimental")] pub const HOST_CPU_MODEL_ID: &str = "host.cpu.model.id"; + /// Model designation of the processor. /// +/// ## Notes +/// /// # Examples /// -/// - `11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz` +/// - `"11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"` +#[cfg(feature = "semconv_experimental")] pub const HOST_CPU_MODEL_NAME: &str = "host.cpu.model.name"; + /// Stepping or core revisions. /// +/// ## Notes +/// /// # Examples /// -/// - `1` -/// - `r1p1` +/// - `"1"` +/// - `"r1p1"` +#[cfg(feature = "semconv_experimental")] pub const HOST_CPU_STEPPING: &str = "host.cpu.stepping"; + /// Processor manufacturer identifier. A maximum 12-character string. /// +/// ## Notes +/// /// [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor ID string in EBX, EDX and ECX registers. Writing these to memory in this order results in a 12-character string. /// /// # Examples /// -/// - `GenuineIntel` +/// - `"GenuineIntel"` +#[cfg(feature = "semconv_experimental")] pub const HOST_CPU_VENDOR_ID: &str = "host.cpu.vendor.id"; + /// Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider. For non-containerized systems, this should be the `machine-id`. See the table below for the sources to use to determine the `machine-id` based on operating system. /// +/// ## Notes +/// /// # Examples /// -/// - `fdbf79e8af94cb7f9e8df36789187052` +/// - `"fdbf79e8af94cb7f9e8df36789187052"` +#[cfg(feature = "semconv_experimental")] pub const HOST_ID: &str = "host.id"; + /// VM image ID or host OS image ID. For Cloud, this value is from the provider. /// +/// ## Notes +/// /// # Examples /// -/// - `ami-07b06b442921831e5` +/// - `"ami-07b06b442921831e5"` +#[cfg(feature = "semconv_experimental")] pub const HOST_IMAGE_ID: &str = "host.image.id"; + /// Name of the VM image or OS install the host was instantiated from. /// +/// ## Notes +/// /// # Examples /// -/// - `infra-ami-eks-worker-node-7d4ec78312` -/// - `CentOS-8-x86_64-1905` +/// - `"infra-ami-eks-worker-node-7d4ec78312"` +/// - `"CentOS-8-x86_64-1905"` +#[cfg(feature = "semconv_experimental")] pub const HOST_IMAGE_NAME: &str = "host.image.name"; + /// The version string of the VM image or host OS as defined in [Version Attributes](/docs/resource/README.md#version-attributes). 
/// +/// ## Notes +/// /// # Examples /// -/// - `0.1` +/// - `"0.1"` +#[cfg(feature = "semconv_experimental")] pub const HOST_IMAGE_VERSION: &str = "host.image.version"; + /// Available IP addresses of the host, excluding loopback interfaces. /// +/// ## Notes +/// /// IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses MUST be specified in the [RFC 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. /// /// # Examples /// -/// - `192.168.1.140` -/// - `fe80::abc2:4a28:737a:609e` +/// - `[ +/// "192.168.1.140", +/// "fe80::abc2:4a28:737a:609e", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const HOST_IP: &str = "host.ip"; + /// Available MAC addresses of the host, excluding loopback interfaces. /// +/// ## Notes +/// /// MAC Addresses MUST be represented in [IEEE RA hexadecimal form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): as hyphen-separated octets in uppercase hexadecimal form from most to least significant. /// /// # Examples /// -/// - `AC-DE-48-23-45-67` -/// - `AC-DE-48-23-45-67-01-9F` +/// - `[ +/// "AC-DE-48-23-45-67", +/// "AC-DE-48-23-45-67-01-9F", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const HOST_MAC: &str = "host.mac"; + /// Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry-test` +/// - `"opentelemetry-test"` +#[cfg(feature = "semconv_experimental")] pub const HOST_NAME: &str = "host.name"; + /// Type of host. For Cloud, this must be the machine type. /// +/// ## Notes +/// /// # Examples /// -/// - `n1-standard-1` +/// - `"n1-standard-1"` +#[cfg(feature = "semconv_experimental")] pub const HOST_TYPE: &str = "host.type"; + /// Deprecated, use `client.address` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `83.164.160.102` -#[deprecated] +/// - `"83.164.160.102"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `client.address`.")] pub const HTTP_CLIENT_IP: &str = "http.client_ip"; + /// State of the HTTP connection in the HTTP connection pool. /// +/// ## Notes +/// /// # Examples /// -/// - `active` -/// - `idle` +/// - `"active"` +/// - `"idle"` +#[cfg(feature = "semconv_experimental")] pub const HTTP_CONNECTION_STATE: &str = "http.connection.state"; + /// Deprecated, use `network.protocol.name` instead. -#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.protocol.name`.")] pub const HTTP_FLAVOR: &str = "http.flavor"; + /// Deprecated, use one of `server.address`, `client.address` or `http.request.header.host` instead, depending on the usage. /// +/// ## Notes +/// /// # Examples /// -/// - `www.example.org` -#[deprecated] +/// - `"www.example.org"` +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Replaced by one of `server.address`, `client.address` or `http.request.header.host`, depending on the usage." +)] pub const HTTP_HOST: &str = "http.host"; + /// Deprecated, use `http.request.method` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `GET` -/// - `POST` -/// - `HEAD` -#[deprecated] +/// - `"GET"` +/// - `"POST"` +/// - `"HEAD"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `http.request.method`.")] pub const HTTP_METHOD: &str = "http.method"; + /// The size of the request payload body in bytes. 
This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. /// +/// ## Notes +/// /// # Examples /// /// - `3495` +#[cfg(feature = "semconv_experimental")] pub const HTTP_REQUEST_BODY_SIZE: &str = "http.request.body.size"; -/// Deprecated, use `http.request.header.content-length` instead. + +/// HTTP request headers, `<key>` being the normalized HTTP Header name (lowercase), the value being the header values. /// -/// # Examples +/// ## Notes /// -/// - `3495` -#[deprecated] -pub const HTTP_REQUEST_CONTENT_LENGTH: &str = "http.request_content_length"; -/// Deprecated, use `http.request.body.size` instead. +/// Instrumentations SHOULD require an explicit configuration of which headers are to be captured. Including all request headers can be a security risk - explicit configuration helps avoid leaking sensitive information. +/// The `User-Agent` header is already captured in the `user_agent.original` attribute. Users MAY explicitly configure instrumentations to capture them even though it is not recommended. +/// The attribute value MUST consist of either multiple header values as an array of strings or a single-item array containing a possibly comma-concatenated string, depending on the way the HTTP library provides access to headers. /// /// # Examples /// -/// - `5493` -#[deprecated] -pub const HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED: &str = - "http.request_content_length_uncompressed"; +/// - `"http.request.header.content-type=[\"application/json\"]"` +/// - `"http.request.header.x-forwarded-for=[\"1.2.3.4\", \"1.2.3.5\"]"` +pub const HTTP_REQUEST_HEADER: &str = "http.request.header"; + /// HTTP request method. /// -/// HTTP request method value SHOULD be "known" to the instrumentation. -/// By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) +/// ## Notes +/// +/// HTTP request method value SHOULD be "known" to the instrumentation. +/// By default, this convention defines "known" methods as the ones listed in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) /// and the PATCH method defined in [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). /// /// If the HTTP request method is not known to instrumentation, it MUST set the `http.request.method` attribute to `_OTHER`. @@ -1777,186 +3487,356 @@ pub const HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED: &str = /// /// # Examples /// -/// - `GET` -/// - `POST` -/// - `HEAD` +/// - `"GET"` +/// - `"POST"` +/// - `"HEAD"` pub const HTTP_REQUEST_METHOD: &str = "http.request.method"; + /// Original HTTP method sent by the client in the request line. /// +/// ## Notes +/// /// # Examples /// -/// - `GeT` -/// - `ACL` -/// - `foo` +/// - `"GeT"` +/// - `"ACL"` +/// - `"foo"` pub const HTTP_REQUEST_METHOD_ORIGINAL: &str = "http.request.method_original"; + /// The ordinal number of request resending attempt (for any reason, including redirects). /// +/// ## Notes +/// /// The resend count SHOULD be updated each time an HTTP request gets resent by the client, regardless of what was the cause of the resending (e.g. redirection, authorization failure, 503 Server Unavailable, network issues, or any other).
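// Illustrative sketch of the "known methods" rule documented above: an
// unrecognized method is reported as `_OTHER` and the raw value is preserved
// in `http.request.method_original`. The helper and the module path are
// assumptions for illustration only.
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute;

const KNOWN_METHODS: &[&str] = &[
    "GET", "HEAD", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH",
];

fn http_method_attributes(raw: &str) -> Vec<KeyValue> {
    // Case-sensitive comparison: per the convention, `GeT` is not "known".
    if KNOWN_METHODS.contains(&raw) {
        vec![KeyValue::new(attribute::HTTP_REQUEST_METHOD, raw.to_owned())]
    } else {
        vec![
            KeyValue::new(attribute::HTTP_REQUEST_METHOD, "_OTHER"),
            KeyValue::new(attribute::HTTP_REQUEST_METHOD_ORIGINAL, raw.to_owned()),
        ]
    }
}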
/// /// # Examples /// /// - `3` pub const HTTP_REQUEST_RESEND_COUNT: &str = "http.request.resend_count"; + /// The total size of the request in bytes. This should be the total number of bytes sent over the wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request body if any. /// +/// ## Notes +/// /// # Examples /// /// - `1437` +#[cfg(feature = "semconv_experimental")] pub const HTTP_REQUEST_SIZE: &str = "http.request.size"; -/// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. + +/// Deprecated, use `http.request.header.<key>` instead. +/// +/// ## Notes /// /// # Examples /// /// - `3495` -pub const HTTP_RESPONSE_BODY_SIZE: &str = "http.response.body.size"; -/// Deprecated, use `http.response.header.content-length` instead. +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `http.request.header.<key>`.")] +pub const HTTP_REQUEST_CONTENT_LENGTH: &str = "http.request_content_length"; + +/// Deprecated, use `http.request.body.size` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `5493` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `http.request.body.size`.")] +pub const HTTP_REQUEST_CONTENT_LENGTH_UNCOMPRESSED: &str = + "http.request_content_length_uncompressed"; + +/// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +/// +/// ## Notes /// /// # Examples /// /// - `3495` -#[deprecated] -pub const HTTP_RESPONSE_CONTENT_LENGTH: &str = "http.response_content_length"; -/// Deprecated, use `http.response.body.size` instead. +#[cfg(feature = "semconv_experimental")] +pub const HTTP_RESPONSE_BODY_SIZE: &str = "http.response.body.size"; + +/// HTTP response headers, `<key>` being the normalized HTTP Header name (lowercase), the value being the header values. +/// +/// ## Notes +/// +/// Instrumentations SHOULD require an explicit configuration of which headers are to be captured. Including all response headers can be a security risk - explicit configuration helps avoid leaking sensitive information. +/// Users MAY explicitly configure instrumentations to capture them even though it is not recommended. +/// The attribute value MUST consist of either multiple header values as an array of strings or a single-item array containing a possibly comma-concatenated string, depending on the way the HTTP library provides access to headers. /// /// # Examples /// -/// - `5493` -#[deprecated] -pub const HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED: &str = - "http.response_content_length_uncompressed"; +/// - `"http.response.header.content-type=[\"application/json\"]"` +/// - `"http.response.header.my-custom-header=[\"abc\", \"def\"]"` +pub const HTTP_RESPONSE_HEADER: &str = "http.response.header"; + /// The total size of the response in bytes. This should be the total number of bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and response body and trailers if any.
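// Hedged sketch of the header-capture shape required by
// `http.request.header.<key>` / `http.response.header.<key>` above: the
// constant is only the key *prefix*, the header name is appended lowercased,
// and the value is always an array of strings. Helper name is hypothetical.
use opentelemetry::{Array, Key, KeyValue, StringValue, Value};
use opentelemetry_semantic_conventions::attribute;

fn request_header_attribute(name: &str, values: Vec<String>) -> KeyValue {
    let key = Key::from(format!(
        "{}.{}",
        attribute::HTTP_REQUEST_HEADER,
        name.to_ascii_lowercase()
    ));
    KeyValue::new(
        key,
        Value::Array(Array::String(
            values.into_iter().map(StringValue::from).collect(),
        )),
    )
}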
/// +/// ## Notes +/// /// # Examples /// /// - `1437` +#[cfg(feature = "semconv_experimental")] pub const HTTP_RESPONSE_SIZE: &str = "http.response.size"; + /// [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). /// +/// ## Notes +/// /// # Examples /// /// - `200` pub const HTTP_RESPONSE_STATUS_CODE: &str = "http.response.status_code"; + +/// Deprecated, use `http.response.header.<key>` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `3495` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `http.response.header.<key>`.")] +pub const HTTP_RESPONSE_CONTENT_LENGTH: &str = "http.response_content_length"; + +/// Deprecated, use `http.response.body.size` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `5493` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `http.response.body.size`.")] +pub const HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED: &str = + "http.response_content_length_uncompressed"; + /// The matched route, that is, the path template in the format used by the respective server framework. /// +/// ## Notes +/// /// MUST NOT be populated when this is not supported by the HTTP server framework as the route attribute should have low-cardinality and the URI path can NOT substitute it. /// SHOULD include the [application root](/docs/http/http-spans.md#http-server-definitions) if there is one. /// /// # Examples /// -/// - `/users/:userID?` -/// - `{controller}/{action}/{id?}` +/// - `"/users/:userID?"` +/// - `"{controller}/{action}/{id?}"` pub const HTTP_ROUTE: &str = "http.route"; + /// Deprecated, use `url.scheme` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `http` -/// - `https` -#[deprecated] +/// - `"http"` +/// - `"https"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `url.scheme` instead.")] pub const HTTP_SCHEME: &str = "http.scheme"; + /// Deprecated, use `server.address` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `example.com` -#[deprecated] +/// - `"example.com"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `server.address`.")] pub const HTTP_SERVER_NAME: &str = "http.server_name"; + /// Deprecated, use `http.response.status_code` instead. /// +/// ## Notes +/// /// # Examples /// /// - `200` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `http.response.status_code`.")] pub const HTTP_STATUS_CODE: &str = "http.status_code"; + /// Deprecated, use `url.path` and `url.query` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `/search?q=OpenTelemetry#SemConv` -#[deprecated] +/// - `"/search?q=OpenTelemetry#SemConv"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Split to `url.path` and `url.query`.")] pub const HTTP_TARGET: &str = "http.target"; + /// Deprecated, use `url.full` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `https://www.foo.bar/search?q=OpenTelemetry#SemConv` -#[deprecated] +/// - `"https://www.foo.bar/search?q=OpenTelemetry#SemConv"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `url.full`.")] pub const HTTP_URL: &str = "http.url"; + /// Deprecated, use `user_agent.original` instead.
/// +/// ## Notes +/// /// # Examples /// -/// - `CERN-LineMode/2.15 libwww/2.17b3` -/// - `Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1` -#[deprecated] +/// - `"CERN-LineMode/2.15 libwww/2.17b3"` +/// - `"Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `user_agent.original`.")] pub const HTTP_USER_AGENT: &str = "http.user_agent"; + +/// An identifier for the hardware component, unique within the monitored host +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"win32battery_battery_testsysa33_1"` +#[cfg(feature = "semconv_experimental")] +pub const HW_ID: &str = "hw.id"; + +/// An easily-recognizable name for the hardware component +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"eth0"` +#[cfg(feature = "semconv_experimental")] +pub const HW_NAME: &str = "hw.name"; + +/// Unique identifier of the parent component (typically the `hw.id` attribute of the enclosure, or disk controller) +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"dellStorage_perc_0"` +#[cfg(feature = "semconv_experimental")] +pub const HW_PARENT: &str = "hw.parent"; + +/// The current state of the component +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const HW_STATE: &str = "hw.state"; + +/// Type of the component +/// +/// ## Notes +/// +/// Describes the category of the hardware component for which `hw.state` is being reported. For example, `hw.type=temperature` along with `hw.state=degraded` would indicate that the temperature of the hardware component has been reported as `degraded` +#[cfg(feature = "semconv_experimental")] +pub const HW_TYPE: &str = "hw.type"; + /// Deprecated use the `device.app.lifecycle` event definition including `ios.state` as a payload field instead. /// -/// The iOS lifecycle states are defined in the [UIApplicationDelegate documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), and from which the `OS terminology` column values are derived. -#[deprecated] +/// ## Notes +/// +/// The iOS lifecycle states are defined in the [UIApplicationDelegate documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), and from which the `OS terminology` column values are derived +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Moved to a payload field of `device.app.lifecycle`.")] pub const IOS_STATE: &str = "ios.state"; + /// Name of the buffer pool. /// +/// ## Notes +/// /// Pool names are generally obtained via [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). /// /// # Examples /// -/// - `mapped` -/// - `direct` +/// - `"mapped"` +/// - `"direct"` +#[cfg(feature = "semconv_experimental")] pub const JVM_BUFFER_POOL_NAME: &str = "jvm.buffer.pool.name"; + /// Name of the garbage collector action. /// +/// ## Notes +/// /// Garbage collector action is generally obtained via [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). 
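// With the explicit `note = "..."` texts added above, stale call sites now get
// actionable compiler warnings. A sketch of the intended migration (assumed
// module path; `USER_AGENT_ORIGINAL` is the stable replacement constant):
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute;

fn user_agent_attribute(ua: String) -> KeyValue {
    // Before (now warns "Replaced by `user_agent.original`."):
    //     #[allow(deprecated)]
    //     let kv = KeyValue::new(attribute::HTTP_USER_AGENT, ua);
    KeyValue::new(attribute::USER_AGENT_ORIGINAL, ua)
}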
/// /// # Examples /// -/// - `end of minor GC` -/// - `end of major GC` +/// - `"end of minor GC"` +/// - `"end of major GC"` pub const JVM_GC_ACTION: &str = "jvm.gc.action"; + /// Name of the garbage collector. /// +/// ## Notes +/// /// Garbage collector name is generally obtained via [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). /// /// # Examples /// -/// - `G1 Young Generation` -/// - `G1 Old Generation` +/// - `"G1 Young Generation"` +/// - `"G1 Old Generation"` pub const JVM_GC_NAME: &str = "jvm.gc.name"; + /// Name of the memory pool. /// +/// ## Notes +/// /// Pool names are generally obtained via [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). /// /// # Examples /// -/// - `G1 Old Gen` -/// - `G1 Eden space` -/// - `G1 Survivor Space` +/// - `"G1 Old Gen"` +/// - `"G1 Eden space"` +/// - `"G1 Survivor Space"` pub const JVM_MEMORY_POOL_NAME: &str = "jvm.memory.pool.name"; + /// The type of memory. /// +/// ## Notes +/// /// # Examples /// -/// - `heap` -/// - `non_heap` +/// - `"heap"` +/// - `"non_heap"` pub const JVM_MEMORY_TYPE: &str = "jvm.memory.type"; + /// Whether the thread is daemon or not. +/// +/// ## Notes pub const JVM_THREAD_DAEMON: &str = "jvm.thread.daemon"; + /// State of the thread. /// +/// ## Notes +/// /// # Examples /// -/// - `runnable` -/// - `blocked` +/// - `"runnable"` +/// - `"blocked"` pub const JVM_THREAD_STATE: &str = "jvm.thread.state"; + /// The name of the cluster. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry-cluster` +/// - `"opentelemetry-cluster"` +#[cfg(feature = "semconv_experimental")] pub const K8S_CLUSTER_NAME: &str = "k8s.cluster.name"; + /// A pseudo-ID for the cluster, set to the UID of the `kube-system` namespace. /// -/// K8s doesn't have support for obtaining a cluster ID. If this is ever +/// ## Notes +/// +/// K8s doesn't have support for obtaining a cluster ID. If this is ever /// added, we will recommend collecting the `k8s.cluster.uid` through the /// official APIs. In the meantime, we are able to use the `uid` of the /// `kube-system` namespace as a proxy for cluster ID. Read on for the @@ -1971,201 +3851,389 @@ pub const K8S_CLUSTER_NAME: &str = "k8s.cluster.name"; /// [ISO/IEC 9834-8 and ITU-T X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). /// Which states: /// -/// > If generated according to one of the mechanisms defined in Rec. -/// ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be -/// different from all other UUIDs generated before 3603 A.D., or is -/// extremely likely to be different (depending on the mechanism chosen). +/// > If generated according to one of the mechanisms defined in Rec. +/// > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be +/// > different from all other UUIDs generated before 3603 A.D., or is +/// > extremely likely to be different (depending on the mechanism chosen). /// /// Therefore, UIDs between clusters should be extremely unlikely to /// conflict. /// /// # Examples /// -/// - `218fc5a9-a5f1-4b54-aa05-46717d0ab26d` +/// - `"218fc5a9-a5f1-4b54-aa05-46717d0ab26d"` +#[cfg(feature = "semconv_experimental")] pub const K8S_CLUSTER_UID: &str = "k8s.cluster.uid"; + /// The name of the Container from Pod specification, must be unique within a Pod.
Container runtime usually uses different globally unique name (`container.name`). /// +/// ## Notes +/// /// # Examples /// -/// - `redis` +/// - `"redis"` +#[cfg(feature = "semconv_experimental")] pub const K8S_CONTAINER_NAME: &str = "k8s.container.name"; + /// Number of times the container was restarted. This attribute can be used to identify a particular container (running or stopped) within a container spec. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const K8S_CONTAINER_RESTART_COUNT: &str = "k8s.container.restart_count"; + /// Last terminated reason of the Container. /// +/// ## Notes +/// /// # Examples /// -/// - `Evicted` -/// - `Error` +/// - `"Evicted"` +/// - `"Error"` +#[cfg(feature = "semconv_experimental")] pub const K8S_CONTAINER_STATUS_LAST_TERMINATED_REASON: &str = "k8s.container.status.last_terminated_reason"; + /// The name of the CronJob. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const K8S_CRONJOB_NAME: &str = "k8s.cronjob.name"; + /// The UID of the CronJob. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_CRONJOB_UID: &str = "k8s.cronjob.uid"; + /// The name of the DaemonSet. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const K8S_DAEMONSET_NAME: &str = "k8s.daemonset.name"; + /// The UID of the DaemonSet. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_DAEMONSET_UID: &str = "k8s.daemonset.uid"; + /// The name of the Deployment. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const K8S_DEPLOYMENT_NAME: &str = "k8s.deployment.name"; + /// The UID of the Deployment. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_DEPLOYMENT_UID: &str = "k8s.deployment.uid"; + /// The name of the Job. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const K8S_JOB_NAME: &str = "k8s.job.name"; + /// The UID of the Job. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_JOB_UID: &str = "k8s.job.uid"; + /// The name of the namespace that the pod is running in. /// +/// ## Notes +/// /// # Examples /// -/// - `default` +/// - `"default"` +#[cfg(feature = "semconv_experimental")] pub const K8S_NAMESPACE_NAME: &str = "k8s.namespace.name"; + /// The name of the Node. /// +/// ## Notes +/// /// # Examples /// -/// - `node-1` +/// - `"node-1"` +#[cfg(feature = "semconv_experimental")] pub const K8S_NODE_NAME: &str = "k8s.node.name"; + /// The UID of the Node. 
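// A hedged sketch of stamping the K8s identity attributes above onto an SDK
// `Resource`. `Resource::new` matches the SDK signature current when this
// diff was written (newer SDKs may prefer a builder); these constants also
// require the `semconv_experimental` feature.
use opentelemetry::KeyValue;
use opentelemetry_sdk::Resource;
use opentelemetry_semantic_conventions::attribute;

fn k8s_resource(namespace: String, pod: String, node: String) -> Resource {
    Resource::new([
        KeyValue::new(attribute::K8S_NAMESPACE_NAME, namespace),
        KeyValue::new(attribute::K8S_POD_NAME, pod),
        KeyValue::new(attribute::K8S_NODE_NAME, node),
    ])
}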
/// +/// ## Notes +/// /// # Examples /// -/// - `1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2` +/// - `"1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"` +#[cfg(feature = "semconv_experimental")] pub const K8S_NODE_UID: &str = "k8s.node.uid"; + +/// The annotation key-value pairs placed on the Pod, the `<key>` being the annotation name, the value being the annotation value. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"k8s.pod.annotation.kubernetes.io/enforce-mountable-secrets=true"` +/// - `"k8s.pod.annotation.mycompany.io/arch=x64"` +/// - `"k8s.pod.annotation.data="` +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_ANNOTATION: &str = "k8s.pod.annotation"; + +/// The label key-value pairs placed on the Pod, the `<key>` being the label name, the value being the label value. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"k8s.pod.label.app=my-app"` +/// - `"k8s.pod.label.mycompany.io/arch=x64"` +/// - `"k8s.pod.label.data="` +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_LABEL: &str = "k8s.pod.label"; + +/// Deprecated, use `k8s.pod.label` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"k8s.pod.label.app=my-app"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `k8s.pod.label`.")] +pub const K8S_POD_LABELS: &str = "k8s.pod.labels"; + /// The name of the Pod. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry-pod-autoconf` +/// - `"opentelemetry-pod-autoconf"` +#[cfg(feature = "semconv_experimental")] pub const K8S_POD_NAME: &str = "k8s.pod.name"; + /// The UID of the Pod. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_POD_UID: &str = "k8s.pod.uid"; + /// The name of the ReplicaSet. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const K8S_REPLICASET_NAME: &str = "k8s.replicaset.name"; + /// The UID of the ReplicaSet. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_REPLICASET_UID: &str = "k8s.replicaset.uid"; + /// The name of the StatefulSet. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` +#[cfg(feature = "semconv_experimental")] pub const K8S_STATEFULSET_NAME: &str = "k8s.statefulset.name"; + /// The UID of the StatefulSet. /// +/// ## Notes +/// /// # Examples /// -/// - `275ecb36-5aa8-4c2a-9c47-d8bb681b9aff` +/// - `"275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"` +#[cfg(feature = "semconv_experimental")] pub const K8S_STATEFULSET_UID: &str = "k8s.statefulset.uid"; -/// The Linux Slab memory state. + +/// The name of the K8s volume. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"volume0"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_VOLUME_NAME: &str = "k8s.volume.name"; + +/// The type of the K8s volume.
+/// +/// ## Notes /// /// # Examples /// -/// - `reclaimable` -/// - `unreclaimable` +/// - `"emptyDir"` +/// - `"persistentVolumeClaim"` +#[cfg(feature = "semconv_experimental")] +pub const K8S_VOLUME_TYPE: &str = "k8s.volume.type"; + +/// The Linux Slab memory state +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"reclaimable"` +/// - `"unreclaimable"` +#[cfg(feature = "semconv_experimental")] pub const LINUX_MEMORY_SLAB_STATE: &str = "linux.memory.slab.state"; + /// The basename of the file. /// +/// ## Notes +/// /// # Examples /// -/// - `audit.log` +/// - `"audit.log"` +#[cfg(feature = "semconv_experimental")] pub const LOG_FILE_NAME: &str = "log.file.name"; + /// The basename of the file, with symlinks resolved. /// +/// ## Notes +/// /// # Examples /// -/// - `uuid.log` +/// - `"uuid.log"` +#[cfg(feature = "semconv_experimental")] pub const LOG_FILE_NAME_RESOLVED: &str = "log.file.name_resolved"; + /// The full path to the file. /// +/// ## Notes +/// /// # Examples /// -/// - `/var/log/mysql/audit.log` +/// - `"/var/log/mysql/audit.log"` +#[cfg(feature = "semconv_experimental")] pub const LOG_FILE_PATH: &str = "log.file.path"; + /// The full path to the file, with symlinks resolved. /// +/// ## Notes +/// /// # Examples /// -/// - `/var/lib/docker/uuid.log` +/// - `"/var/lib/docker/uuid.log"` +#[cfg(feature = "semconv_experimental")] pub const LOG_FILE_PATH_RESOLVED: &str = "log.file.path_resolved"; + /// The stream associated with the log. See below for a list of well-known values. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const LOG_IOSTREAM: &str = "log.iostream"; -/// The complete orignal Log Record. + +/// The complete original Log Record. +/// +/// ## Notes /// /// This value MAY be added when processing a Log Record which was originally transmitted as a string or equivalent data type AND the Body field of the Log Record does not contain the same value. (e.g. a syslog or a log record read from a file.) /// /// # Examples /// -/// - `77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - Something happened` -/// - `[INFO] 8/3/24 12:34:56 Something happened` +/// - `"77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - Something happened"` +/// - `"[INFO] 8/3/24 12:34:56 Something happened"` +#[cfg(feature = "semconv_experimental")] pub const LOG_RECORD_ORIGINAL: &str = "log.record.original"; + /// A unique identifier for the Log Record. /// +/// ## Notes +/// /// If an id is provided, other log records with the same id will be considered duplicates and can be removed safely. This means, that two distinguishable log records MUST have different values. /// The id MAY be an [Universally Unique Lexicographically Sortable Identifier (ULID)](https://github.com/ulid/spec), but other identifiers (e.g. UUID) may be used as needed. /// /// # Examples /// -/// - `01ARZ3NDEKTSV4RRFFQ69G5FAV` +/// - `"01ARZ3NDEKTSV4RRFFQ69G5FAV"` +#[cfg(feature = "semconv_experimental")] pub const LOG_RECORD_UID: &str = "log.record.uid"; + /// Deprecated, use `rpc.message.compressed_size` instead. -#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `rpc.message.compressed_size`.")] pub const MESSAGE_COMPRESSED_SIZE: &str = "message.compressed_size"; + /// Deprecated, use `rpc.message.id` instead. 
-#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `rpc.message.id`.")] pub const MESSAGE_ID: &str = "message.id"; + /// Deprecated, use `rpc.message.type` instead. -#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `rpc.message.type`.")] pub const MESSAGE_TYPE: &str = "message.type"; + /// Deprecated, use `rpc.message.uncompressed_size` instead. -#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `rpc.message.uncompressed_size`.")] pub const MESSAGE_UNCOMPRESSED_SIZE: &str = "message.uncompressed_size"; + /// The number of messages sent, received, or processed in the scope of the batching operation. /// +/// ## Notes +/// /// Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans that operate with a single message. When a messaging client library supports both batch and single-message API for the same operation, instrumentations SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use it for single-message APIs. /// /// # Examples @@ -2173,481 +4241,783 @@ pub const MESSAGE_UNCOMPRESSED_SIZE: &str = "message.uncompressed_size"; /// - `0` /// - `1` /// - `2` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_BATCH_MESSAGE_COUNT: &str = "messaging.batch.message_count"; + /// A unique identifier for the client that consumes or produces a message. /// +/// ## Notes +/// /// # Examples /// -/// - `client-5` -/// - `myhost@8742@s8083jm` +/// - `"client-5"` +/// - `"myhost@8742@s8083jm"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_CLIENT_ID: &str = "messaging.client.id"; + /// The name of the consumer group with which a consumer is associated. /// +/// ## Notes +/// /// Semantic conventions for individual messaging systems SHOULD document whether `messaging.consumer.group.name` is applicable and what it means in the context of that system. /// /// # Examples /// -/// - `my-group` -/// - `indexer` +/// - `"my-group"` +/// - `"indexer"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_CONSUMER_GROUP_NAME: &str = "messaging.consumer.group.name"; + /// A boolean that is true if the message destination is anonymous (could be unnamed or have auto-generated name). +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_DESTINATION_ANONYMOUS: &str = "messaging.destination.anonymous"; -/// The message destination name. + +/// The message destination name +/// +/// ## Notes /// /// Destination name SHOULD uniquely identify a specific queue, topic or other entity within the broker. If -/// the broker doesn't have such notion, the destination name SHOULD uniquely identify the broker. +/// the broker doesn't have such notion, the destination name SHOULD uniquely identify the broker. /// /// # Examples /// -/// - `MyQueue` -/// - `MyTopic` +/// - `"MyQueue"` +/// - `"MyTopic"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_DESTINATION_NAME: &str = "messaging.destination.name"; + /// The identifier of the partition messages are sent to or received from, unique within the `messaging.destination.name`. /// -/// # Examples -/// -/// - `1` -pub const MESSAGING_DESTINATION_PARTITION_ID: &str = "messaging.destination.partition.id"; -/// Deprecated, no replacement at this time. 
-#[deprecated] -pub const MESSAGING_DESTINATION_PUBLISH_ANONYMOUS: &str = "messaging.destination_publish.anonymous"; -/// Deprecated, no replacement at this time. +/// ## Notes /// /// # Examples /// -/// - `MyQueue` -/// - `MyTopic` -#[deprecated] -pub const MESSAGING_DESTINATION_PUBLISH_NAME: &str = "messaging.destination_publish.name"; +/// - `"1"` +#[cfg(feature = "semconv_experimental")] +pub const MESSAGING_DESTINATION_PARTITION_ID: &str = "messaging.destination.partition.id"; + /// The name of the destination subscription from which a message is consumed. /// +/// ## Notes +/// /// Semantic conventions for individual messaging systems SHOULD document whether `messaging.destination.subscription.name` is applicable and what it means in the context of that system. /// /// # Examples /// -/// - `subscription-a` +/// - `"subscription-a"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_DESTINATION_SUBSCRIPTION_NAME: &str = "messaging.destination.subscription.name"; -/// Low cardinality representation of the messaging destination name. + +/// Low cardinality representation of the messaging destination name +/// +/// ## Notes /// /// Destination names could be constructed from templates. An example would be a destination name involving a user name or product id. Although the destination name in this case is of high cardinality, the underlying template is of low cardinality and can be effectively used for grouping and aggregation. /// /// # Examples /// -/// - `/customers/{customerId}` +/// - `"/customers/{customerId}"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_DESTINATION_TEMPLATE: &str = "messaging.destination.template"; + /// A boolean that is true if the message destination is temporary and might not exist anymore after messages are processed. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_DESTINATION_TEMPORARY: &str = "messaging.destination.temporary"; + +/// Deprecated, no replacement at this time. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "No replacement at this time.")] +pub const MESSAGING_DESTINATION_PUBLISH_ANONYMOUS: &str = "messaging.destination_publish.anonymous"; + +/// Deprecated, no replacement at this time. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"MyQueue"` +/// - `"MyTopic"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "No replacement at this time.")] +pub const MESSAGING_DESTINATION_PUBLISH_NAME: &str = "messaging.destination_publish.name"; + /// Deprecated, use `messaging.consumer.group.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `$Default` -#[deprecated] +/// - `"$Default"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.consumer.group.name`.")] pub const MESSAGING_EVENTHUBS_CONSUMER_GROUP: &str = "messaging.eventhubs.consumer.group"; + /// The UTC epoch seconds at which the message has been accepted and stored in the entity. /// +/// ## Notes +/// /// # Examples /// /// - `1701393730` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_EVENTHUBS_MESSAGE_ENQUEUED_TIME: &str = "messaging.eventhubs.message.enqueued_time"; + /// The ack deadline in seconds set for the modify ack deadline request. /// +/// ## Notes +/// /// # Examples /// /// - `10` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_GCP_PUBSUB_MESSAGE_ACK_DEADLINE: &str = "messaging.gcp_pubsub.message.ack_deadline"; + /// The ack id for a given message. 
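// Sketch of the `messaging.batch.message_count` guidance above: the attribute
// is set only by batch-capable operations, never by single-message APIs.
// Helper shape is illustrative, assuming the `attribute` module path.
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute;

fn batch_count_attribute(message_count: usize, is_batch_api: bool) -> Option<KeyValue> {
    is_batch_api.then(|| {
        KeyValue::new(
            attribute::MESSAGING_BATCH_MESSAGE_COUNT,
            message_count as i64,
        )
    })
}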
/// +/// ## Notes +/// /// # Examples /// -/// - `ack_id` +/// - `"ack_id"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_GCP_PUBSUB_MESSAGE_ACK_ID: &str = "messaging.gcp_pubsub.message.ack_id"; + /// The delivery attempt for a given message. /// +/// ## Notes +/// /// # Examples /// /// - `2` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_GCP_PUBSUB_MESSAGE_DELIVERY_ATTEMPT: &str = "messaging.gcp_pubsub.message.delivery_attempt"; + /// The ordering key for a given message. If the attribute is not present, the message does not have an ordering key. /// +/// ## Notes +/// /// # Examples /// -/// - `ordering_key` +/// - `"ordering_key"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_GCP_PUBSUB_MESSAGE_ORDERING_KEY: &str = "messaging.gcp_pubsub.message.ordering_key"; + /// Deprecated, use `messaging.consumer.group.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `my-group` -#[deprecated] +/// - `"my-group"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.consumer.group.name`.")] pub const MESSAGING_KAFKA_CONSUMER_GROUP: &str = "messaging.kafka.consumer.group"; + /// Deprecated, use `messaging.destination.partition.id` instead. /// +/// ## Notes +/// /// # Examples /// /// - `2` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.destination.partition.id`.")] pub const MESSAGING_KAFKA_DESTINATION_PARTITION: &str = "messaging.kafka.destination.partition"; -/// Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set. + +/// Message keys in Kafka are used for grouping alike messages to ensure they're processed on the same partition. They differ from `messaging.message.id` in that they're not unique. If the key is `null`, the attribute MUST NOT be set. +/// +/// ## Notes /// -/// If the key type is not string, it's string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value. +/// If the key type is not string, its string representation has to be supplied for the attribute. If the key has no unambiguous, canonical string form, don't include its value. /// /// # Examples /// -/// - `myKey` +/// - `"myKey"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_KAFKA_MESSAGE_KEY: &str = "messaging.kafka.message.key"; + /// Deprecated, use `messaging.kafka.offset` instead. /// +/// ## Notes +/// /// # Examples /// /// - `42` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.kafka.offset`.")] pub const MESSAGING_KAFKA_MESSAGE_OFFSET: &str = "messaging.kafka.message.offset"; + /// A boolean that is true if the message is a tombstone. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_KAFKA_MESSAGE_TOMBSTONE: &str = "messaging.kafka.message.tombstone"; + /// The offset of a record in the corresponding Kafka partition. /// +/// ## Notes +/// /// # Examples /// /// - `42` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_KAFKA_OFFSET: &str = "messaging.kafka.offset"; + /// The size of the message body in bytes. /// +/// ## Notes +/// /// This can refer to both the compressed or uncompressed body size. If both sizes are known, the uncompressed /// body size should be used.
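// Sketch of the Kafka key rule above: `null` keys set nothing, and non-string
// keys are recorded only when an unambiguous string form exists (UTF-8 is an
// assumption made here purely for illustration).
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute;

fn kafka_key_attribute(key: Option<&[u8]>) -> Option<KeyValue> {
    let bytes = key?; // key is `null`: the attribute MUST NOT be set
    std::str::from_utf8(bytes)
        .ok() // no canonical string form: skip the attribute
        .map(|k| KeyValue::new(attribute::MESSAGING_KAFKA_MESSAGE_KEY, k.to_owned()))
}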
/// /// # Examples /// /// - `1439` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_MESSAGE_BODY_SIZE: &str = "messaging.message.body.size"; -/// The conversation ID identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". + +/// The conversation ID identifying the conversation to which the message belongs, represented as a string. Sometimes called "Correlation ID". +/// +/// ## Notes /// /// # Examples /// -/// - `MyConversationId` +/// - `"MyConversationId"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_MESSAGE_CONVERSATION_ID: &str = "messaging.message.conversation_id"; + /// The size of the message body and metadata in bytes. /// +/// ## Notes +/// /// This can refer to both the compressed or uncompressed size. If both sizes are known, the uncompressed /// size should be used. /// /// # Examples /// /// - `2738` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_MESSAGE_ENVELOPE_SIZE: &str = "messaging.message.envelope.size"; + /// A value used by the messaging system as an identifier for the message, represented as a string. /// +/// ## Notes +/// /// # Examples /// -/// - `452a7c7c7c7048c2f887f61572b18fc2` +/// - `"452a7c7c7c7048c2f887f61572b18fc2"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_MESSAGE_ID: &str = "messaging.message.id"; + /// Deprecated, use `messaging.operation.type` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `publish` -/// - `create` -/// - `process` -#[deprecated] +/// - `"publish"` +/// - `"create"` +/// - `"process"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.operation.type`.")] pub const MESSAGING_OPERATION: &str = "messaging.operation"; + /// The system-specific name of the messaging operation. /// +/// ## Notes +/// /// # Examples /// -/// - `ack` -/// - `nack` -/// - `send` +/// - `"ack"` +/// - `"nack"` +/// - `"send"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_OPERATION_NAME: &str = "messaging.operation.name"; + /// A string identifying the type of the messaging operation. /// -/// If a custom value is used, it MUST be of low cardinality. +/// ## Notes +/// +/// If a custom value is used, it MUST be of low cardinality +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_OPERATION_TYPE: &str = "messaging.operation.type"; + /// RabbitMQ message routing key. /// +/// ## Notes +/// /// # Examples /// -/// - `myKey` +/// - `"myKey"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_RABBITMQ_DESTINATION_ROUTING_KEY: &str = "messaging.rabbitmq.destination.routing_key"; -/// RabbitMQ message delivery tag. + +/// RabbitMQ message delivery tag +/// +/// ## Notes /// /// # Examples /// /// - `123` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_RABBITMQ_MESSAGE_DELIVERY_TAG: &str = "messaging.rabbitmq.message.delivery_tag"; + /// Deprecated, use `messaging.consumer.group.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `myConsumerGroup` -#[deprecated] +/// - `"myConsumerGroup"` +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Replaced by `messaging.consumer.group.name` on the consumer spans. No replacement for producer spans." +)] pub const MESSAGING_ROCKETMQ_CLIENT_GROUP: &str = "messaging.rocketmq.client_group"; + /// Model of message consumption. This only applies to consumer spans. 
+/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_CONSUMPTION_MODEL: &str = "messaging.rocketmq.consumption_model"; + /// The delay time level for delay message, which determines the message delay time. /// +/// ## Notes +/// /// # Examples /// /// - `3` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_MESSAGE_DELAY_TIME_LEVEL: &str = "messaging.rocketmq.message.delay_time_level"; + /// The timestamp in milliseconds that the delay message is expected to be delivered to consumer. /// +/// ## Notes +/// /// # Examples /// /// - `1665987217045` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_MESSAGE_DELIVERY_TIMESTAMP: &str = "messaging.rocketmq.message.delivery_timestamp"; + /// It is essential for FIFO message. Messages that belong to the same message group are always processed one by one within the same consumer group. /// +/// ## Notes +/// /// # Examples /// -/// - `myMessageGroup` +/// - `"myMessageGroup"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_MESSAGE_GROUP: &str = "messaging.rocketmq.message.group"; + /// Key(s) of message, another way to mark message besides message id. /// +/// ## Notes +/// /// # Examples /// -/// - `keyA` -/// - `keyB` +/// - `[ +/// "keyA", +/// "keyB", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_MESSAGE_KEYS: &str = "messaging.rocketmq.message.keys"; + /// The secondary classifier of message besides topic. /// +/// ## Notes +/// /// # Examples /// -/// - `tagA` +/// - `"tagA"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_MESSAGE_TAG: &str = "messaging.rocketmq.message.tag"; + /// Type of message. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_MESSAGE_TYPE: &str = "messaging.rocketmq.message.type"; + /// Namespace of RocketMQ resources, resources in different namespaces are individual. /// +/// ## Notes +/// /// # Examples /// -/// - `myNamespace` +/// - `"myNamespace"` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_ROCKETMQ_NAMESPACE: &str = "messaging.rocketmq.namespace"; -/// Deprecated, use `messaging.servicebus.destination.subscription_name` instead. + +/// Deprecated, use `messaging.destination.subscription.name` instead. +/// +/// ## Notes /// /// # Examples /// -/// - `subscription-a` -#[deprecated] +/// - `"subscription-a"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.destination.subscription.name`.")] pub const MESSAGING_SERVICEBUS_DESTINATION_SUBSCRIPTION_NAME: &str = "messaging.servicebus.destination.subscription_name"; + /// Describes the [settlement type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_SERVICEBUS_DISPOSITION_STATUS: &str = "messaging.servicebus.disposition_status"; + /// Number of deliveries that have been attempted for this message. /// +/// ## Notes +/// /// # Examples /// /// - `2` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_SERVICEBUS_MESSAGE_DELIVERY_COUNT: &str = "messaging.servicebus.message.delivery_count"; + /// The UTC epoch seconds at which the message has been accepted and stored in the entity. 
/// +/// ## Notes +/// /// # Examples /// /// - `1701393730` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_SERVICEBUS_MESSAGE_ENQUEUED_TIME: &str = "messaging.servicebus.message.enqueued_time"; + /// The messaging system as identified by the client instrumentation. /// -/// The actual messaging system may differ from the one known by the client. For example, when using Kafka client libraries to communicate with Azure Event Hubs, the `messaging.system` is set to `kafka` based on the instrumentation's best knowledge. +/// ## Notes +/// +/// The actual messaging system may differ from the one known by the client. For example, when using Kafka client libraries to communicate with Azure Event Hubs, the `messaging.system` is set to `kafka` based on the instrumentation's best knowledge +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_SYSTEM: &str = "messaging.system"; + /// Deprecated, use `network.local.address`. /// +/// ## Notes +/// /// # Examples /// -/// - `192.168.0.1` -#[deprecated] +/// - `"192.168.0.1"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.local.address`.")] pub const NET_HOST_IP: &str = "net.host.ip"; + /// Deprecated, use `server.address`. /// +/// ## Notes +/// /// # Examples /// -/// - `example.com` -#[deprecated] +/// - `"example.com"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `server.address`.")] pub const NET_HOST_NAME: &str = "net.host.name"; + /// Deprecated, use `server.port`. /// +/// ## Notes +/// /// # Examples /// /// - `8080` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `server.port`.")] pub const NET_HOST_PORT: &str = "net.host.port"; + /// Deprecated, use `network.peer.address`. /// +/// ## Notes +/// /// # Examples /// -/// - `127.0.0.1` -#[deprecated] +/// - `"127.0.0.1"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.peer.address`.")] pub const NET_PEER_IP: &str = "net.peer.ip"; + /// Deprecated, use `server.address` on client spans and `client.address` on server spans. /// +/// ## Notes +/// /// # Examples /// -/// - `example.com` -#[deprecated] +/// - `"example.com"` +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Replaced by `server.address` on client spans and `client.address` on server spans." +)] pub const NET_PEER_NAME: &str = "net.peer.name"; + /// Deprecated, use `server.port` on client spans and `client.port` on server spans. /// +/// ## Notes +/// /// # Examples /// /// - `8080` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `server.port` on client spans and `client.port` on server spans.")] pub const NET_PEER_PORT: &str = "net.peer.port"; + /// Deprecated, use `network.protocol.name`. /// +/// ## Notes +/// /// # Examples /// -/// - `amqp` -/// - `http` -/// - `mqtt` -#[deprecated] +/// - `"amqp"` +/// - `"http"` +/// - `"mqtt"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.protocol.name`.")] pub const NET_PROTOCOL_NAME: &str = "net.protocol.name"; + /// Deprecated, use `network.protocol.version`. /// +/// ## Notes +/// /// # Examples /// -/// - `3.1.1` -#[deprecated] +/// - `"3.1.1"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.protocol.version`.")] pub const NET_PROTOCOL_VERSION: &str = "net.protocol.version"; + /// Deprecated, use `network.transport` and `network.type`. 
-#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Split to `network.transport` and `network.type`.")] pub const NET_SOCK_FAMILY: &str = "net.sock.family"; + /// Deprecated, use `network.local.address`. /// +/// ## Notes +/// /// # Examples /// -/// - `/var/my.sock` -#[deprecated] +/// - `"/var/my.sock"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.local.address`.")] pub const NET_SOCK_HOST_ADDR: &str = "net.sock.host.addr"; + /// Deprecated, use `network.local.port`. /// +/// ## Notes +/// /// # Examples /// /// - `8080` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.local.port`.")] pub const NET_SOCK_HOST_PORT: &str = "net.sock.host.port"; + /// Deprecated, use `network.peer.address`. /// +/// ## Notes +/// /// # Examples /// -/// - `192.168.0.1` -#[deprecated] +/// - `"192.168.0.1"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.peer.address`.")] pub const NET_SOCK_PEER_ADDR: &str = "net.sock.peer.addr"; + /// Deprecated, no replacement at this time. /// +/// ## Notes +/// /// # Examples /// -/// - `/var/my.sock` -#[deprecated] +/// - `"/var/my.sock"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Removed.")] pub const NET_SOCK_PEER_NAME: &str = "net.sock.peer.name"; + /// Deprecated, use `network.peer.port`. /// +/// ## Notes +/// /// # Examples /// /// - `65531` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.peer.port`.")] pub const NET_SOCK_PEER_PORT: &str = "net.sock.peer.port"; + /// Deprecated, use `network.transport`. -#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `network.transport`.")] pub const NET_TRANSPORT: &str = "net.transport"; + /// The ISO 3166-1 alpha-2 2-character country code associated with the mobile carrier network. /// +/// ## Notes +/// /// # Examples /// -/// - `DE` +/// - `"DE"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_CARRIER_ICC: &str = "network.carrier.icc"; + /// The mobile carrier country code. /// +/// ## Notes +/// /// # Examples /// -/// - `310` +/// - `"310"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_CARRIER_MCC: &str = "network.carrier.mcc"; + /// The mobile carrier network code. /// +/// ## Notes +/// /// # Examples /// -/// - `001` +/// - `"001"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_CARRIER_MNC: &str = "network.carrier.mnc"; + /// The name of the mobile carrier. /// +/// ## Notes +/// /// # Examples /// -/// - `sprint` +/// - `"sprint"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_CARRIER_NAME: &str = "network.carrier.name"; + /// This describes more details regarding the connection.type. It may be the type of cell technology connection, but it could be used for describing details about a wifi connection. /// +/// ## Notes +/// /// # Examples /// -/// - `LTE` +/// - `"LTE"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_CONNECTION_SUBTYPE: &str = "network.connection.subtype"; + /// The internet connection type. /// +/// ## Notes +/// /// # Examples /// -/// - `wifi` +/// - `"wifi"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_CONNECTION_TYPE: &str = "network.connection.type"; + +/// The network interface name. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"lo"` +/// - `"eth0"` +pub const NETWORK_INTERFACE_NAME: &str = "network.interface.name"; + /// The network IO operation direction. /// +/// ## Notes +/// /// # Examples /// -/// - `transmit` +/// - `"transmit"` +#[cfg(feature = "semconv_experimental")] pub const NETWORK_IO_DIRECTION: &str = "network.io.direction"; + /// Local address of the network connection - IP address or Unix domain socket name. /// +/// ## Notes +/// /// # Examples /// -/// - `10.1.2.80` -/// - `/tmp/my.sock` +/// - `"10.1.2.80"` +/// - `"/tmp/my.sock"` pub const NETWORK_LOCAL_ADDRESS: &str = "network.local.address"; + /// Local port number of the network connection. /// +/// ## Notes +/// /// # Examples /// /// - `65123` pub const NETWORK_LOCAL_PORT: &str = "network.local.port"; -/// Peer address of the database node where the operation was performed. + +/// Peer address of the network connection - IP address or Unix domain socket name. /// -/// Semantic conventions for individual database systems SHOULD document whether `network.peer.*` attributes are applicable. Network peer address and port are useful when the application interacts with individual database nodes directly. -/// If a database operation involved multiple network calls (for example retries), the address of the last contacted node SHOULD be used. +/// ## Notes /// /// # Examples /// -/// - `10.1.2.80` -/// - `/tmp/my.sock` +/// - `"10.1.2.80"` +/// - `"/tmp/my.sock"` pub const NETWORK_PEER_ADDRESS: &str = "network.peer.address"; + /// Peer port number of the network connection. /// +/// ## Notes +/// /// # Examples /// /// - `65123` pub const NETWORK_PEER_PORT: &str = "network.peer.port"; -/// [OSI application layer](https://osi-model.com/application-layer/) or non-OSI equivalent. + +/// [OSI application layer](https://wikipedia.org/wiki/Application_layer) or non-OSI equivalent. +/// +/// ## Notes /// /// The value SHOULD be normalized to lowercase. /// /// # Examples /// -/// - `http` -/// - `spdy` +/// - `"amqp"` +/// - `"http"` +/// - `"mqtt"` pub const NETWORK_PROTOCOL_NAME: &str = "network.protocol.name"; + /// The actual version of the protocol used for network communication. /// +/// ## Notes +/// /// If protocol version is subject to negotiation (for example using [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute SHOULD be set to the negotiated version. If the actual protocol version is not known, this attribute SHOULD NOT be set. /// /// # Examples /// -/// - `1.0` -/// - `1.1` -/// - `2` -/// - `3` +/// - `"1.1"` +/// - `"2"` pub const NETWORK_PROTOCOL_VERSION: &str = "network.protocol.version"; -/// [OSI transport layer](https://osi-model.com/transport-layer/) or [inter-process communication method](https://wikipedia.org/wiki/Inter-process_communication). + +/// [OSI transport layer](https://wikipedia.org/wiki/Transport_layer) or [inter-process communication method](https://wikipedia.org/wiki/Inter-process_communication). +/// +/// ## Notes /// /// The value SHOULD be normalized to lowercase. /// @@ -2657,334 +5027,726 @@ pub const NETWORK_PROTOCOL_VERSION: &str = "network.protocol.version"; /// /// # Examples /// -/// - `tcp` -/// - `unix` +/// - `"tcp"` +/// - `"udp"` pub const NETWORK_TRANSPORT: &str = "network.transport"; -/// [OSI network layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + +/// [OSI network layer](https://wikipedia.org/wiki/Network_layer) or non-OSI equivalent. 
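Stable attributes such as `network.transport` remain unconditionally available, while experimental ones (the `network.carrier.*` family, for example) now sit behind the `semconv_experimental` Cargo feature. A hedged consumer-side sketch:

```rust
// Assumes Cargo.toml enables the gate when experimental constants are needed:
// opentelemetry-semantic-conventions = { version = "*", features = ["semconv_experimental"] }
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute::{NETWORK_TRANSPORT, NETWORK_TYPE};

fn connection_attributes() -> Vec<KeyValue> {
    vec![
        // Values SHOULD be normalized to lowercase per the notes above.
        KeyValue::new(NETWORK_TRANSPORT, "tcp"),
        KeyValue::new(NETWORK_TYPE, "ipv4"),
    ]
}
```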
+/// +/// ## Notes /// /// The value SHOULD be normalized to lowercase. /// /// # Examples /// -/// - `ipv4` -/// - `ipv6` +/// - `"ipv4"` +/// - `"ipv6"` pub const NETWORK_TYPE: &str = "network.type"; + +/// The state of event loop time. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +pub const NODEJS_EVENTLOOP_STATE: &str = "nodejs.eventloop.state"; + /// The digest of the OCI image manifest. For container images specifically is the digest by which the container image is known. /// +/// ## Notes +/// /// Follows [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), and specifically the [Digest property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). /// An example can be found in [Example Image Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). /// /// # Examples /// -/// - `sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4` +/// - `"sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"` +#[cfg(feature = "semconv_experimental")] pub const OCI_MANIFEST_DIGEST: &str = "oci.manifest.digest"; -/// Parent-child Reference type. + +/// Parent-child Reference type /// -/// The causal relationship between a child Span and a parent Span. +/// ## Notes +/// +/// The causal relationship between a child Span and a parent Span +#[cfg(feature = "semconv_experimental")] pub const OPENTRACING_REF_TYPE: &str = "opentracing.ref_type"; + /// Unique identifier for a particular build or compilation of the operating system. /// +/// ## Notes +/// /// # Examples /// -/// - `TQ3C.230805.001.B2` -/// - `20E247` -/// - `22621` +/// - `"TQ3C.230805.001.B2"` +/// - `"20E247"` +/// - `"22621"` +#[cfg(feature = "semconv_experimental")] pub const OS_BUILD_ID: &str = "os.build_id"; + /// Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands. /// +/// ## Notes +/// /// # Examples /// -/// - `Microsoft Windows [Version 10.0.18363.778]` -/// - `Ubuntu 18.04.1 LTS` +/// - `"Microsoft Windows [Version 10.0.18363.778]"` +/// - `"Ubuntu 18.04.1 LTS"` +#[cfg(feature = "semconv_experimental")] pub const OS_DESCRIPTION: &str = "os.description"; + /// Human readable operating system name. /// +/// ## Notes +/// /// # Examples /// -/// - `iOS` -/// - `Android` -/// - `Ubuntu` +/// - `"iOS"` +/// - `"Android"` +/// - `"Ubuntu"` +#[cfg(feature = "semconv_experimental")] pub const OS_NAME: &str = "os.name"; + /// The operating system type. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const OS_TYPE: &str = "os.type"; + /// The version string of the operating system as defined in [Version Attributes](/docs/resource/README.md#version-attributes). /// +/// ## Notes +/// /// # Examples /// -/// - `14.2.1` -/// - `18.04.1` +/// - `"14.2.1"` +/// - `"18.04.1"` +#[cfg(feature = "semconv_experimental")] pub const OS_VERSION: &str = "os.version"; -/// . + +/// Deprecated. Use the `otel.scope.name` attribute +/// +/// ## Notes /// /// # Examples /// -/// - `io.opentelemetry.contrib.mongodb` -#[deprecated] +/// - `"io.opentelemetry.contrib.mongodb"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Use the `otel.scope.name` attribute.")] pub const OTEL_LIBRARY_NAME: &str = "otel.library.name"; -/// . + +/// Deprecated. Use the `otel.scope.version` attribute. 
+/// +/// ## Notes /// /// # Examples /// -/// - `1.0.0` -#[deprecated] +/// - `"1.0.0"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Use the `otel.scope.version` attribute.")] pub const OTEL_LIBRARY_VERSION: &str = "otel.library.version"; + /// The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). /// +/// ## Notes +/// /// # Examples /// -/// - `io.opentelemetry.contrib.mongodb` +/// - `"io.opentelemetry.contrib.mongodb"` pub const OTEL_SCOPE_NAME: &str = "otel.scope.name"; + /// The version of the instrumentation scope - (`InstrumentationScope.Version` in OTLP). /// +/// ## Notes +/// /// # Examples /// -/// - `1.0.0` +/// - `"1.0.0"` pub const OTEL_SCOPE_VERSION: &str = "otel.scope.version"; -/// Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. + +/// Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is UNSET. +/// +/// ## Notes pub const OTEL_STATUS_CODE: &str = "otel.status_code"; + /// Description of the Status if it has a value, otherwise not set. /// +/// ## Notes +/// /// # Examples /// -/// - `resource not found` +/// - `"resource not found"` pub const OTEL_STATUS_DESCRIPTION: &str = "otel.status_description"; + +/// Deprecated, use `db.client.connection.state` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"idle"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.state`.")] +pub const STATE: &str = "state"; + /// The [`service.name`](/docs/resource/README.md#service) of the remote service. SHOULD be equal to the actual `service.name` resource attribute of the remote service if any. /// +/// ## Notes +/// /// # Examples /// -/// - `AuthTokenCache` +/// - `"AuthTokenCache"` +#[cfg(feature = "semconv_experimental")] pub const PEER_SERVICE: &str = "peer.service"; + /// Deprecated, use `db.client.connection.pool.name` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `myDataSource` -#[deprecated] +/// - `"myDataSource"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.pool.name`.")] pub const POOL_NAME: &str = "pool.name"; + +/// Length of the process.command_args array +/// +/// ## Notes +/// +/// This field can be useful for querying or performing bucket analysis on how many arguments were provided to start a process. More arguments may be an indication of suspicious activity. +/// +/// # Examples +/// +/// - `4` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_ARGS_COUNT: &str = "process.args_count"; + /// The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`. /// +/// ## Notes +/// /// # Examples /// -/// - `cmd/otelcol` +/// - `"cmd/otelcol"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_COMMAND: &str = "process.command"; + /// All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`. 
/// +/// ## Notes +/// /// # Examples /// -/// - `cmd/otecol` -/// - `--config=config.yaml` +/// - `[ +/// "cmd/otecol", +/// "--config=config.yaml", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_COMMAND_ARGS: &str = "process.command_args"; + /// The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `C:\cmd\otecol --config="my directory\config.yaml"` +/// - `"C:\\cmd\\otecol --config=\"my directory\\config.yaml\""` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_COMMAND_LINE: &str = "process.command_line"; + /// Specifies whether the context switches for this data point were voluntary or involuntary. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const PROCESS_CONTEXT_SWITCH_TYPE: &str = "process.context_switch_type"; + /// Deprecated, use `cpu.mode` instead. -#[deprecated] +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cpu.mode`")] pub const PROCESS_CPU_STATE: &str = "process.cpu.state"; + /// The date and time the process was created, in ISO 8601 format. /// +/// ## Notes +/// /// # Examples /// -/// - `2023-11-21T09:25:34.853Z` +/// - `"2023-11-21T09:25:34.853Z"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_CREATION_TIME: &str = "process.creation.time"; + +/// The GNU build ID as found in the `.note.gnu.build-id` ELF section (hex string). +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"c89b11207f6479603b0d49bf291c092c2b719293"` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_EXECUTABLE_BUILD_ID_GNU: &str = "process.executable.build_id.gnu"; + +/// The Go build ID as retrieved by `go tool buildid <go executable>`. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_EXECUTABLE_BUILD_ID_GO: &str = "process.executable.build_id.go"; + +/// Profiling specific build ID for executables. See the OTel specification for Profiles for more information. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"600DCAFE4A110000F2BF38C493F5FB92"` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_EXECUTABLE_BUILD_ID_HTLHASH: &str = "process.executable.build_id.htlhash"; + +/// Deprecated, use `process.executable.build_id.htlhash` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"600DCAFE4A110000F2BF38C493F5FB92"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `process.executable.build_id.htlhash`")] +pub const PROCESS_EXECUTABLE_BUILD_ID_PROFILING: &str = "process.executable.build_id.profiling"; + /// The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW`. /// +/// ## Notes +/// /// # Examples /// -/// - `otelcol` +/// - `"otelcol"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_EXECUTABLE_NAME: &str = "process.executable.name"; + /// The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`.
/// +/// ## Notes +/// /// # Examples /// -/// - `/usr/bin/cmd/otelcol` +/// - `"/usr/bin/cmd/otelcol"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_EXECUTABLE_PATH: &str = "process.executable.path"; + /// The exit code of the process. /// +/// ## Notes +/// /// # Examples /// /// - `127` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_EXIT_CODE: &str = "process.exit.code"; + /// The date and time the process exited, in ISO 8601 format. /// +/// ## Notes +/// /// # Examples /// -/// - `2023-11-21T09:26:12.315Z` +/// - `"2023-11-21T09:26:12.315Z"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_EXIT_TIME: &str = "process.exit.time"; -/// The PID of the process's group leader. This is also the process group ID (PGID) of the process. + +/// The PID of the process's group leader. This is also the process group ID (PGID) of the process. +/// +/// ## Notes /// /// # Examples /// /// - `23` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_GROUP_LEADER_PID: &str = "process.group_leader.pid"; + /// Whether the process is connected to an interactive shell. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const PROCESS_INTERACTIVE: &str = "process.interactive"; + +/// The control group associated with the process. +/// +/// ## Notes +/// +/// Control groups (cgroups) are a kernel feature used to organize and manage process resources. This attribute provides the path(s) to the cgroup(s) associated with the process, which should match the contents of the [/proc/\[PID\]/cgroup](https://man7.org/linux/man-pages/man7/cgroups.7.html) file. +/// +/// # Examples +/// +/// - `"1:name=systemd:/user.slice/user-1000.slice/session-3.scope"` +/// - `"0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_LINUX_CGROUP: &str = "process.linux.cgroup"; + /// The username of the user that owns the process. /// +/// ## Notes +/// /// # Examples /// -/// - `root` +/// - `"root"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_OWNER: &str = "process.owner"; + /// The type of page fault for this data point. Type `major` is for major/hard page faults, and `minor` is for minor/soft page faults. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const PROCESS_PAGING_FAULT_TYPE: &str = "process.paging.fault_type"; + /// Parent Process identifier (PPID). /// +/// ## Notes +/// /// # Examples /// /// - `111` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_PARENT_PID: &str = "process.parent_pid"; + /// Process identifier (PID). /// +/// ## Notes +/// /// # Examples /// /// - `1234` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_PID: &str = "process.pid"; + /// The real user ID (RUID) of the process. /// +/// ## Notes +/// /// # Examples /// /// - `1000` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_REAL_USER_ID: &str = "process.real_user.id"; + /// The username of the real user of the process. /// +/// ## Notes +/// /// # Examples /// -/// - `operator` +/// - `"operator"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_REAL_USER_NAME: &str = "process.real_user.name"; + /// An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment. 
/// +/// ## Notes +/// /// # Examples /// -/// - `Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0` +/// - `"Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_RUNTIME_DESCRIPTION: &str = "process.runtime.description"; + /// The name of the runtime of this process. /// +/// ## Notes +/// /// # Examples /// -/// - `OpenJDK Runtime Environment` +/// - `"OpenJDK Runtime Environment"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_RUNTIME_NAME: &str = "process.runtime.name"; + /// The version of the runtime of this process, as returned by the runtime without modification. /// +/// ## Notes +/// /// # Examples /// -/// - `14.0.2` +/// - `"14.0.2"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_RUNTIME_VERSION: &str = "process.runtime.version"; + /// The saved user ID (SUID) of the process. /// +/// ## Notes +/// /// # Examples /// /// - `1002` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_SAVED_USER_ID: &str = "process.saved_user.id"; + /// The username of the saved user. /// +/// ## Notes +/// /// # Examples /// -/// - `operator` +/// - `"operator"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_SAVED_USER_NAME: &str = "process.saved_user.name"; -/// The PID of the process's session leader. This is also the session ID (SID) of the process. + +/// The PID of the process's session leader. This is also the session ID (SID) of the process. +/// +/// ## Notes /// /// # Examples /// /// - `14` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_SESSION_LEADER_PID: &str = "process.session_leader.pid"; + +/// Process title (proctitle) +/// +/// ## Notes +/// +/// In many Unix-like systems, process title (proctitle), is the string that represents the name or command line of a running process, displayed by system monitoring tools like ps, top, and htop. +/// +/// # Examples +/// +/// - `"cat /etc/hostname"` +/// - `"xfce4-session"` +/// - `"bash"` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_TITLE: &str = "process.title"; + /// The effective user ID (EUID) of the process. /// +/// ## Notes +/// /// # Examples /// /// - `1001` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_USER_ID: &str = "process.user.id"; + /// The username of the effective user of the process. /// +/// ## Notes +/// /// # Examples /// -/// - `root` +/// - `"root"` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_USER_NAME: &str = "process.user.name"; + /// Virtual process identifier. /// +/// ## Notes +/// /// The process ID within a PID namespace. This is not necessarily unique across all processes on the host but it is unique within the process namespace that the process exists within. /// /// # Examples /// /// - `12` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_VPID: &str = "process.vpid"; + +/// The working directory of the process. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"/root"` +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_WORKING_DIRECTORY: &str = "process.working_directory"; + +/// Describes the interpreter or compiler of a single frame. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"cpython"` +#[cfg(feature = "semconv_experimental")] +pub const PROFILE_FRAME_TYPE: &str = "profile.frame.type"; + /// The [error codes](https://connect.build/docs/protocol/#error-codes) of the Connect request. Error codes are always string values. 
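The `process.*` constants above are all feature-gated; a sketch of attaching a few of them as attributes, with the PID taken from the standard library (the runtime name and version values are illustrative placeholders, not discovered values):

```rust
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute::{
    PROCESS_PID, PROCESS_RUNTIME_NAME, PROCESS_RUNTIME_VERSION,
};

fn process_attributes() -> Vec<KeyValue> {
    vec![
        // std::process::id() portably supplies the current PID.
        KeyValue::new(PROCESS_PID, std::process::id() as i64),
        // Illustrative values; a real instrumentation would detect these.
        KeyValue::new(PROCESS_RUNTIME_NAME, "rustc"),
        KeyValue::new(PROCESS_RUNTIME_VERSION, "1.75.0"),
    ]
}
```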
+/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const RPC_CONNECT_RPC_ERROR_CODE: &str = "rpc.connect_rpc.error_code"; + +/// Connect request metadata, `<key>` being the normalized Connect Metadata key (lowercase), the value being the metadata values. +/// +/// ## Notes +/// +/// Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all request metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +/// +/// # Examples +/// +/// - `"rpc.request.metadata.my-custom-metadata-attribute=[\"1.2.3.4\", \"1.2.3.5\"]"` +#[cfg(feature = "semconv_experimental")] +pub const RPC_CONNECT_RPC_REQUEST_METADATA: &str = "rpc.connect_rpc.request.metadata"; + +/// Connect response metadata, `<key>` being the normalized Connect Metadata key (lowercase), the value being the metadata values. +/// +/// ## Notes +/// +/// Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all response metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +/// +/// # Examples +/// +/// - `"rpc.response.metadata.my-custom-metadata-attribute=[\"attribute_value\"]"` +#[cfg(feature = "semconv_experimental")] +pub const RPC_CONNECT_RPC_RESPONSE_METADATA: &str = "rpc.connect_rpc.response.metadata"; + +/// gRPC request metadata, `<key>` being the normalized gRPC Metadata key (lowercase), the value being the metadata values. +/// +/// ## Notes +/// +/// Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all request metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +/// +/// # Examples +/// +/// - `"rpc.grpc.request.metadata.my-custom-metadata-attribute=[\"1.2.3.4\", \"1.2.3.5\"]"` +#[cfg(feature = "semconv_experimental")] +pub const RPC_GRPC_REQUEST_METADATA: &str = "rpc.grpc.request.metadata"; + +/// gRPC response metadata, `<key>` being the normalized gRPC Metadata key (lowercase), the value being the metadata values. +/// +/// ## Notes +/// +/// Instrumentations SHOULD require an explicit configuration of which metadata values are to be captured. Including all response metadata values can be a security risk - explicit configuration helps avoid leaking sensitive information. +/// +/// # Examples +/// +/// - `"rpc.grpc.response.metadata.my-custom-metadata-attribute=[\"attribute_value\"]"` +#[cfg(feature = "semconv_experimental")] +pub const RPC_GRPC_RESPONSE_METADATA: &str = "rpc.grpc.response.metadata"; + /// The [numeric status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC request. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const RPC_GRPC_STATUS_CODE: &str = "rpc.grpc.status_code"; + /// `error.code` property of response if it is an error response. /// +/// ## Notes +/// /// # Examples /// /// - `-32700` /// - `100` +#[cfg(feature = "semconv_experimental")] pub const RPC_JSONRPC_ERROR_CODE: &str = "rpc.jsonrpc.error_code"; + /// `error.message` property of response if it is an error response. /// +/// ## Notes +/// /// # Examples /// -/// - `Parse error` -/// - `User already exists` +/// - `"Parse error"` +/// - `"User already exists"` +#[cfg(feature = "semconv_experimental")] pub const RPC_JSONRPC_ERROR_MESSAGE: &str = "rpc.jsonrpc.error_message"; + /// `id` property of request or response.
Since protocol allows id to be int, string, `null` or missing (for notifications), value is expected to be cast to string for simplicity. Use empty string in case of `null` value. Omit entirely if this is a notification. /// +/// ## Notes +/// /// # Examples /// -/// - `10` -/// - `request-7` -/// - `` +/// - `"10"` +/// - `"request-7"` +/// - `""` +#[cfg(feature = "semconv_experimental")] pub const RPC_JSONRPC_REQUEST_ID: &str = "rpc.jsonrpc.request_id"; -/// Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't specify this, the value can be omitted. + +/// Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't specify this, the value can be omitted. +/// +/// ## Notes /// /// # Examples /// -/// - `2.0` -/// - `1.0` +/// - `"2.0"` +/// - `"1.0"` +#[cfg(feature = "semconv_experimental")] pub const RPC_JSONRPC_VERSION: &str = "rpc.jsonrpc.version"; + /// Compressed size of the message in bytes. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const RPC_MESSAGE_COMPRESSED_SIZE: &str = "rpc.message.compressed_size"; + /// MUST be calculated as two different counters starting from `1` one for sent messages and one for received message. /// -/// This way we guarantee that the values will be consistent between different implementations. +/// ## Notes +/// +/// This way we guarantee that the values will be consistent between different implementations +#[cfg(feature = "semconv_experimental")] pub const RPC_MESSAGE_ID: &str = "rpc.message.id"; + /// Whether this is a received or sent message. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const RPC_MESSAGE_TYPE: &str = "rpc.message.type"; + /// Uncompressed size of the message in bytes. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const RPC_MESSAGE_UNCOMPRESSED_SIZE: &str = "rpc.message.uncompressed_size"; + /// The name of the (logical) method being called, must be equal to the $method part in the span name. /// +/// ## Notes +/// /// This is the logical name of the method from the RPC interface perspective, which can be different from the name of any implementing method/function. The `code.function` attribute may be used to store the latter (e.g., method actually executing the call on the server side, RPC client stub method on the client side). /// /// # Examples /// -/// - `exampleMethod` +/// - `"exampleMethod"` +#[cfg(feature = "semconv_experimental")] pub const RPC_METHOD: &str = "rpc.method"; + /// The full (logical) name of the service being called, including its package name, if applicable. /// +/// ## Notes +/// /// This is the logical name of the service from the RPC interface perspective, which can be different from the name of any implementing class. The `code.namespace` attribute may be used to store the latter (despite the attribute name, it may include a class name; e.g., class with method actually executing the call on the server side, RPC client stub class on the client side). /// /// # Examples /// -/// - `myservice.EchoService` +/// - `"myservice.EchoService"` +#[cfg(feature = "semconv_experimental")] pub const RPC_SERVICE: &str = "rpc.service"; + /// A string identifying the remoting system. See below for a list of well-known identifiers. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const RPC_SYSTEM: &str = "rpc.system"; -/// Name of the database host. + +/// Server domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
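Taken together, the `rpc.*` constants above describe one logical call. A sketch of the attributes an RPC client span might carry, with values mirroring the documented examples:

```rust
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute::{RPC_METHOD, RPC_SERVICE, RPC_SYSTEM};

fn rpc_client_attributes() -> Vec<KeyValue> {
    vec![
        KeyValue::new(RPC_SYSTEM, "grpc"),
        KeyValue::new(RPC_SERVICE, "myservice.EchoService"),
        // Must match the $method part of the span name.
        KeyValue::new(RPC_METHOD, "exampleMethod"),
    ]
}
```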
+/// +/// ## Notes /// -/// When observed from the client side, and when communicating through an intermediary, `server.address` SHOULD represent the server address behind any intermediaries, for example proxies, if it's available. +/// When observed from the client side, and when communicating through an intermediary, `server.address` SHOULD represent the server address behind any intermediaries, for example proxies, if it's available. /// /// # Examples /// -/// - `example.com` -/// - `10.1.2.80` -/// - `/tmp/my.sock` +/// - `"example.com"` +/// - `"10.1.2.80"` +/// - `"/tmp/my.sock"` pub const SERVER_ADDRESS: &str = "server.address"; + /// Server port number. /// -/// When observed from the client side, and when communicating through an intermediary, `server.port` SHOULD represent the server port behind any intermediaries, for example proxies, if it's available. +/// ## Notes +/// +/// When observed from the client side, and when communicating through an intermediary, `server.port` SHOULD represent the server port behind any intermediaries, for example proxies, if it's available. /// /// # Examples /// @@ -2992,8 +5754,11 @@ pub const SERVER_ADDRESS: &str = "server.address"; /// - `8080` /// - `443` pub const SERVER_PORT: &str = "server.port"; + /// The string ID of the service instance. /// +/// ## Notes +/// /// MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words /// `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to /// distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled @@ -3006,15 +5771,15 @@ pub const SERVER_PORT: &str = "server.port"; /// /// UUIDs are typically recommended, as only an opaque value for the purposes of identifying a service instance is /// needed. Similar to what can be seen in the man page for the -/// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) file, the underlying -/// data, such as pod name and namespace should be treated as confidential, being the user's choice to expose it +/// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/latest/machine-id.html) file, the underlying +/// data, such as pod name and namespace should be treated as confidential, being the user's choice to expose it /// or not via another resource attribute. /// /// For applications running behind an application server (like unicorn), we do not recommend using one identifier -/// for all processes participating in the application. Instead, it's recommended each division (e.g. a worker +/// for all processes participating in the application. Instead, it's recommended each division (e.g. a worker /// thread in unicorn) to have its own instance.id. /// -/// It's not recommended for a Collector to set `service.instance.id` if it can't unambiguously determine the +/// It's not recommended for a Collector to set `service.instance.id` if it can't unambiguously determine the /// service instance that is generating that telemetry. For instance, creating an UUID based on `pod.name` will /// likely be wrong, as the Collector might not know from which container within that pod the telemetry originated. 
/// However, Collectors can set the `service.instance.id` if they can unambiguously determine the service instance @@ -3023,681 +5788,1245 @@ pub const SERVER_PORT: &str = "server.port"; /// /// # Examples /// -/// - `627cc493-f310-47de-96bd-71410b7dec09` +/// - `"627cc493-f310-47de-96bd-71410b7dec09"` +#[cfg(feature = "semconv_experimental")] pub const SERVICE_INSTANCE_ID: &str = "service.instance.id"; + /// Logical name of the service. /// +/// ## Notes +/// /// MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service`. /// /// # Examples /// -/// - `shoppingcart` +/// - `"shoppingcart"` pub const SERVICE_NAME: &str = "service.name"; + /// A namespace for `service.name`. /// +/// ## Notes +/// /// A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace. /// /// # Examples /// -/// - `Shop` +/// - `"Shop"` +#[cfg(feature = "semconv_experimental")] pub const SERVICE_NAMESPACE: &str = "service.namespace"; + /// The version string of the service API or implementation. The format is not defined by these conventions. /// +/// ## Notes +/// /// # Examples /// -/// - `2.0.0` -/// - `a01dbef8a` +/// - `"2.0.0"` +/// - `"a01dbef8a"` pub const SERVICE_VERSION: &str = "service.version"; + /// A unique id to identify a session. /// +/// ## Notes +/// /// # Examples /// -/// - `00112233-4455-6677-8899-aabbccddeeff` +/// - `"00112233-4455-6677-8899-aabbccddeeff"` +#[cfg(feature = "semconv_experimental")] pub const SESSION_ID: &str = "session.id"; + /// The previous `session.id` for this user, when known. /// +/// ## Notes +/// /// # Examples /// -/// - `00112233-4455-6677-8899-aabbccddeeff` +/// - `"00112233-4455-6677-8899-aabbccddeeff"` +#[cfg(feature = "semconv_experimental")] pub const SESSION_PREVIOUS_ID: &str = "session.previous_id"; + /// SignalR HTTP connection closure status. /// +/// ## Notes +/// /// # Examples /// -/// - `app_shutdown` -/// - `timeout` +/// - `"app_shutdown"` +/// - `"timeout"` pub const SIGNALR_CONNECTION_STATUS: &str = "signalr.connection.status"; -/// [SignalR transport type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md). + +/// [SignalR transport type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) +/// +/// ## Notes /// /// # Examples /// -/// - `web_sockets` -/// - `long_polling` +/// - `"web_sockets"` +/// - `"long_polling"` pub const SIGNALR_TRANSPORT: &str = "signalr.transport"; + /// Source address - domain name if available without reverse DNS lookup; otherwise, IP address or Unix domain socket name. /// -/// When observed from the destination side, and when communicating through an intermediary, `source.address` SHOULD represent the source address behind any intermediaries, for example proxies, if it's available. 
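The `service.*` attributes are resource-level rather than span-level. A sketch of wiring the stable ones into an SDK `Resource` (the `Resource::new` constructor here is an assumption about the `opentelemetry_sdk` version in use):

```rust
use opentelemetry::KeyValue;
use opentelemetry_sdk::Resource;
use opentelemetry_semantic_conventions::attribute::{SERVICE_NAME, SERVICE_VERSION};

fn service_resource() -> Resource {
    Resource::new([
        // `service.name` is the one attribute every resource should carry.
        KeyValue::new(SERVICE_NAME, "shoppingcart"),
        KeyValue::new(SERVICE_VERSION, "2.0.0"),
    ])
}
```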
+/// ## Notes +/// +/// When observed from the destination side, and when communicating through an intermediary, `source.address` SHOULD represent the source address behind any intermediaries, for example proxies, if it's available. /// /// # Examples /// -/// - `source.example.com` -/// - `10.1.2.80` -/// - `/tmp/my.sock` +/// - `"source.example.com"` +/// - `"10.1.2.80"` +/// - `"/tmp/my.sock"` +#[cfg(feature = "semconv_experimental")] pub const SOURCE_ADDRESS: &str = "source.address"; -/// Source port number. + +/// Source port number +/// +/// ## Notes /// /// # Examples /// /// - `3389` /// - `2888` +#[cfg(feature = "semconv_experimental")] pub const SOURCE_PORT: &str = "source.port"; -/// Deprecated, use `db.client.connection.state` instead. -/// -/// # Examples + +/// The logical CPU number \[0..n-1\] /// -/// - `idle` -#[deprecated] -pub const STATE: &str = "state"; -/// The logical CPU number [0..n-1]. +/// ## Notes /// /// # Examples /// /// - `1` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_CPU_LOGICAL_NUMBER: &str = "system.cpu.logical_number"; + /// Deprecated, use `cpu.mode` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `idle` -/// - `interrupt` -#[deprecated] +/// - `"idle"` +/// - `"interrupt"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `cpu.mode`")] pub const SYSTEM_CPU_STATE: &str = "system.cpu.state"; -/// The device identifier. + +/// The device identifier +/// +/// ## Notes /// /// # Examples /// -/// - `(identifier)` +/// - `"(identifier)"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_DEVICE: &str = "system.device"; -/// The filesystem mode. + +/// The filesystem mode +/// +/// ## Notes /// /// # Examples /// -/// - `rw, ro` +/// - `"rw, ro"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_FILESYSTEM_MODE: &str = "system.filesystem.mode"; -/// The filesystem mount path. + +/// The filesystem mount path +/// +/// ## Notes /// /// # Examples /// -/// - `/mnt/data` +/// - `"/mnt/data"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_FILESYSTEM_MOUNTPOINT: &str = "system.filesystem.mountpoint"; -/// The filesystem state. + +/// The filesystem state +/// +/// ## Notes /// /// # Examples /// -/// - `used` +/// - `"used"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_FILESYSTEM_STATE: &str = "system.filesystem.state"; -/// The filesystem type. + +/// The filesystem type +/// +/// ## Notes /// /// # Examples /// -/// - `ext4` +/// - `"ext4"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_FILESYSTEM_TYPE: &str = "system.filesystem.type"; -/// The memory state. + +/// The memory state +/// +/// ## Notes /// /// # Examples /// -/// - `free` -/// - `cached` +/// - `"free"` +/// - `"cached"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_MEMORY_STATE: &str = "system.memory.state"; -/// A stateless protocol MUST NOT set this attribute. + +/// A stateless protocol MUST NOT set this attribute +/// +/// ## Notes /// /// # Examples /// -/// - `close_wait` +/// - `"close_wait"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_NETWORK_STATE: &str = "system.network.state"; -/// The paging access direction. + +/// The paging access direction +/// +/// ## Notes /// /// # Examples /// -/// - `in` +/// - `"in"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_PAGING_DIRECTION: &str = "system.paging.direction"; -/// The memory paging state. 
+ +/// The memory paging state +/// +/// ## Notes /// /// # Examples /// -/// - `free` +/// - `"free"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_PAGING_STATE: &str = "system.paging.state"; -/// The memory paging type. + +/// The memory paging type +/// +/// ## Notes /// /// # Examples /// -/// - `minor` +/// - `"minor"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_PAGING_TYPE: &str = "system.paging.type"; -/// The process state, e.g., [Linux Process State Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES). + +/// The process state, e.g., [Linux Process State Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) +/// +/// ## Notes /// /// # Examples /// -/// - `running` +/// - `"running"` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_PROCESS_STATUS: &str = "system.process.status"; + /// Deprecated, use `system.process.status` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `running` -#[deprecated] +/// - `"running"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `system.process.status`.")] pub const SYSTEM_PROCESSES_STATUS: &str = "system.processes.status"; + /// The name of the auto instrumentation agent or distribution, if used. /// +/// ## Notes +/// /// Official auto instrumentation agents and distributions SHOULD set the `telemetry.distro.name` attribute to /// a string starting with `opentelemetry-`, e.g. `opentelemetry-java-instrumentation`. /// /// # Examples /// -/// - `parts-unlimited-java` +/// - `"parts-unlimited-java"` +#[cfg(feature = "semconv_experimental")] pub const TELEMETRY_DISTRO_NAME: &str = "telemetry.distro.name"; + /// The version string of the auto instrumentation agent or distribution, if used. /// +/// ## Notes +/// /// # Examples /// -/// - `1.2.3` +/// - `"1.2.3"` +#[cfg(feature = "semconv_experimental")] pub const TELEMETRY_DISTRO_VERSION: &str = "telemetry.distro.version"; + /// The language of the telemetry SDK. +/// +/// ## Notes pub const TELEMETRY_SDK_LANGUAGE: &str = "telemetry.sdk.language"; + /// The name of the telemetry SDK as defined above. /// +/// ## Notes +/// /// The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to `opentelemetry`. /// If another SDK, like a fork or a vendor-provided implementation, is used, this SDK MUST set the -/// `telemetry.sdk.name` attribute to the fully-qualified class or module name of this SDK's main entry point +/// `telemetry.sdk.name` attribute to the fully-qualified class or module name of this SDK's main entry point /// or another suitable identifier depending on the language. /// The identifier `opentelemetry` is reserved and MUST NOT be used in this case. /// All custom identifiers SHOULD be stable across different versions of an implementation. /// /// # Examples /// -/// - `opentelemetry` +/// - `"opentelemetry"` pub const TELEMETRY_SDK_NAME: &str = "telemetry.sdk.name"; + /// The version string of the telemetry SDK. /// +/// ## Notes +/// /// # Examples /// -/// - `1.2.3` +/// - `"1.2.3"` pub const TELEMETRY_SDK_VERSION: &str = "telemetry.sdk.version"; -/// The fully qualified human readable name of the [test case](https://en.wikipedia.org/wiki/Test_case). + +/// The fully qualified human readable name of the [test case](https://wikipedia.org/wiki/Test_case). 
+/// +/// ## Notes /// /// # Examples /// -/// - `org.example.TestCase1.test1` -/// - `example/tests/TestCase1.test1` -/// - `ExampleTestCase1_test1` +/// - `"org.example.TestCase1.test1"` +/// - `"example/tests/TestCase1.test1"` +/// - `"ExampleTestCase1_test1"` +#[cfg(feature = "semconv_experimental")] pub const TEST_CASE_NAME: &str = "test.case.name"; + /// The status of the actual test case result from test execution. /// +/// ## Notes +/// /// # Examples /// -/// - `pass` -/// - `fail` +/// - `"pass"` +/// - `"fail"` +#[cfg(feature = "semconv_experimental")] pub const TEST_CASE_RESULT_STATUS: &str = "test.case.result.status"; -/// The human readable name of a [test suite](https://en.wikipedia.org/wiki/Test_suite). + +/// The human readable name of a [test suite](https://wikipedia.org/wiki/Test_suite). +/// +/// ## Notes /// /// # Examples /// -/// - `TestSuite1` +/// - `"TestSuite1"` +#[cfg(feature = "semconv_experimental")] pub const TEST_SUITE_NAME: &str = "test.suite.name"; + /// The status of the test suite run. /// +/// ## Notes +/// /// # Examples /// -/// - `success` -/// - `failure` -/// - `skipped` -/// - `aborted` -/// - `timed_out` -/// - `in_progress` +/// - `"success"` +/// - `"failure"` +/// - `"skipped"` +/// - `"aborted"` +/// - `"timed_out"` +/// - `"in_progress"` +#[cfg(feature = "semconv_experimental")] pub const TEST_SUITE_RUN_STATUS: &str = "test.suite.run.status"; -/// Current "managed" thread ID (as opposed to OS thread ID). + +/// Current "managed" thread ID (as opposed to OS thread ID). +/// +/// ## Notes /// /// # Examples /// /// - `42` +#[cfg(feature = "semconv_experimental")] pub const THREAD_ID: &str = "thread.id"; + /// Current thread name. /// +/// ## Notes +/// /// # Examples /// -/// - `main` +/// - `"main"` +#[cfg(feature = "semconv_experimental")] pub const THREAD_NAME: &str = "thread.name"; + /// String indicating the [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used during the current connection. /// +/// ## Notes +/// /// The values allowed for `tls.cipher` MUST be one of the `Descriptions` of the [registered TLS Cipher Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). /// /// # Examples /// -/// - `TLS_RSA_WITH_3DES_EDE_CBC_SHA` -/// - `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256` +/// - `"TLS_RSA_WITH_3DES_EDE_CBC_SHA"` +/// - `"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CIPHER: &str = "tls.cipher"; + /// PEM-encoded stand-alone certificate offered by the client. This is usually mutually-exclusive of `client.certificate_chain` since this value also exists in that list. /// +/// ## Notes +/// /// # Examples /// -/// - `MII...` +/// - `"MII..."` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_CERTIFICATE: &str = "tls.client.certificate"; + /// Array of PEM-encoded certificates that make up the certificate chain offered by the client. This is usually mutually-exclusive of `client.certificate` since that value should be the first certificate in the chain. /// +/// ## Notes +/// /// # Examples /// -/// - `MII...` -/// - `MI...` +/// - `[ +/// "MII...", +/// "MI...", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_CERTIFICATE_CHAIN: &str = "tls.client.certificate_chain"; + /// Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. 
/// +/// ## Notes +/// /// # Examples /// -/// - `0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC` +/// - `"0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_HASH_MD5: &str = "tls.client.hash.md5"; + /// Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. /// +/// ## Notes +/// /// # Examples /// -/// - `9E393D93138888D288266C2D915214D1D1CCEB2A` +/// - `"9E393D93138888D288266C2D915214D1D1CCEB2A"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_HASH_SHA1: &str = "tls.client.hash.sha1"; + /// Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the client. For consistency with other hash values, this value should be formatted as an uppercase hash. /// +/// ## Notes +/// /// # Examples /// -/// - `0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0` +/// - `"0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_HASH_SHA256: &str = "tls.client.hash.sha256"; + /// Distinguished name of [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of the issuer of the x.509 certificate presented by the client. /// +/// ## Notes +/// /// # Examples /// -/// - `CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com` +/// - `"CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_ISSUER: &str = "tls.client.issuer"; + /// A hash that identifies clients based on how they perform an SSL/TLS handshake. /// +/// ## Notes +/// /// # Examples /// -/// - `d4e5b18d6b55c71272893221c96ba240` +/// - `"d4e5b18d6b55c71272893221c96ba240"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_JA3: &str = "tls.client.ja3"; + /// Date/Time indicating when client certificate is no longer considered valid. /// +/// ## Notes +/// /// # Examples /// -/// - `2021-01-01T00:00:00.000Z` +/// - `"2021-01-01T00:00:00.000Z"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_NOT_AFTER: &str = "tls.client.not_after"; + /// Date/Time indicating when client certificate is first considered valid. /// +/// ## Notes +/// /// # Examples /// -/// - `1970-01-01T00:00:00.000Z` +/// - `"1970-01-01T00:00:00.000Z"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_NOT_BEFORE: &str = "tls.client.not_before"; + /// Deprecated, use `server.address` instead. /// +/// ## Notes +/// /// # Examples /// -/// - `opentelemetry.io` -#[deprecated] +/// - `"opentelemetry.io"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `server.address`.")] pub const TLS_CLIENT_SERVER_NAME: &str = "tls.client.server_name"; + /// Distinguished name of subject of the x.509 certificate presented by the client. /// +/// ## Notes +/// /// # Examples /// -/// - `CN=myclient, OU=Documentation Team, DC=example, DC=com` +/// - `"CN=myclient, OU=Documentation Team, DC=example, DC=com"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_SUBJECT: &str = "tls.client.subject"; + /// Array of ciphers offered by the client during the client hello. 
/// +/// ## Notes +/// /// # Examples /// -/// - `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` -/// - `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` -/// - `...` +/// - `[ +/// "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +/// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const TLS_CLIENT_SUPPORTED_CIPHERS: &str = "tls.client.supported_ciphers"; -/// String indicating the curve used for the given cipher, when applicable. + +/// String indicating the curve used for the given cipher, when applicable +/// +/// ## Notes /// /// # Examples /// -/// - `secp256r1` +/// - `"secp256r1"` +#[cfg(feature = "semconv_experimental")] pub const TLS_CURVE: &str = "tls.curve"; + /// Boolean flag indicating if the TLS negotiation was successful and transitioned to an encrypted tunnel. /// +/// ## Notes +/// /// # Examples /// -/// - `True` +/// - `true` +#[cfg(feature = "semconv_experimental")] pub const TLS_ESTABLISHED: &str = "tls.established"; + /// String indicating the protocol being tunneled. Per the values in the [IANA registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), this string should be lower case. /// +/// ## Notes +/// /// # Examples /// -/// - `http/1.1` +/// - `"http/1.1"` +#[cfg(feature = "semconv_experimental")] pub const TLS_NEXT_PROTOCOL: &str = "tls.next_protocol"; -/// Normalized lowercase protocol name parsed from original string of the negotiated [SSL/TLS protocol version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). + +/// Normalized lowercase protocol name parsed from original string of the negotiated [SSL/TLS protocol version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const TLS_PROTOCOL_NAME: &str = "tls.protocol.name"; -/// Numeric part of the version parsed from the original string of the negotiated [SSL/TLS protocol version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). + +/// Numeric part of the version parsed from the original string of the negotiated [SSL/TLS protocol version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) +/// +/// ## Notes /// /// # Examples /// -/// - `1.2` -/// - `3` +/// - `"1.2"` +/// - `"3"` +#[cfg(feature = "semconv_experimental")] pub const TLS_PROTOCOL_VERSION: &str = "tls.protocol.version"; + /// Boolean flag indicating if this TLS connection was resumed from an existing TLS negotiation. /// +/// ## Notes +/// /// # Examples /// -/// - `True` +/// - `true` +#[cfg(feature = "semconv_experimental")] pub const TLS_RESUMED: &str = "tls.resumed"; + /// PEM-encoded stand-alone certificate offered by the server. This is usually mutually-exclusive of `server.certificate_chain` since this value also exists in that list. /// +/// ## Notes +/// /// # Examples /// -/// - `MII...` +/// - `"MII..."` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_CERTIFICATE: &str = "tls.server.certificate"; + /// Array of PEM-encoded certificates that make up the certificate chain offered by the server. This is usually mutually-exclusive of `server.certificate` since that value should be the first certificate in the chain. 
/// +/// ## Notes +/// /// # Examples /// -/// - `MII...` -/// - `MI...` +/// - `[ +/// "MII...", +/// "MI...", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_CERTIFICATE_CHAIN: &str = "tls.server.certificate_chain"; + /// Certificate fingerprint using the MD5 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. /// +/// ## Notes +/// /// # Examples /// -/// - `0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC` +/// - `"0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_HASH_MD5: &str = "tls.server.hash.md5"; + /// Certificate fingerprint using the SHA1 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. /// +/// ## Notes +/// /// # Examples /// -/// - `9E393D93138888D288266C2D915214D1D1CCEB2A` +/// - `"9E393D93138888D288266C2D915214D1D1CCEB2A"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_HASH_SHA1: &str = "tls.server.hash.sha1"; + /// Certificate fingerprint using the SHA256 digest of DER-encoded version of certificate offered by the server. For consistency with other hash values, this value should be formatted as an uppercase hash. /// +/// ## Notes +/// /// # Examples /// -/// - `0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0` +/// - `"0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_HASH_SHA256: &str = "tls.server.hash.sha256"; + /// Distinguished name of [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of the issuer of the x.509 certificate presented by the client. /// +/// ## Notes +/// /// # Examples /// -/// - `CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com` +/// - `"CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_ISSUER: &str = "tls.server.issuer"; + /// A hash that identifies servers based on how they perform an SSL/TLS handshake. /// +/// ## Notes +/// /// # Examples /// -/// - `d4e5b18d6b55c71272893221c96ba240` +/// - `"d4e5b18d6b55c71272893221c96ba240"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_JA3S: &str = "tls.server.ja3s"; + /// Date/Time indicating when server certificate is no longer considered valid. /// +/// ## Notes +/// /// # Examples /// -/// - `2021-01-01T00:00:00.000Z` +/// - `"2021-01-01T00:00:00.000Z"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_NOT_AFTER: &str = "tls.server.not_after"; + /// Date/Time indicating when server certificate is first considered valid. /// +/// ## Notes +/// /// # Examples /// -/// - `1970-01-01T00:00:00.000Z` +/// - `"1970-01-01T00:00:00.000Z"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_NOT_BEFORE: &str = "tls.server.not_before"; + /// Distinguished name of subject of the x.509 certificate presented by the server. /// +/// ## Notes +/// /// # Examples /// -/// - `CN=myserver, OU=Documentation Team, DC=example, DC=com` +/// - `"CN=myserver, OU=Documentation Team, DC=example, DC=com"` +#[cfg(feature = "semconv_experimental")] pub const TLS_SERVER_SUBJECT: &str = "tls.server.subject"; -/// Domain extracted from the `url.full`, such as "opentelemetry.io". + +/// Domain extracted from the `url.full`, such as "opentelemetry.io". 
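All of the `tls.*` attributes above are experimental; a hedged sketch of recording the negotiated handshake parameters once they are known:

```rust
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::attribute::{
    TLS_CIPHER, TLS_ESTABLISHED, TLS_PROTOCOL_NAME, TLS_PROTOCOL_VERSION,
};

fn tls_attributes() -> Vec<KeyValue> {
    vec![
        // Protocol name/version are the normalized parts of e.g. "TLSv1.2".
        KeyValue::new(TLS_PROTOCOL_NAME, "tls"),
        KeyValue::new(TLS_PROTOCOL_VERSION, "1.2"),
        KeyValue::new(TLS_CIPHER, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
        KeyValue::new(TLS_ESTABLISHED, true),
    ]
}
```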
+/// +/// ## Notes /// /// In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the domain field. If the URL contains a [literal IPv6 address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by `[` and `]`, the `[` and `]` characters should also be captured in the domain field. /// /// # Examples /// -/// - `www.foo.bar` -/// - `opentelemetry.io` -/// - `3.12.167.2` -/// - `[1080:0:0:0:8:800:200C:417A]` +/// - `"www.foo.bar"` +/// - `"opentelemetry.io"` +/// - `"3.12.167.2"` +/// - `"[1080:0:0:0:8:800:200C:417A]"` +#[cfg(feature = "semconv_experimental")] pub const URL_DOMAIN: &str = "url.domain"; + /// The file extension extracted from the `url.full`, excluding the leading dot. /// +/// ## Notes +/// /// The file extension is only set if it exists, as not every url has a file extension. When the file name has multiple extensions `example.tar.gz`, only the last one should be captured `gz`, not `tar.gz`. /// /// # Examples /// -/// - `png` -/// - `gz` +/// - `"png"` +/// - `"gz"` +#[cfg(feature = "semconv_experimental")] pub const URL_EXTENSION: &str = "url.extension"; -/// The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component. + +/// The [URI fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +/// +/// ## Notes /// /// # Examples /// -/// - `SemConv` +/// - `"SemConv"` pub const URL_FRAGMENT: &str = "url.fragment"; -/// Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986). + +/// Absolute URL describing a network resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +/// +/// ## Notes +/// +/// For network calls, URL usually has `scheme://host[:port][path][?query][#fragment]` format, where the fragment +/// is not transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. +/// +/// `url.full` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. +/// In such case username and password SHOULD be redacted and attribute's value SHOULD be `https://REDACTED:REDACTED@www.example.com/`. /// -/// For network calls, URL usually has `scheme://host[:port][path][?query][#fragment]` format, where the fragment is not transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. -/// `url.full` MUST NOT contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case username and password SHOULD be redacted and attribute's value SHOULD be `https://REDACTED:REDACTED@www.example.com/`. -/// `url.full` SHOULD capture the absolute URL when it is available (or can be reconstructed). Sensitive content provided in `url.full` SHOULD be scrubbed when instrumentations can identify it. +/// `url.full` SHOULD capture the absolute URL when it is available (or can be reconstructed). +/// +/// Sensitive content provided in `url.full` SHOULD be scrubbed when instrumentations can identify it. 
+/// +/// +/// Query string values for the following keys SHOULD be redacted by default and replaced by the +/// value `REDACTED`: +/// +/// - [`AWSAccessKeyId`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +/// - [`Signature`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +/// - [`sig`](https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token) +/// - [`X-Goog-Signature`](https://cloud.google.com/storage/docs/access-control/signed-urls) +/// +/// This list is subject to change over time. +/// +/// When a query string value is redacted, the query string key SHOULD still be preserved, e.g. +/// `https://www.example.com/path?color=blue&sig=REDACTED`. /// /// # Examples /// -/// - `https://www.foo.bar/search?q=OpenTelemetry#SemConv` -/// - `//localhost` +/// - `"https://www.foo.bar/search?q=OpenTelemetry#SemConv"` +/// - `"//localhost"` pub const URL_FULL: &str = "url.full"; + /// Unmodified original URL as seen in the event source. /// +/// ## Notes +/// /// In network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. This field is meant to represent the URL as it was observed, complete or not. -/// `url.original` might contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case password and username SHOULD NOT be redacted and attribute's value SHOULD remain the same. +/// `url.original` might contain credentials passed via URL in form of `https://username:password@www.example.com/`. In such case password and username SHOULD NOT be redacted and attribute's value SHOULD remain the same. /// /// # Examples /// -/// - `https://www.foo.bar/search?q=OpenTelemetry#SemConv` -/// - `search?q=OpenTelemetry` +/// - `"https://www.foo.bar/search?q=OpenTelemetry#SemConv"` +/// - `"search?q=OpenTelemetry"` +#[cfg(feature = "semconv_experimental")] pub const URL_ORIGINAL: &str = "url.original"; -/// The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component. + +/// The [URI path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +/// +/// ## Notes /// /// Sensitive content provided in `url.path` SHOULD be scrubbed when instrumentations can identify it. /// /// # Examples /// -/// - `/search` +/// - `"/search"` pub const URL_PATH: &str = "url.path"; -/// Port extracted from the `url.full`. + +/// Port extracted from the `url.full` +/// +/// ## Notes /// /// # Examples /// /// - `443` +#[cfg(feature = "semconv_experimental")] pub const URL_PORT: &str = "url.port"; -/// The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component. + +/// The [URI query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +/// +/// ## Notes /// /// Sensitive content provided in `url.query` SHOULD be scrubbed when instrumentations can identify it. 
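The redaction rules spelled out for `url.full` above are mechanical enough to sketch. A hedged example using the third-party `url` crate (an assumption; it is not a dependency of this crate), with the redacted-key list taken verbatim from the doc comment:

```rust
// Sketch of the SHOULD-redact rules described above, using the `url` crate.
use url::Url;

const REDACTED_QUERY_KEYS: &[&str] = &["AWSAccessKeyId", "Signature", "sig", "X-Goog-Signature"];

fn redact_url_full(raw: &str) -> Option<String> {
    let mut url = Url::parse(raw).ok()?;
    // Credentials MUST NOT be recorded; replace both parts with REDACTED.
    if !url.username().is_empty() {
        url.set_username("REDACTED").ok()?;
    }
    if url.password().is_some() {
        url.set_password(Some("REDACTED")).ok()?;
    }
    // Redact well-known sensitive query values while preserving the keys.
    let redacted: Vec<(String, String)> = url
        .query_pairs()
        .map(|(k, v)| {
            let v = if REDACTED_QUERY_KEYS.contains(&k.as_ref()) {
                "REDACTED".to_string()
            } else {
                v.into_owned()
            };
            (k.into_owned(), v)
        })
        .collect();
    if !redacted.is_empty() {
        url.query_pairs_mut().clear().extend_pairs(redacted);
    }
    Some(url.into())
}
```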
/// +/// +/// Query string values for the following keys SHOULD be redacted by default and replaced by the value `REDACTED`: +/// +/// - [`AWSAccessKeyId`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +/// - [`Signature`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth) +/// - [`sig`](https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token) +/// - [`X-Goog-Signature`](https://cloud.google.com/storage/docs/access-control/signed-urls) +/// +/// This list is subject to change over time. +/// +/// When a query string value is redacted, the query string key SHOULD still be preserved, e.g. +/// `q=OpenTelemetry&sig=REDACTED`. +/// /// # Examples /// -/// - `q=OpenTelemetry` +/// - `"q=OpenTelemetry"` pub const URL_QUERY: &str = "url.query"; + /// The highest registered url domain, stripped of the subdomain. /// +/// ## Notes +/// /// This value can be determined precisely with the [public suffix list](http://publicsuffix.org). For example, the registered domain for `foo.example.com` is `example.com`. Trying to approximate this by simply taking the last two labels will not work well for TLDs such as `co.uk`. /// /// # Examples /// -/// - `example.com` -/// - `foo.co.uk` +/// - `"example.com"` +/// - `"foo.co.uk"` +#[cfg(feature = "semconv_experimental")] pub const URL_REGISTERED_DOMAIN: &str = "url.registered_domain"; + /// The [URI scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component identifying the used protocol. /// +/// ## Notes +/// /// # Examples /// -/// - `http` -/// - `https` +/// - `"https"` +/// - `"ftp"` +/// - `"telnet"` pub const URL_SCHEME: &str = "url.scheme"; + /// The subdomain portion of a fully qualified domain name includes all of the names except the host name under the registered_domain. In a partially qualified domain, or if the qualification level of the full name cannot be determined, subdomain contains all of the names below the registered domain. /// +/// ## Notes +/// /// The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, with no trailing period. /// /// # Examples /// -/// - `east` -/// - `sub2.sub1` +/// - `"east"` +/// - `"sub2.sub1"` +#[cfg(feature = "semconv_experimental")] pub const URL_SUBDOMAIN: &str = "url.subdomain"; + /// The low-cardinality template of an [absolute path reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). /// -/// The `url.template` MUST have low cardinality. It is not usually available on HTTP clients, but may be known by the application or specialized HTTP instrumentation. +/// ## Notes /// /// # Examples /// -/// - `/users/{id}` -/// - `/users/:id` -/// - `/users?id={id}` +/// - `"/users/{id}"` +/// - `"/users/:id"` +/// - `"/users?id={id}"` +#[cfg(feature = "semconv_experimental")] pub const URL_TEMPLATE: &str = "url.template"; + /// The effective top level domain (eTLD), also known as the domain suffix, is the last part of the domain name. For example, the top level domain for example.com is `com`. /// +/// ## Notes +/// /// This value can be determined precisely with the [public suffix list](http://publicsuffix.org). 
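The registered-domain and top-level-domain notes in this region both hinge on the public suffix list. A deliberately naive sketch of why the "last two labels" shortcut fails, using a tiny hardcoded suffix sample rather than the real list (real code should consult the full list):

```rust
// Naive illustration of the public-suffix pitfall described above.
fn registered_domain(host: &str, known_suffixes: &[&str]) -> Option<String> {
    let suffix = known_suffixes
        .iter()
        .filter(|s| host.ends_with(&format!(".{s}")))
        .max_by_key(|s| s.len())?; // prefer the longest matching suffix, e.g. "co.uk" over "uk"
    let prefix = &host[..host.len() - suffix.len() - 1];
    let label = prefix.rsplit('.').next()?;
    Some(format!("{label}.{suffix}"))
}

fn main() {
    let suffixes = ["com", "uk", "co.uk"];
    assert_eq!(registered_domain("foo.example.com", &suffixes).as_deref(), Some("example.com"));
    // Taking the last two labels would wrongly yield "co.uk" here:
    assert_eq!(registered_domain("foo.co.uk", &suffixes).as_deref(), Some("foo.co.uk"));
}
```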
/// /// # Examples /// -/// - `com` -/// - `co.uk` +/// - `"com"` +/// - `"co.uk"` +#[cfg(feature = "semconv_experimental")] pub const URL_TOP_LEVEL_DOMAIN: &str = "url.top_level_domain"; -/// Name of the user-agent extracted from original. Usually refers to the browser's name. -/// -/// [Example](https://www.whatsmyua.info) of extracting browser's name from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant name SHOULD be selected. In such a scenario it should align with `user_agent.version` -/// -/// # Examples -/// -/// - `Safari` -/// - `YourApp` -pub const USER_AGENT_NAME: &str = "user_agent.name"; -/// Value of the [HTTP User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. -/// -/// # Examples -/// -/// - `CERN-LineMode/2.15 libwww/2.17b3` -/// - `Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1` -/// - `YourApp/1.0.0 grpc-java-okhttp/1.27.2` -pub const USER_AGENT_ORIGINAL: &str = "user_agent.original"; -/// Version of the user-agent extracted from original. Usually refers to the browser's version. -/// -/// [Example](https://www.whatsmyua.info) of extracting browser's version from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant version SHOULD be selected. In such a scenario it should align with `user_agent.name` -/// -/// # Examples -/// -/// - `14.1.2` -/// - `1.0.0` -pub const USER_AGENT_VERSION: &str = "user_agent.version"; + /// User email address. /// +/// ## Notes +/// /// # Examples /// -/// - `a.einstein@example.com` +/// - `"a.einstein@example.com"` +#[cfg(feature = "semconv_experimental")] pub const USER_EMAIL: &str = "user.email"; -/// User's full name. + +/// User's full name +/// +/// ## Notes /// /// # Examples /// -/// - `Albert Einstein` +/// - `"Albert Einstein"` +#[cfg(feature = "semconv_experimental")] pub const USER_FULL_NAME: &str = "user.full_name"; + /// Unique user hash to correlate information for a user in anonymized form. /// +/// ## Notes +/// /// Useful if `user.id` or `user.name` contain confidential information and cannot be used. /// /// # Examples /// -/// - `364fc68eaf4c8acec74a4e52d7d1feaa` +/// - `"364fc68eaf4c8acec74a4e52d7d1feaa"` +#[cfg(feature = "semconv_experimental")] pub const USER_HASH: &str = "user.hash"; + /// Unique identifier of the user. /// +/// ## Notes +/// /// # Examples /// -/// - `S-1-5-21-202424912787-2692429404-2351956786-1000` +/// - `"S-1-5-21-202424912787-2692429404-2351956786-1000"` +#[cfg(feature = "semconv_experimental")] pub const USER_ID: &str = "user.id"; + /// Short name or login/username of the user. /// +/// ## Notes +/// /// # Examples /// -/// - `a.einstein` +/// - `"a.einstein"` +#[cfg(feature = "semconv_experimental")] pub const USER_NAME: &str = "user.name"; + /// Array of user roles at the time of the event. /// +/// ## Notes +/// /// # Examples /// -/// - `admin` -/// - `reporting_user` +/// - `[ +/// "admin", +/// "reporting_user", +/// ]` +#[cfg(feature = "semconv_experimental")] pub const USER_ROLES: &str = "user.roles"; + +/// Name of the user-agent extracted from original. Usually refers to the browser's name. 
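`user.roles` above is an array-valued attribute, so it is recorded with the `opentelemetry` array value type rather than a plain string. A small sketch (the constants require the `semconv_experimental` feature; the values are illustrative):

```rust
use opentelemetry::{Array, KeyValue, StringValue, Value};
use opentelemetry_semantic_conventions as semconv;

fn user_attributes() -> Vec<KeyValue> {
    vec![
        KeyValue::new(
            semconv::attribute::USER_ID,
            "S-1-5-21-202424912787-2692429404-2351956786-1000",
        ),
        // Array-valued attribute: one `Value::Array` holding all roles.
        KeyValue::new(
            semconv::attribute::USER_ROLES,
            Value::Array(Array::String(vec![
                StringValue::from("admin"),
                StringValue::from("reporting_user"),
            ])),
        ),
    ]
}
```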
+/// +/// ## Notes +/// +/// [Example](https://www.whatsmyua.info) of extracting browser's name from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant name SHOULD be selected. In such a scenario it should align with `user_agent.version` +/// +/// # Examples +/// +/// - `"Safari"` +/// - `"YourApp"` +#[cfg(feature = "semconv_experimental")] +pub const USER_AGENT_NAME: &str = "user_agent.name"; + +/// Value of the [HTTP User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"CERN-LineMode/2.15 libwww/2.17b3"` +/// - `"Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1"` +/// - `"YourApp/1.0.0 grpc-java-okhttp/1.27.2"` +pub const USER_AGENT_ORIGINAL: &str = "user_agent.original"; + +/// Specifies the category of synthetic traffic, such as tests or bots. +/// +/// ## Notes +/// +/// This attribute MAY be derived from the contents of the `user_agent.original` attribute. Components that populate the attribute are responsible for determining what they consider to be synthetic bot or test traffic. This attribute can either be set for self-identification purposes, or on telemetry detected to be generated as a result of a synthetic request. This attribute is useful for distinguishing between genuine client traffic and synthetic traffic generated by bots or tests +#[cfg(feature = "semconv_experimental")] +pub const USER_AGENT_SYNTHETIC_TYPE: &str = "user_agent.synthetic.type"; + +/// Version of the user-agent extracted from original. Usually refers to the browser's version +/// +/// ## Notes +/// +/// [Example](https://www.whatsmyua.info) of extracting browser's version from original string. In the case of using a user-agent for non-browser products, such as microservices with multiple names/versions inside the `user_agent.original`, the most significant version SHOULD be selected. In such a scenario it should align with `user_agent.name` +/// +/// # Examples +/// +/// - `"14.1.2"` +/// - `"1.0.0"` +#[cfg(feature = "semconv_experimental")] +pub const USER_AGENT_VERSION: &str = "user_agent.version"; + /// The type of garbage collection. +/// +/// ## Notes +#[cfg(feature = "semconv_experimental")] pub const V8JS_GC_TYPE: &str = "v8js.gc.type"; + /// The name of the space type of heap memory. /// +/// ## Notes +/// /// Value can be retrieved from value `space_name` of [`v8.getHeapSpaceStatistics()`](https://nodejs.org/api/v8.html#v8getheapspacestatistics) +#[cfg(feature = "semconv_experimental")] pub const V8JS_HEAP_SPACE_NAME: &str = "v8js.heap.space.name"; -/// The ID of the change (pull request/merge request) if applicable. This is usually a unique (within repository) identifier generated by the VCS system. + +/// The ID of the change (pull request/merge request/changelist) if applicable. This is usually a unique (within repository) identifier generated by the VCS system. +/// +/// ## Notes /// /// # Examples /// -/// - `123` -pub const VCS_REPOSITORY_CHANGE_ID: &str = "vcs.repository.change.id"; -/// The human readable title of the change (pull request/merge request). This title is often a brief summary of the change and may get merged in to a ref as the commit summary. 
+/// - `"123"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_CHANGE_ID: &str = "vcs.change.id"; + +/// The state of the change (pull request/merge request/changelist). +/// +/// ## Notes /// /// # Examples /// -/// - `Fixes broken thing` -/// - `feat: add my new feature` -/// - `[chore] update dependency` -pub const VCS_REPOSITORY_CHANGE_TITLE: &str = "vcs.repository.change.title"; +/// - `"open"` +/// - `"closed"` +/// - `"merged"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_CHANGE_STATE: &str = "vcs.change.state"; + +/// The human readable title of the change (pull request/merge request/changelist). This title is often a brief summary of the change and may get merged in to a ref as the commit summary. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Fixes broken thing"` +/// - `"feat: add my new feature"` +/// - `"[chore] update dependency"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_CHANGE_TITLE: &str = "vcs.change.title"; + +/// The type of line change being measured on a branch or change. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"added"` +/// - `"removed"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_LINE_CHANGE_TYPE: &str = "vcs.line_change.type"; + /// The name of the [reference](https://git-scm.com/docs/gitglossary#def_ref) such as **branch** or **tag** in the repository. /// +/// ## Notes +/// /// # Examples /// -/// - `my-feature-branch` -/// - `tag-1-test` -pub const VCS_REPOSITORY_REF_NAME: &str = "vcs.repository.ref.name"; +/// - `"my-feature-branch"` +/// - `"tag-1-test"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_BASE_NAME: &str = "vcs.ref.base.name"; + /// The revision, literally [revised version](https://www.merriam-webster.com/dictionary/revision), The revision most often refers to a commit object in Git, or a revision number in SVN. /// +/// ## Notes +/// /// The revision can be a full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), /// of the recorded change to a ref within a repository pointing to a /// commit [commit](https://git-scm.com/docs/git-commit) object. It does /// not necessarily have to be a hash; it can simply define a /// [revision number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) /// which is an integer that is monotonically increasing. In cases where -/// it is identical to the `ref.name`, it SHOULD still be included. It is +/// it is identical to the `ref.base.name`, it SHOULD still be included. It is /// up to the implementer to decide which value to set as the revision /// based on the VCS system and situational context. /// /// # Examples /// -/// - `9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc` -/// - `main` -/// - `123` -/// - `HEAD` -pub const VCS_REPOSITORY_REF_REVISION: &str = "vcs.repository.ref.revision"; +/// - `"9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"` +/// - `"main"` +/// - `"123"` +/// - `"HEAD"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_BASE_REVISION: &str = "vcs.ref.base.revision"; + +/// The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"branch"` +/// - `"tag"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_BASE_TYPE: &str = "vcs.ref.base.type"; + +/// The name of the [reference](https://git-scm.com/docs/gitglossary#def_ref) such as **branch** or **tag** in the repository. 
+/// +/// ## Notes +/// +/// # Examples +/// +/// - `"my-feature-branch"` +/// - `"tag-1-test"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_HEAD_NAME: &str = "vcs.ref.head.name"; + +/// The revision, literally [revised version](https://www.merriam-webster.com/dictionary/revision). The revision most often refers to a commit object in Git, or a revision number in SVN. +/// +/// ## Notes +/// +/// The revision can be a full [hash value (see glossary)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf), +/// of the recorded change to a ref within a repository pointing to a +/// [commit](https://git-scm.com/docs/git-commit) object. It does +/// not necessarily have to be a hash; it can simply define a +/// [revision number](https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html) +/// which is an integer that is monotonically increasing. In cases where +/// it is identical to the `ref.head.name`, it SHOULD still be included. It is +/// up to the implementer to decide which value to set as the revision +/// based on the VCS system and situational context. +/// +/// # Examples +/// +/// - `"9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"` +/// - `"main"` +/// - `"123"` +/// - `"HEAD"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_HEAD_REVISION: &str = "vcs.ref.head.revision"; + +/// The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"branch"` +/// - `"tag"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_HEAD_TYPE: &str = "vcs.ref.head.type"; + /// The type of the [reference](https://git-scm.com/docs/gitglossary#def_ref) in the repository. /// +/// ## Notes +/// +/// # Examples +/// +/// - `"branch"` +/// - `"tag"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REF_TYPE: &str = "vcs.ref.type"; + +/// Deprecated, use `vcs.change.id` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"123"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, use `vcs.change.id` instead.")] +pub const VCS_REPOSITORY_CHANGE_ID: &str = "vcs.repository.change.id"; + +/// Deprecated, use `vcs.change.title` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"Fixes broken thing"` +/// - `"feat: add my new feature"` +/// - `"[chore] update dependency"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, use `vcs.change.title` instead.")] +pub const VCS_REPOSITORY_CHANGE_TITLE: &str = "vcs.repository.change.title"; + +/// Deprecated, use `vcs.ref.head.name` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"my-feature-branch"` +/// - `"tag-1-test"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, use `vcs.ref.head.name` instead.")] +pub const VCS_REPOSITORY_REF_NAME: &str = "vcs.repository.ref.name"; + +/// Deprecated, use `vcs.ref.head.revision` instead. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"` +/// - `"main"` +/// - `"123"` +/// - `"HEAD"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, use `vcs.ref.head.revision` instead.")] +pub const VCS_REPOSITORY_REF_REVISION: &str = "vcs.repository.ref.revision"; + +/// Deprecated, use `vcs.ref.head.type` instead.
+/// +/// ## Notes +/// /// # Examples /// -/// - `branch` -/// - `tag` +/// - `"branch"` +/// - `"tag"` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Deprecated, use `vcs.ref.head.type` instead.")] pub const VCS_REPOSITORY_REF_TYPE: &str = "vcs.repository.ref.type"; -/// The [URL](https://en.wikipedia.org/wiki/URL) of the repository providing the complete address in order to locate and identify the repository. + +/// The [URL](https://wikipedia.org/wiki/URL) of the repository providing the complete address in order to locate and identify the repository. +/// +/// ## Notes /// /// # Examples /// -/// - `https://github.com/opentelemetry/open-telemetry-collector-contrib` -/// - `https://gitlab.com/my-org/my-project/my-projects-project/repo` +/// - `"https://github.com/opentelemetry/open-telemetry-collector-contrib"` +/// - `"https://gitlab.com/my-org/my-project/my-projects-project/repo"` +#[cfg(feature = "semconv_experimental")] pub const VCS_REPOSITORY_URL_FULL: &str = "vcs.repository.url.full"; + +/// The type of revision comparison. +/// +/// ## Notes +/// +/// # Examples +/// +/// - `"ahead"` +/// - `"behind"` +#[cfg(feature = "semconv_experimental")] +pub const VCS_REVISION_DELTA_DIRECTION: &str = "vcs.revision_delta.direction"; + /// Additional description of the web engine (e.g. detailed version and edition information). /// +/// ## Notes +/// /// # Examples /// -/// - `WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final` +/// - `"WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final"` +#[cfg(feature = "semconv_experimental")] pub const WEBENGINE_DESCRIPTION: &str = "webengine.description"; + /// The name of the web engine. /// +/// ## Notes +/// /// # Examples /// -/// - `WildFly` +/// - `"WildFly"` +#[cfg(feature = "semconv_experimental")] pub const WEBENGINE_NAME: &str = "webengine.name"; + /// The version of the web engine. /// +/// ## Notes +/// /// # Examples /// -/// - `21.0.0` +/// - `"21.0.0"` +#[cfg(feature = "semconv_experimental")] pub const WEBENGINE_VERSION: &str = "webengine.version"; diff --git a/opentelemetry-semantic-conventions/src/lib.rs b/opentelemetry-semantic-conventions/src/lib.rs index ca2eb7e1cd..bdb0a9277e 100644 --- a/opentelemetry-semantic-conventions/src/lib.rs +++ b/opentelemetry-semantic-conventions/src/lib.rs @@ -22,4 +22,4 @@ pub mod trace; /// The schema URL that matches the version of the semantic conventions that /// this crate defines. -pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.27.0"; +pub const SCHEMA_URL: &str = "https://opentelemetry.io/schemas/1.29.0"; diff --git a/opentelemetry-semantic-conventions/src/metric.rs b/opentelemetry-semantic-conventions/src/metric.rs index d80e04bb8b..837101e0df 100644 --- a/opentelemetry-semantic-conventions/src/metric.rs +++ b/opentelemetry-semantic-conventions/src/metric.rs @@ -1,7 +1,7 @@ // DO NOT EDIT, this is an auto-generated file // // If you want to update the file: -// - Edit the template at scripts/templates/semantic_metrics.rs.j2 +// - Edit the template at scripts/templates/registry/rust/metric.rs.j2 // - Run the script at scripts/generate-consts-from-spec.sh //! # Metric Semantic Conventions @@ -25,11 +25,15 @@ //! .u64_histogram(semconv::metric::HTTP_SERVER_REQUEST_DURATION) //! .with_unit("s") //! .with_description("Duration of HTTP server requests.") -//! .init(); +//! .build(); //! ``` + /// ## Description +/// /// Number of exceptions caught by exception handling middleware.
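Note the builder change in the doc-header example above: instrument builders are now finished with `.build()` rather than `.init()`. A sketch for the exceptions counter documented here, assuming the `opentelemetry` metrics API; the meter name follows the `Meter name` noted below, and the attribute values are illustrative:

```rust
use opentelemetry::{global, KeyValue};
use opentelemetry_semantic_conventions as semconv;

fn count_handled_exception() {
    let meter = global::meter("Microsoft.AspNetCore.Diagnostics");
    let counter = meter
        .u64_counter(semconv::metric::ASPNETCORE_DIAGNOSTICS_EXCEPTIONS)
        .with_unit("{exception}")
        .with_description("Number of exceptions caught by exception handling middleware.")
        .build(); // `.build()` replaces the old `.init()`
    counter.add(
        1,
        &[
            // Both attributes are `Required` per the table below.
            KeyValue::new(semconv::attribute::ASPNETCORE_DIAGNOSTICS_EXCEPTION_RESULT, "handled"),
            KeyValue::new(semconv::attribute::ERROR_TYPE, "System.InvalidOperationException"),
        ],
    );
}
```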
/// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.Diagnostics`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -42,12 +46,16 @@ /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::ASPNETCORE_DIAGNOSTICS_EXCEPTION_RESULT`] | `Required` +/// | [`crate::attribute::ASPNETCORE_DIAGNOSTICS_HANDLER_TYPE`] | `Conditionally_required`: if and only if the exception was handled by this handler. /// | [`crate::attribute::ERROR_TYPE`] | `Required` -/// | [`crate::attribute::ASPNETCORE_DIAGNOSTICS_HANDLER_TYPE`] | `Conditionally required`: if and only if the exception was handled by this handler. pub const ASPNETCORE_DIAGNOSTICS_EXCEPTIONS: &str = "aspnetcore.diagnostics.exceptions"; + /// ## Description +/// /// Number of requests that are currently active on the server that hold a rate limiting lease. /// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.RateLimiting`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -59,12 +67,16 @@ pub const ASPNETCORE_DIAGNOSTICS_EXCEPTIONS: &str = "aspnetcore.diagnostics.exce /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally required`: if the matched endpoint for the request had a rate-limiting policy. +/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally_required`: if the matched endpoint for the request had a rate-limiting policy. pub const ASPNETCORE_RATE_LIMITING_ACTIVE_REQUEST_LEASES: &str = "aspnetcore.rate_limiting.active_request_leases"; + /// ## Description +/// /// Number of requests that are currently queued, waiting to acquire a rate limiting lease. /// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.RateLimiting`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -76,12 +88,16 @@ pub const ASPNETCORE_RATE_LIMITING_ACTIVE_REQUEST_LEASES: &str = /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally required`: if the matched endpoint for the request had a rate-limiting policy. +/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally_required`: if the matched endpoint for the request had a rate-limiting policy. pub const ASPNETCORE_RATE_LIMITING_QUEUED_REQUESTS: &str = "aspnetcore.rate_limiting.queued_requests"; + /// ## Description +/// /// The time the request spent in a queue waiting to acquire a rate limiting lease. /// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.RateLimiting`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -93,13 +109,17 @@ pub const ASPNETCORE_RATE_LIMITING_QUEUED_REQUESTS: &str = /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally_required`: if the matched endpoint for the request had a rate-limiting policy. /// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_RESULT`] | `Required` -/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally required`: if the matched endpoint for the request had a rate-limiting policy. pub const ASPNETCORE_RATE_LIMITING_REQUEST_TIME_IN_QUEUE: &str = "aspnetcore.rate_limiting.request.time_in_queue"; + /// ## Description +/// /// The duration of rate limiting lease held by requests on the server. 
/// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.RateLimiting`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -111,16 +131,20 @@ pub const ASPNETCORE_RATE_LIMITING_REQUEST_TIME_IN_QUEUE: &str = /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally required`: if the matched endpoint for the request had a rate-limiting policy. +/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally_required`: if the matched endpoint for the request had a rate-limiting policy. pub const ASPNETCORE_RATE_LIMITING_REQUEST_LEASE_DURATION: &str = "aspnetcore.rate_limiting.request_lease.duration"; + /// ## Description +/// /// Number of requests that tried to acquire a rate limiting lease. /// +/// ## Notes +/// /// Requests could be: /// -/// * Rejected by global or endpoint rate limiting policies -/// * Canceled while waiting for the lease. +/// - Rejected by global or endpoint rate limiting policies +/// - Canceled while waiting for the lease. /// /// Meter name: `Microsoft.AspNetCore.RateLimiting`; Added in: ASP.NET Core 8.0 /// ## Metadata @@ -133,12 +157,16 @@ pub const ASPNETCORE_RATE_LIMITING_REQUEST_LEASE_DURATION: &str = /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally_required`: if the matched endpoint for the request had a rate-limiting policy. /// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_RESULT`] | `Required` -/// | [`crate::attribute::ASPNETCORE_RATE_LIMITING_POLICY`] | `Conditionally required`: if the matched endpoint for the request had a rate-limiting policy. pub const ASPNETCORE_RATE_LIMITING_REQUESTS: &str = "aspnetcore.rate_limiting.requests"; + /// ## Description +/// /// Number of requests that were attempted to be matched to an endpoint. /// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.Routing`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -150,12 +178,16 @@ pub const ASPNETCORE_RATE_LIMITING_REQUESTS: &str = "aspnetcore.rate_limiting.re /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ASPNETCORE_ROUTING_IS_FALLBACK`] | `Conditionally_required`: if and only if a route was successfully matched. /// | [`crate::attribute::ASPNETCORE_ROUTING_MATCH_STATUS`] | `Required` -/// | [`crate::attribute::ASPNETCORE_ROUTING_IS_FALLBACK`] | `Conditionally required`: if and only if a route was successfully matched. -/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally required`: if and only if a route was successfully matched. +/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally_required`: if and only if a route was successfully matched. pub const ASPNETCORE_ROUTING_MATCH_ATTEMPTS: &str = "aspnetcore.routing.match_attempts"; + /// ## Description -/// Total CPU time consumed. +/// +/// Total CPU time consumed +/// +/// ## Notes /// /// Total CPU time consumed by the specific container on all available CPU cores /// ## Metadata @@ -168,12 +200,38 @@ pub const ASPNETCORE_ROUTING_MATCH_ATTEMPTS: &str = "aspnetcore.routing.match_at /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::CPU_MODE`] | `Opt in` +/// | [`crate::attribute::CPU_MODE`] | `Conditionally_required`: Required if mode is available, i.e. metrics coming from the Docker Stats API. 
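A sketch of reporting the container CPU counter above (the constant requires the `semconv_experimental` feature). The meter name and the sampled value are illustrative; `cpu.mode` is attached because it is conditionally required when the mode is known, e.g. from the Docker Stats API:

```rust
use opentelemetry::{global, KeyValue};
use opentelemetry_semantic_conventions as semconv;

fn report_container_cpu(seconds_in_user_mode: f64) {
    let meter = global::meter("container-stats"); // illustrative scope name
    let cpu_time = meter
        .f64_counter(semconv::metric::CONTAINER_CPU_TIME)
        .with_unit("s")
        .build();
    cpu_time.add(
        seconds_in_user_mode,
        &[KeyValue::new(semconv::attribute::CPU_MODE, "user")],
    );
}
```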
+#[cfg(feature = "semconv_experimental")] pub const CONTAINER_CPU_TIME: &str = "container.cpu.time"; + +/// ## Description +/// +/// Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +/// +/// ## Notes +/// +/// CPU usage of the specific container on all available CPU cores, averaged over the sample window +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `{cpu}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CPU_MODE`] | `Conditionally_required`: Required if mode is available, i.e. metrics coming from the Docker Stats API. +#[cfg(feature = "semconv_experimental")] +pub const CONTAINER_CPU_USAGE: &str = "container.cpu.usage"; + /// ## Description +/// /// Disk bytes for the container. /// -/// The total number of bytes read/written successfully (aggregated from all disks). +/// ## Notes +/// +/// The total number of bytes read/written successfully (aggregated from all disks) /// ## Metadata /// | | | /// |:-|:- @@ -184,24 +242,34 @@ pub const CONTAINER_CPU_TIME: &str = "container.cpu.time"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Unspecified` -/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified` +/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Recommended` +/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const CONTAINER_DISK_IO: &str = "container.disk.io"; + /// ## Description -/// Memory usage of the container. /// /// Memory usage of the container. +/// +/// ## Notes +/// +/// Memory usage of the container /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const CONTAINER_MEMORY_USAGE: &str = "container.memory.usage"; + /// ## Description +/// /// Network bytes for the container. /// -/// The number of bytes sent/received on all network interfaces by the container. +/// ## Notes +/// +/// The number of bytes sent/received on all network interfaces by the container /// ## Metadata /// | | | /// |:-|:- @@ -212,11 +280,31 @@ pub const CONTAINER_MEMORY_USAGE: &str = "container.memory.usage"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Unspecified` -/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified` +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const CONTAINER_NETWORK_IO: &str = "container.network.io"; + +/// ## Description +/// +/// The time the container has been running +/// +/// ## Notes +/// +/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +/// The actual accuracy would depend on the instrumentation and operating system +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const CONTAINER_UPTIME: &str = "container.uptime"; + /// ## Description -/// The number of connections that are currently in state described by the `state` attribute. 
+/// +/// The number of connections that are currently in state described by the `state` attribute /// ## Metadata /// | | | /// |:-|:- @@ -229,9 +317,12 @@ pub const CONTAINER_NETWORK_IO: &str = "container.network.io"; /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` /// | [`crate::attribute::DB_CLIENT_CONNECTION_STATE`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_COUNT: &str = "db.client.connection.count"; + /// ## Description -/// The time it took to create a new connection. +/// +/// The time it took to create a new connection /// ## Metadata /// | | | /// |:-|:- @@ -243,9 +334,12 @@ pub const DB_CLIENT_CONNECTION_COUNT: &str = "db.client.connection.count"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_CREATE_TIME: &str = "db.client.connection.create_time"; + /// ## Description -/// The maximum number of idle open connections allowed. +/// +/// The maximum number of idle open connections allowed /// ## Metadata /// | | | /// |:-|:- @@ -257,9 +351,12 @@ pub const DB_CLIENT_CONNECTION_CREATE_TIME: &str = "db.client.connection.create_ /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_IDLE_MAX: &str = "db.client.connection.idle.max"; + /// ## Description -/// The minimum number of idle open connections allowed. +/// +/// The minimum number of idle open connections allowed /// ## Metadata /// | | | /// |:-|:- @@ -271,9 +368,12 @@ pub const DB_CLIENT_CONNECTION_IDLE_MAX: &str = "db.client.connection.idle.max"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_IDLE_MIN: &str = "db.client.connection.idle.min"; + /// ## Description -/// The maximum number of open connections allowed. +/// +/// The maximum number of open connections allowed /// ## Metadata /// | | | /// |:-|:- @@ -285,9 +385,12 @@ pub const DB_CLIENT_CONNECTION_IDLE_MIN: &str = "db.client.connection.idle.min"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_MAX: &str = "db.client.connection.max"; + /// ## Description -/// The number of pending requests for an open connection, cumulative for the entire pool. +/// +/// The number of current pending requests for an open connection /// ## Metadata /// | | | /// |:-|:- @@ -299,9 +402,12 @@ pub const DB_CLIENT_CONNECTION_MAX: &str = "db.client.connection.max"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_PENDING_REQUESTS: &str = "db.client.connection.pending_requests"; + /// ## Description -/// The number of connection timeouts that have occurred trying to obtain a connection from the pool. 
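A sketch of the connection-pool updowncounter pattern used by the metrics above, assuming the `opentelemetry` metrics API (the constants require the `semconv_experimental` feature; the pool name, unit, and state value are illustrative):

```rust
use opentelemetry::{global, KeyValue};
use opentelemetry_semantic_conventions as semconv;

fn track_connection_checkout() {
    let meter = global::meter("db-pool"); // illustrative scope name
    let connections = meter
        .i64_up_down_counter(semconv::metric::DB_CLIENT_CONNECTION_COUNT)
        .with_unit("{connection}")
        .build();
    // Both attributes are `Required` per the table above.
    let attrs = [
        KeyValue::new(semconv::attribute::DB_CLIENT_CONNECTION_POOL_NAME, "orders-pool"),
        KeyValue::new(semconv::attribute::DB_CLIENT_CONNECTION_STATE, "used"),
    ];
    connections.add(1, &attrs); // one more connection in the `used` state
}
```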
+/// +/// The number of connection timeouts that have occurred trying to obtain a connection from the pool /// ## Metadata /// | | | /// |:-|:- @@ -313,9 +419,12 @@ pub const DB_CLIENT_CONNECTION_PENDING_REQUESTS: &str = "db.client.connection.pe /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_TIMEOUTS: &str = "db.client.connection.timeouts"; + /// ## Description -/// The time between borrowing a connection and returning it to the pool. +/// +/// The time between borrowing a connection and returning it to the pool /// ## Metadata /// | | | /// |:-|:- @@ -327,9 +436,12 @@ pub const DB_CLIENT_CONNECTION_TIMEOUTS: &str = "db.client.connection.timeouts"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_USE_TIME: &str = "db.client.connection.use_time"; + /// ## Description -/// The time it took to obtain an open connection from the pool. +/// +/// The time it took to obtain an open connection from the pool /// ## Metadata /// | | | /// |:-|:- @@ -341,9 +453,12 @@ pub const DB_CLIENT_CONNECTION_USE_TIME: &str = "db.client.connection.use_time"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTION_POOL_NAME`] | `Required` +#[cfg(feature = "semconv_experimental")] pub const DB_CLIENT_CONNECTION_WAIT_TIME: &str = "db.client.connection.wait_time"; + /// ## Description -/// Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`. +/// +/// Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s` /// ## Metadata /// | | | /// |:-|:- @@ -355,10 +470,15 @@ pub const DB_CLIENT_CONNECTION_WAIT_TIME: &str = "db.client.connection.wait_time /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Replaced by `db.client.connection.create_time`. Note: the unit also changed from `ms` to `s`." +)] pub const DB_CLIENT_CONNECTIONS_CREATE_TIME: &str = "db.client.connections.create_time"; + /// ## Description -/// Deprecated, use `db.client.connection.idle.max` instead. +/// +/// Deprecated, use `db.client.connection.idle.max` instead /// ## Metadata /// | | | /// |:-|:- @@ -370,10 +490,13 @@ pub const DB_CLIENT_CONNECTIONS_CREATE_TIME: &str = "db.client.connections.creat /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.idle.max`.")] pub const DB_CLIENT_CONNECTIONS_IDLE_MAX: &str = "db.client.connections.idle.max"; + /// ## Description -/// Deprecated, use `db.client.connection.idle.min` instead. 
+/// +/// Deprecated, use `db.client.connection.idle.min` instead /// ## Metadata /// | | | /// |:-|:- @@ -385,10 +508,13 @@ pub const DB_CLIENT_CONNECTIONS_IDLE_MAX: &str = "db.client.connections.idle.max /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.idle.min`.")] pub const DB_CLIENT_CONNECTIONS_IDLE_MIN: &str = "db.client.connections.idle.min"; + /// ## Description -/// Deprecated, use `db.client.connection.max` instead. +/// +/// Deprecated, use `db.client.connection.max` instead /// ## Metadata /// | | | /// |:-|:- @@ -400,10 +526,13 @@ pub const DB_CLIENT_CONNECTIONS_IDLE_MIN: &str = "db.client.connections.idle.min /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.max`.")] pub const DB_CLIENT_CONNECTIONS_MAX: &str = "db.client.connections.max"; + /// ## Description -/// Deprecated, use `db.client.connection.pending_requests` instead. +/// +/// Deprecated, use `db.client.connection.pending_requests` instead /// ## Metadata /// | | | /// |:-|:- @@ -415,10 +544,13 @@ pub const DB_CLIENT_CONNECTIONS_MAX: &str = "db.client.connections.max"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.pending_requests`.")] pub const DB_CLIENT_CONNECTIONS_PENDING_REQUESTS: &str = "db.client.connections.pending_requests"; + /// ## Description -/// Deprecated, use `db.client.connection.timeouts` instead. +/// +/// Deprecated, use `db.client.connection.timeouts` instead /// ## Metadata /// | | | /// |:-|:- @@ -430,10 +562,13 @@ pub const DB_CLIENT_CONNECTIONS_PENDING_REQUESTS: &str = "db.client.connections. /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.timeouts`.")] pub const DB_CLIENT_CONNECTIONS_TIMEOUTS: &str = "db.client.connections.timeouts"; + /// ## Description -/// Deprecated, use `db.client.connection.count` instead. +/// +/// Deprecated, use `db.client.connection.count` instead /// ## Metadata /// | | | /// |:-|:- @@ -446,10 +581,13 @@ pub const DB_CLIENT_CONNECTIONS_TIMEOUTS: &str = "db.client.connections.timeouts /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_STATE`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `db.client.connection.count`.")] pub const DB_CLIENT_CONNECTIONS_USAGE: &str = "db.client.connections.usage"; + /// ## Description -/// Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`. +/// +/// Deprecated, use `db.client.connection.use_time` instead. 
Note: the unit also changed from `ms` to `s` /// ## Metadata /// | | | /// |:-|:- @@ -461,10 +599,15 @@ pub const DB_CLIENT_CONNECTIONS_USAGE: &str = "db.client.connections.usage"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Replaced by `db.client.connection.use_time`. Note: the unit also changed from `ms` to `s`." +)] pub const DB_CLIENT_CONNECTIONS_USE_TIME: &str = "db.client.connections.use_time"; + /// ## Description -/// Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`. +/// +/// Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s` /// ## Metadata /// | | | /// |:-|:- @@ -476,254 +619,711 @@ pub const DB_CLIENT_CONNECTIONS_USE_TIME: &str = "db.client.connections.use_time /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::DB_CLIENT_CONNECTIONS_POOL_NAME`] | `Required` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated( + note = "Replaced by `db.client.connection.wait_time`. Note: the unit also changed from `ms` to `s`." +)] pub const DB_CLIENT_CONNECTIONS_WAIT_TIME: &str = "db.client.connections.wait_time"; + /// ## Description -/// Duration of database client operations. /// -/// Batch operations SHOULD be recorded as a single operation. +/// Number of active client instances /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{instance}` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::DB_SYSTEM`] | `Required` -/// | [`crate::attribute::DB_COLLECTION_NAME`] | `Conditionally required`: If readily available. The collection name MAY be parsed from the query text, in which case it SHOULD be the first collection name in the query. -/// | [`crate::attribute::DB_NAMESPACE`] | `Conditionally required`: If available. -/// | [`crate::attribute::DB_OPERATION_NAME`] | `Conditionally required`: If readily available. The operation name MAY be parsed from the query text, in which case it SHOULD be the first operation name found in the query. -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the operation failed. -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally required`: If using a port other than the default port for this DBMS and if `server.address` is set. -/// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `Recommended`: If applicable for this database system. -/// | [`crate::attribute::NETWORK_PEER_PORT`] | `Recommended`: If and only if `network.peer.address` is set. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified` -pub const DB_CLIENT_OPERATION_DURATION: &str = "db.client.operation.duration"; +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const DB_CLIENT_COSMOSDB_ACTIVE_INSTANCE_COUNT: &str = + "db.client.cosmosdb.active_instance.count"; + /// ## Description -/// Measures the time taken to perform a DNS lookup. 
+/// +/// [Request charge](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Unit: | `{request_unit}` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::DNS_QUESTION_NAME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: if and only if an error has occurred. -pub const DNS_LOOKUP_DURATION: &str = "dns.lookup.duration"; +/// | [`crate::attribute::DB_COLLECTION_NAME`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_COSMOSDB_CONSISTENCY_LEVEL`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_COSMOSDB_REGIONS_CONTACTED`] | `Recommended`: if available. +/// | [`crate::attribute::DB_COSMOSDB_SUB_STATUS_CODE`] | `Conditionally_required`: when response was received and contained sub-code. +/// | [`crate::attribute::DB_NAMESPACE`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_OPERATION_NAME`] | `Conditionally_required`: If readily available and if there is a single operation name that describes the database call. The operation name MAY be parsed from the query text, in which case it SHOULD be the single operation name found in the query. +/// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available. +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const DB_CLIENT_COSMOSDB_OPERATION_REQUEST_CHARGE: &str = + "db.client.cosmosdb.operation.request_charge"; + /// ## Description -/// Number of invocation cold starts. +/// +/// Duration of database client operations. +/// +/// ## Notes +/// +/// Batch operations SHOULD be recorded as a single operation /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `{coldstart}` | +/// | Instrument: | `histogram` | +/// | Unit: | `s` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_COLDSTARTS: &str = "faas.coldstarts"; +/// | [`crate::attribute::DB_COLLECTION_NAME`] | `Conditionally_required`: If readily available and if a database call is performed on a single collection. The collection name MAY be parsed from the query text, in which case it SHOULD be the single collection name in the query. +/// | [`crate::attribute::DB_NAMESPACE`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_OPERATION_NAME`] | `Conditionally_required`: If readily available and if there is a single operation name that describes the database call. The operation name MAY be parsed from the query text, in which case it SHOULD be the single operation name found in the query. +/// | [`crate::attribute::DB_QUERY_SUMMARY`] | `Recommended`: if readily available or if instrumentation supports query summarization. +/// | [`crate::attribute::DB_QUERY_TEXT`] | `Opt_in` +/// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available.
+/// | [`crate::attribute::DB_SYSTEM`] | `Required` +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. +/// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `Recommended`: if applicable for this database system. +/// | [`crate::attribute::NETWORK_PEER_PORT`] | `Recommended`: if and only if `network.peer.address` is set. +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const DB_CLIENT_OPERATION_DURATION: &str = "db.client.operation.duration"; + /// ## Description -/// Distribution of CPU usage per invocation. +/// +/// The actual number of records returned by the database operation /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Unit: | `{row}` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_CPU_USAGE: &str = "faas.cpu_usage"; +/// | [`crate::attribute::DB_COLLECTION_NAME`] | `Conditionally_required`: If readily available and if a database call is performed on a single collection. The collection name MAY be parsed from the query text, in which case it SHOULD be the single collection name in the query. +/// | [`crate::attribute::DB_NAMESPACE`] | `Conditionally_required`: If available. +/// | [`crate::attribute::DB_OPERATION_NAME`] | `Conditionally_required`: If readily available and if there is a single operation name that describes the database call. The operation name MAY be parsed from the query text, in which case it SHOULD be the single operation name found in the query. +/// | [`crate::attribute::DB_QUERY_SUMMARY`] | `Recommended`: if readily available or if instrumentation supports query summarization. +/// | [`crate::attribute::DB_QUERY_TEXT`] | `Opt_in` +/// | [`crate::attribute::DB_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If the operation failed and status code is available. +/// | [`crate::attribute::DB_SYSTEM`] | `Required` +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the operation failed. +/// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `Recommended`: if applicable for this database system. +/// | [`crate::attribute::NETWORK_PEER_PORT`] | `Recommended`: if and only if `network.peer.address` is set. +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If using a port other than the default port for this DBMS and if `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const DB_CLIENT_RESPONSE_RETURNED_ROWS: &str = "db.client.response.returned_rows"; + /// ## Description -/// Number of invocation errors. +/// +/// Measures the time taken to perform a DNS lookup /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `{error}` | +/// | Instrument: | `histogram` | +/// | Unit: | `s` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_ERRORS: &str = "faas.errors"; +/// | [`crate::attribute::DNS_QUESTION_NAME`] | `Required` +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: if and only if an error has occurred.
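A sketch wiring up the `dns.lookup.duration` histogram defined next (the constant requires the `semconv_experimental` feature); the lookup call here is only a stand-in for real resolver instrumentation:

```rust
use std::net::ToSocketAddrs;
use std::time::Instant;

use opentelemetry::{global, KeyValue};
use opentelemetry_semantic_conventions as semconv;

fn timed_lookup(hostname: &str) {
    let histogram = global::meter("dns") // illustrative scope name
        .f64_histogram(semconv::metric::DNS_LOOKUP_DURATION)
        .with_unit("s")
        .build();
    let start = Instant::now();
    let _ = (hostname, 0u16).to_socket_addrs(); // placeholder lookup
    histogram.record(
        start.elapsed().as_secs_f64(),
        &[KeyValue::new(semconv::attribute::DNS_QUESTION_NAME, hostname.to_string())],
    );
}
```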
+#[cfg(feature = "semconv_experimental")] +pub const DNS_LOOKUP_DURATION: &str = "dns.lookup.duration"; + /// ## Description -/// Measures the duration of the function's initialization, such as a cold start. +/// +/// The number of .NET assemblies that are currently loaded. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`AppDomain.CurrentDomain.GetAssemblies().Length`](https://learn.microsoft.com/dotnet/api/system.appdomain.getassemblies) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `{assembly}` | /// | Status: | `Experimental` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_INIT_DURATION: &str = "faas.init_duration"; +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_ASSEMBLY_COUNT: &str = "dotnet.assembly.count"; + /// ## Description -/// Number of successful invocations. +/// +/// The number of exceptions that have been thrown in managed code. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as counting calls to [`AppDomain.CurrentDomain.FirstChanceException`](https://learn.microsoft.com/dotnet/api/system.appdomain.firstchanceexception) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `counter` | -/// | Unit: | `{invocation}` | +/// | Unit: | `{exception}` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_INVOCATIONS: &str = "faas.invocations"; +/// | [`crate::attribute::ERROR_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_EXCEPTIONS: &str = "dotnet.exceptions"; + /// ## Description -/// Measures the duration of the function's logic execution. +/// +/// The number of garbage collections that have occurred since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric uses the [`GC.CollectionCount(int generation)`](https://learn.microsoft.com/dotnet/api/system.gc.collectioncount) API to calculate exclusive collections per generation /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Instrument: | `counter` | +/// | Unit: | `{collection}` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_INVOKE_DURATION: &str = "faas.invoke_duration"; +/// | [`crate::attribute::DOTNET_GC_HEAP_GENERATION`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_COLLECTIONS: &str = "dotnet.gc.collections"; + /// ## Description -/// Distribution of max memory usage per invocation. +/// +/// The *approximate* number of bytes allocated on the managed GC heap since the process has started. The returned value does not include any native allocations. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. 
+/// This metric reports the same values as calling [`GC.GetTotalAllocatedBytes()`](https://learn.microsoft.com/dotnet/api/system.gc.gettotalallocatedbytes) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | +/// | Instrument: | `counter` | /// | Unit: | `By` | /// | Status: | `Experimental` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_MEM_USAGE: &str = "faas.mem_usage"; +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_HEAP_TOTAL_ALLOCATED: &str = "dotnet.gc.heap.total_allocated"; + /// ## Description -/// Distribution of net I/O usage per invocation. +/// +/// The heap fragmentation, as observed during the latest garbage collection. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`GC.GetGCMemoryInfo().GenerationInfo.FragmentationAfterBytes`](https://learn.microsoft.com/dotnet/api/system.gcgenerationinfo.fragmentationafterbytes) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | +/// | Instrument: | `updowncounter` | /// | Unit: | `By` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_NET_IO: &str = "faas.net_io"; +/// | [`crate::attribute::DOTNET_GC_HEAP_GENERATION`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_LAST_COLLECTION_HEAP_FRAGMENTATION_SIZE: &str = + "dotnet.gc.last_collection.heap.fragmentation.size"; + /// ## Description -/// Number of invocation timeouts. +/// +/// The managed GC heap size (including fragmentation), as observed during the latest garbage collection. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`GC.GetGCMemoryInfo().GenerationInfo.SizeAfterBytes`](https://learn.microsoft.com/dotnet/api/system.gcgenerationinfo.sizeafterbytes) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `counter` | -/// | Unit: | `{timeout}` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::FAAS_TRIGGER`] | `Unspecified` -pub const FAAS_TIMEOUTS: &str = "faas.timeouts"; +/// | [`crate::attribute::DOTNET_GC_HEAP_GENERATION`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_LAST_COLLECTION_HEAP_SIZE: &str = "dotnet.gc.last_collection.heap.size"; + /// ## Description -/// GenAI operation duration. +/// +/// The amount of committed virtual memory in use by the .NET GC, as observed during the latest garbage collection. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`GC.GetGCMemoryInfo().TotalCommittedBytes`](https://learn.microsoft.com/dotnet/api/system.gcmemoryinfo.totalcommittedbytes). 
Committed virtual memory may be larger than the heap size because it includes both memory for storing existing objects (the heap size) and some extra memory that is ready to handle newly allocated objects in the future /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | /// | Status: | `Experimental` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Required` -/// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: if the operation ended in an error -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally required`: If `server.address` is set. -/// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` -pub const GEN_AI_CLIENT_OPERATION_DURATION: &str = "gen_ai.client.operation.duration"; +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_LAST_COLLECTION_MEMORY_COMMITTED_SIZE: &str = + "dotnet.gc.last_collection.memory.committed_size"; + /// ## Description -/// Measures number of input and output tokens used. +/// +/// The total amount of time paused in GC since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`GC.GetTotalPauseDuration()`](https://learn.microsoft.com/dotnet/api/system.gc.gettotalpauseduration) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | -/// | Unit: | `{token}` | +/// | Instrument: | `counter` | +/// | Unit: | `s` | /// | Status: | `Experimental` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Required` -/// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` -/// | [`crate::attribute::GEN_AI_TOKEN_TYPE`] | `Required` -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally required`: If `server.address` is set. -/// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` -pub const GEN_AI_CLIENT_TOKEN_USAGE: &str = "gen_ai.client.token.usage"; +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_GC_PAUSE_TIME: &str = "dotnet.gc.pause.time"; + /// ## Description -/// Generative AI server request duration such as time-to-last byte or last output token. +/// +/// The amount of time the JIT compiler has spent compiling methods since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. 
+/// This metric reports the same values as calling [`JitInfo.GetCompilationTime()`](https://learn.microsoft.com/dotnet/api/system.runtime.jitinfo.getcompilationtime) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | +/// | Instrument: | `counter` | /// | Unit: | `s` | /// | Status: | `Experimental` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Required` -/// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: if the operation ended in an error -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally required`: If `server.address` is set. -/// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` -pub const GEN_AI_SERVER_REQUEST_DURATION: &str = "gen_ai.server.request.duration"; +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_JIT_COMPILATION_TIME: &str = "dotnet.jit.compilation.time"; + /// ## Description -/// Time per output token generated after the first token for successful responses. +/// +/// Count of bytes of intermediate language that have been compiled since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`JitInfo.GetCompiledILBytes()`](https://learn.microsoft.com/dotnet/api/system.runtime.jitinfo.getcompiledilbytes) /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `histogram` | -/// | Unit: | `s` | +/// | Instrument: | `counter` | +/// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_JIT_COMPILED_IL_SIZE: &str = "dotnet.jit.compiled_il.size"; + +/// ## Description /// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | +/// The number of times the JIT compiler (re)compiled methods since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`JitInfo.GetCompiledMethodCount()`](https://learn.microsoft.com/dotnet/api/system.runtime.jitinfo.getcompiledmethodcount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{method}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_JIT_COMPILED_METHODS: &str = "dotnet.jit.compiled_methods"; + +/// ## Description +/// +/// The number of times there was contention when trying to acquire a monitor lock since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`Monitor.LockContentionCount`](https://learn.microsoft.com/dotnet/api/system.threading.monitor.lockcontentioncount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{contention}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_MONITOR_LOCK_CONTENTIONS: &str = "dotnet.monitor.lock_contentions"; + +/// ## Description +/// +/// The number of processors available to the process. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. 
+/// This metric reports the same values as accessing [`Environment.ProcessorCount`](https://learn.microsoft.com/dotnet/api/system.environment.processorcount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{cpu}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_PROCESS_CPU_COUNT: &str = "dotnet.process.cpu.count"; + +/// ## Description +/// +/// CPU time used by the process. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as accessing the corresponding processor time properties on [`System.Diagnostics.Process`](https://learn.microsoft.com/dotnet/api/system.diagnostics.process) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::CPU_MODE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_PROCESS_CPU_TIME: &str = "dotnet.process.cpu.time"; + +/// ## Description +/// +/// The number of bytes of physical memory mapped to the process context. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`Environment.WorkingSet`](https://learn.microsoft.com/dotnet/api/system.environment.workingset) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_PROCESS_MEMORY_WORKING_SET: &str = "dotnet.process.memory.working_set"; + +/// ## Description +/// +/// The number of work items that are currently queued to be processed by the thread pool. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`ThreadPool.PendingWorkItemCount`](https://learn.microsoft.com/dotnet/api/system.threading.threadpool.pendingworkitemcount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{work_item}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_THREAD_POOL_QUEUE_LENGTH: &str = "dotnet.thread_pool.queue.length"; + +/// ## Description +/// +/// The number of thread pool threads that currently exist. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`ThreadPool.ThreadCount`](https://learn.microsoft.com/dotnet/api/system.threading.threadpool.threadcount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{thread}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_THREAD_POOL_THREAD_COUNT: &str = "dotnet.thread_pool.thread.count"; + +/// ## Description +/// +/// The number of work items that the thread pool has completed since the process has started. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. 
+/// This metric reports the same values as calling [`ThreadPool.CompletedWorkItemCount`](https://learn.microsoft.com/dotnet/api/system.threading.threadpool.completedworkitemcount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{work_item}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_THREAD_POOL_WORK_ITEM_COUNT: &str = "dotnet.thread_pool.work_item.count"; + +/// ## Description +/// +/// The number of timer instances that are currently active. +/// +/// ## Notes +/// +/// Meter name: `System.Runtime`; Added in: .NET 9.0. +/// This metric reports the same values as calling [`Timer.ActiveCount`](https://learn.microsoft.com/dotnet/api/system.threading.timer.activecount) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{timer}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const DOTNET_TIMER_COUNT: &str = "dotnet.timer.count"; + +/// ## Description +/// +/// Number of invocation cold starts +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{coldstart}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_COLDSTARTS: &str = "faas.coldstarts"; + +/// ## Description +/// +/// Distribution of CPU usage per invocation +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_CPU_USAGE: &str = "faas.cpu_usage"; + +/// ## Description +/// +/// Number of invocation errors +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_ERRORS: &str = "faas.errors"; + +/// ## Description +/// +/// Measures the duration of the function's initialization, such as a cold start +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_INIT_DURATION: &str = "faas.init_duration"; + +/// ## Description +/// +/// Number of successful invocations +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{invocation}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_INVOCATIONS: &str = "faas.invocations"; + +/// ## Description +/// +/// Measures the duration of the function's logic execution +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const 
FAAS_INVOKE_DURATION: &str = "faas.invoke_duration"; + +/// ## Description +/// +/// Distribution of max memory usage per invocation +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `By` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_MEM_USAGE: &str = "faas.mem_usage"; + +/// ## Description +/// +/// Distribution of net I/O usage per invocation +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `By` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_NET_IO: &str = "faas.net_io"; + +/// ## Description +/// +/// Number of invocation timeouts +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{timeout}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::FAAS_TRIGGER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const FAAS_TIMEOUTS: &str = "faas.timeouts"; + +/// ## Description +/// +/// GenAI operation duration +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: if the operation ended in an error +/// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` +/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Conditionally_required`: If available. +/// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` +/// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_CLIENT_OPERATION_DURATION: &str = "gen_ai.client.operation.duration"; + +/// ## Description +/// +/// Measures number of input and output tokens used +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `{token}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | /// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Required` +/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Conditionally_required`: If available. +/// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` /// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally required`: If `server.address` is set. +/// | [`crate::attribute::GEN_AI_TOKEN_TYPE`] | `Required` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If `server.address` is set. 
+#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_CLIENT_TOKEN_USAGE: &str = "gen_ai.client.token.usage"; + +/// ## Description +/// +/// Generative AI server request duration such as time-to-last byte or last output token +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: if the operation ended in an error +/// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` +/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Conditionally_required`: If available. /// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` +/// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` /// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` -pub const GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: &str = "gen_ai.server.time_per_output_token"; +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_SERVER_REQUEST_DURATION: &str = "gen_ai.server.request.duration"; + /// ## Description -/// Time to generate first token for successful responses. +/// +/// Time per output token generated after the first token for successful responses /// ## Metadata /// | | | /// |:-|:- @@ -735,82 +1335,139 @@ pub const GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: &str = "gen_ai.server.time_per_ou /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Required` +/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Conditionally_required`: If available. +/// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` /// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` -/// | [`crate::attribute::SERVER_PORT`] | `Conditionally required`: If `server.address` is set. +/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If `server.address` is set. +#[cfg(feature = "semconv_experimental")] +pub const GEN_AI_SERVER_TIME_PER_OUTPUT_TOKEN: &str = "gen_ai.server.time_per_output_token"; + +/// ## Description +/// +/// Time to generate first token for successful responses +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `histogram` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::GEN_AI_OPERATION_NAME`] | `Required` +/// | [`crate::attribute::GEN_AI_REQUEST_MODEL`] | `Conditionally_required`: If available. /// | [`crate::attribute::GEN_AI_RESPONSE_MODEL`] | `Recommended` +/// | [`crate::attribute::GEN_AI_SYSTEM`] | `Required` /// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended` +/// | [`crate::attribute::SERVER_PORT`] | `Conditionally_required`: If `server.address` is set. +#[cfg(feature = "semconv_experimental")] pub const GEN_AI_SERVER_TIME_TO_FIRST_TOKEN: &str = "gen_ai.server.time_to_first_token"; + /// ## Description +/// /// Heap size target percentage configured by the user, otherwise 100. /// -/// The value range is \[0.0,100.0\]. Computed from `/gc/gogc:percent`. +/// ## Notes +/// +/// The value range is \\[0.0,100.0\\]. 
Computed from `/gc/gogc:percent` /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `%` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_CONFIG_GOGC: &str = "go.config.gogc"; + /// ## Description +/// /// Count of live goroutines. /// -/// Computed from `/sched/goroutines:goroutines`. +/// ## Notes +/// +/// Computed from `/sched/goroutines:goroutines` /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{goroutine}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_GOROUTINE_COUNT: &str = "go.goroutine.count"; + /// ## Description +/// /// Memory allocated to the heap by the application. /// -/// Computed from `/gc/heap/allocs:bytes`. +/// ## Notes +/// +/// Computed from `/gc/heap/allocs:bytes` /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_ALLOCATED: &str = "go.memory.allocated"; + /// ## Description +/// /// Count of allocations to the heap by the application. /// -/// Computed from `/gc/heap/allocs:objects`. +/// ## Notes +/// +/// Computed from `/gc/heap/allocs:objects` /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `counter` | /// | Unit: | `{allocation}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_ALLOCATIONS: &str = "go.memory.allocations"; + /// ## Description +/// /// Heap size target for the end of the GC cycle. /// -/// Computed from `/gc/heap/goal:bytes`. +/// ## Notes +/// +/// Computed from `/gc/heap/goal:bytes` /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_GC_GOAL: &str = "go.memory.gc.goal"; + /// ## Description +/// /// Go runtime memory limit configured by the user, if a limit exists. /// -/// Computed from `/gc/gomemlimit:bytes`. This metric is excluded if the limit obtained from the Go runtime is math.MaxInt64. +/// ## Notes +/// +/// Computed from `/gc/gomemlimit:bytes`. This metric is excluded if the limit obtained from the Go runtime is math.MaxInt64 /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_LIMIT: &str = "go.memory.limit"; + /// ## Description +/// /// Memory used by the Go runtime. /// -/// Computed from `(/memory/classes/total:bytes - /memory/classes/heap/released:bytes)`. +/// ## Notes +/// +/// Computed from `(/memory/classes/total:bytes - /memory/classes/heap/released:bytes)` /// ## Metadata /// | | | /// |:-|:- @@ -822,31 +1479,44 @@ pub const GO_MEMORY_LIMIT: &str = "go.memory.limit"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::GO_MEMORY_TYPE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const GO_MEMORY_USED: &str = "go.memory.used"; + /// ## Description +/// /// The number of OS threads that can execute user-level Go code simultaneously. /// -/// Computed from `/sched/gomaxprocs:threads`. 
+/// ## Notes +/// +/// Computed from `/sched/gomaxprocs:threads` /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{thread}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_PROCESSOR_LIMIT: &str = "go.processor.limit"; + /// ## Description +/// /// The time goroutines have spent in the scheduler in a runnable state before actually running. /// -/// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by the runtime, and are subject to change. +/// ## Notes +/// +/// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by the runtime, and are subject to change /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const GO_SCHEDULE_DURATION: &str = "go.schedule.duration"; + /// ## Description -/// Number of active HTTP requests. +/// +/// Number of active HTTP requests /// ## Metadata /// | | | /// |:-|:- @@ -857,14 +1527,17 @@ pub const GO_SCHEDULE_DURATION: &str = "go.schedule.duration"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Recommended` /// | [`crate::attribute::SERVER_ADDRESS`] | `Required` /// | [`crate::attribute::SERVER_PORT`] | `Required` -/// | [`crate::attribute::URL_TEMPLATE`] | `Conditionally required`: If available. -/// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Recommended` -/// | [`crate::attribute::URL_SCHEME`] | `Opt in` +/// | [`crate::attribute::URL_SCHEME`] | `Opt_in` +/// | [`crate::attribute::URL_TEMPLATE`] | `Conditionally_required`: If available. +#[cfg(feature = "semconv_experimental")] pub const HTTP_CLIENT_ACTIVE_REQUESTS: &str = "http.client.active_requests"; + /// ## Description -/// The duration of the successfully established outbound HTTP connections. +/// +/// The duration of the successfully established outbound HTTP connections /// ## Metadata /// | | | /// |:-|:- @@ -875,14 +1548,17 @@ pub const HTTP_CLIENT_ACTIVE_REQUESTS: &str = "http.client.active_requests"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::SERVER_ADDRESS`] | `Required` -/// | [`crate::attribute::SERVER_PORT`] | `Required` /// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `Recommended` /// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` -/// | [`crate::attribute::URL_SCHEME`] | `Opt in` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Required` +/// | [`crate::attribute::SERVER_PORT`] | `Required` +/// | [`crate::attribute::URL_SCHEME`] | `Opt_in` +#[cfg(feature = "semconv_experimental")] pub const HTTP_CLIENT_CONNECTION_DURATION: &str = "http.client.connection.duration"; + /// ## Description -/// Number of outbound HTTP connections that are currently active or idle on the client. 
+/// +/// Number of outbound HTTP connections that are currently active or idle on the client /// ## Metadata /// | | | /// |:-|:- @@ -894,16 +1570,21 @@ pub const HTTP_CLIENT_CONNECTION_DURATION: &str = "http.client.connection.durati /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::HTTP_CONNECTION_STATE`] | `Required` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Required` -/// | [`crate::attribute::SERVER_PORT`] | `Required` /// | [`crate::attribute::NETWORK_PEER_ADDRESS`] | `Recommended` /// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` -/// | [`crate::attribute::URL_SCHEME`] | `Opt in` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Required` +/// | [`crate::attribute::SERVER_PORT`] | `Required` +/// | [`crate::attribute::URL_SCHEME`] | `Opt_in` +#[cfg(feature = "semconv_experimental")] pub const HTTP_CLIENT_OPEN_CONNECTIONS: &str = "http.client.open_connections"; + /// ## Description +/// /// Size of HTTP client request bodies. /// -/// The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +/// ## Notes +/// +/// The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size /// ## Metadata /// | | | /// |:-|:- @@ -914,18 +1595,21 @@ pub const HTTP_CLIENT_OPEN_CONNECTIONS: &str = "http.client.open_connections"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If request has ended with an error. /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If and only if one was received/sent. +/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally_required`: If not `http` and `network.protocol.version` is set. +/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` /// | [`crate::attribute::SERVER_ADDRESS`] | `Required` /// | [`crate::attribute::SERVER_PORT`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If request has ended with an error. -/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally required`: If and only if one was received/sent. -/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally required`: If not `http` and `network.protocol.version` is set. -/// | [`crate::attribute::URL_TEMPLATE`] | `Conditionally required`: If available. -/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified` -/// | [`crate::attribute::URL_SCHEME`] | `Opt in` +/// | [`crate::attribute::URL_SCHEME`] | `Opt_in` +/// | [`crate::attribute::URL_TEMPLATE`] | `Conditionally_required`: If available. +#[cfg(feature = "semconv_experimental")] pub const HTTP_CLIENT_REQUEST_BODY_SIZE: &str = "http.client.request.body.size"; + /// ## Description -/// Duration of HTTP client requests. 
+/// +/// Duration of HTTP client requests /// ## Metadata /// | | | /// |:-|:- @@ -936,19 +1620,24 @@ pub const HTTP_CLIENT_REQUEST_BODY_SIZE: &str = "http.client.request.body.size"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If request has ended with an error. /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If and only if one was received/sent. +/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally_required`: If not `http` and `network.protocol.version` is set. +/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` /// | [`crate::attribute::SERVER_ADDRESS`] | `Required` /// | [`crate::attribute::SERVER_PORT`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If request has ended with an error. -/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally required`: If and only if one was received/sent. -/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally required`: If not `http` and `network.protocol.version` is set. -/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified` -/// | [`crate::attribute::URL_SCHEME`] | `Opt in` +/// | [`crate::attribute::URL_SCHEME`] | `Opt_in` +/// | [`crate::attribute::URL_TEMPLATE`] | `Opt_in` pub const HTTP_CLIENT_REQUEST_DURATION: &str = "http.client.request.duration"; + /// ## Description +/// /// Size of HTTP client response bodies. /// -/// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +/// ## Notes +/// +/// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size /// ## Metadata /// | | | /// |:-|:- @@ -959,18 +1648,21 @@ pub const HTTP_CLIENT_REQUEST_DURATION: &str = "http.client.request.duration"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If request has ended with an error. /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If and only if one was received/sent. +/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally_required`: If not `http` and `network.protocol.version` is set. +/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` /// | [`crate::attribute::SERVER_ADDRESS`] | `Required` /// | [`crate::attribute::SERVER_PORT`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If request has ended with an error. -/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally required`: If and only if one was received/sent. -/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally required`: If not `http` and `network.protocol.version` is set. -/// | [`crate::attribute::URL_TEMPLATE`] | `Conditionally required`: If available. 
-/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified` -/// | [`crate::attribute::URL_SCHEME`] | `Opt in` +/// | [`crate::attribute::URL_SCHEME`] | `Opt_in` +/// | [`crate::attribute::URL_TEMPLATE`] | `Conditionally_required`: If available. +#[cfg(feature = "semconv_experimental")] pub const HTTP_CLIENT_RESPONSE_BODY_SIZE: &str = "http.client.response.body.size"; + /// ## Description -/// Number of active HTTP server requests. +/// +/// Number of active HTTP server requests /// ## Metadata /// | | | /// |:-|:- @@ -982,14 +1674,19 @@ pub const HTTP_CLIENT_RESPONSE_BODY_SIZE: &str = "http.client.response.body.size /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt_in` +/// | [`crate::attribute::SERVER_PORT`] | `Opt_in` /// | [`crate::attribute::URL_SCHEME`] | `Required` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt in` -/// | [`crate::attribute::SERVER_PORT`] | `Opt in` +#[cfg(feature = "semconv_experimental")] pub const HTTP_SERVER_ACTIVE_REQUESTS: &str = "http.server.active_requests"; + /// ## Description +/// /// Size of HTTP server request bodies. /// -/// The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +/// ## Notes +/// +/// The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size /// ## Metadata /// | | | /// |:-|:- @@ -1000,18 +1697,22 @@ pub const HTTP_SERVER_ACTIVE_REQUESTS: &str = "http.server.active_requests"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If request has ended with an error. /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If and only if one was received/sent. +/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally_required`: If and only if it's available +/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally_required`: If not `http` and `network.protocol.version` is set. +/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt_in` +/// | [`crate::attribute::SERVER_PORT`] | `Opt_in` /// | [`crate::attribute::URL_SCHEME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If request has ended with an error. -/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally required`: If and only if one was received/sent. -/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally required`: If and only if it's available -/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally required`: If not `http` and `network.protocol.version` is set. 
-/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt in` -/// | [`crate::attribute::SERVER_PORT`] | `Opt in` +/// | [`crate::attribute::USER_AGENT_SYNTHETIC_TYPE`] | `Opt_in` +#[cfg(feature = "semconv_experimental")] pub const HTTP_SERVER_REQUEST_BODY_SIZE: &str = "http.server.request.body.size"; + /// ## Description -/// Duration of HTTP server requests. +/// +/// Duration of HTTP server requests /// ## Metadata /// | | | /// |:-|:- @@ -1022,20 +1723,25 @@ pub const HTTP_SERVER_REQUEST_BODY_SIZE: &str = "http.server.request.body.size"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If request has ended with an error. /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If and only if one was received/sent. +/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally_required`: If and only if it's available +/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally_required`: If not `http` and `network.protocol.version` is set. +/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt_in` +/// | [`crate::attribute::SERVER_PORT`] | `Opt_in` /// | [`crate::attribute::URL_SCHEME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If request has ended with an error. -/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally required`: If and only if one was received/sent. -/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally required`: If and only if it's available -/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally required`: If not `http` and `network.protocol.version` is set. -/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt in` -/// | [`crate::attribute::SERVER_PORT`] | `Opt in` +/// | [`crate::attribute::USER_AGENT_SYNTHETIC_TYPE`] | `Opt_in` pub const HTTP_SERVER_REQUEST_DURATION: &str = "http.server.request.duration"; + /// ## Description +/// /// Size of HTTP server response bodies. /// -/// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size. +/// ## Notes +/// +/// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) header. For requests using transport encoding, this should be the compressed size /// ## Metadata /// | | | /// |:-|:- @@ -1046,18 +1752,112 @@ pub const HTTP_SERVER_REQUEST_DURATION: &str = "http.server.request.duration"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If request has ended with an error. /// | [`crate::attribute::HTTP_REQUEST_METHOD`] | `Required` +/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally_required`: If and only if one was received/sent. 
+/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally_required`: If and only if it's available +/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally_required`: If not `http` and `network.protocol.version` is set. +/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt_in` +/// | [`crate::attribute::SERVER_PORT`] | `Opt_in` /// | [`crate::attribute::URL_SCHEME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If request has ended with an error. -/// | [`crate::attribute::HTTP_RESPONSE_STATUS_CODE`] | `Conditionally required`: If and only if one was received/sent. -/// | [`crate::attribute::HTTP_ROUTE`] | `Conditionally required`: If and only if it's available -/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Conditionally required`: If not `http` and `network.protocol.version` is set. -/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified` -/// | [`crate::attribute::SERVER_ADDRESS`] | `Opt in` -/// | [`crate::attribute::SERVER_PORT`] | `Opt in` +/// | [`crate::attribute::USER_AGENT_SYNTHETIC_TYPE`] | `Opt_in` +#[cfg(feature = "semconv_experimental")] pub const HTTP_SERVER_RESPONSE_BODY_SIZE: &str = "http.server.response.body.size"; + +/// ## Description +/// +/// Energy consumed by the component +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `J` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::HW_ID`] | `Required` +/// | [`crate::attribute::HW_NAME`] | `Recommended` +/// | [`crate::attribute::HW_PARENT`] | `Recommended` +/// | [`crate::attribute::HW_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const HW_ENERGY: &str = "hw.energy"; + +/// ## Description +/// +/// Number of errors encountered by the component +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: if and only if an error has occurred +/// | [`crate::attribute::HW_ID`] | `Required` +/// | [`crate::attribute::HW_NAME`] | `Recommended` +/// | [`crate::attribute::HW_PARENT`] | `Recommended` +/// | [`crate::attribute::HW_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const HW_ERRORS: &str = "hw.errors"; + +/// ## Description +/// +/// Instantaneous power consumed by the component +/// +/// ## Notes +/// +/// It is recommended to report `hw.energy` instead of `hw.power` when possible +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `W` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::HW_ID`] | `Required` +/// | [`crate::attribute::HW_NAME`] | `Recommended` +/// | [`crate::attribute::HW_PARENT`] | `Recommended` +/// | [`crate::attribute::HW_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const HW_POWER: &str = "hw.power"; + +/// ## Description +/// +/// Operational status: `1` (true) or `0` (false) for each of the possible states +/// +/// ## Notes +/// +/// `hw.status` is currently specified as an *UpDownCounter* but would ideally be represented using a [*StateSet* as defined in OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#stateset). 
This semantic convention will be updated once *StateSet* is specified in OpenTelemetry. This planned change is not expected to have any consequence on the way users query their timeseries backend to retrieve the values of `hw.status` over time +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `1` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::HW_ID`] | `Required` +/// | [`crate::attribute::HW_NAME`] | `Recommended` +/// | [`crate::attribute::HW_PARENT`] | `Recommended` +/// | [`crate::attribute::HW_STATE`] | `Required` +/// | [`crate::attribute::HW_TYPE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const HW_STATUS: &str = "hw.status"; + /// ## Description -/// Number of buffers in the pool. +/// +/// Number of buffers in the pool /// ## Metadata /// | | | /// |:-|:- @@ -1069,9 +1869,12 @@ pub const HTTP_SERVER_RESPONSE_BODY_SIZE: &str = "http.server.response.body.size /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::JVM_BUFFER_POOL_NAME`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const JVM_BUFFER_COUNT: &str = "jvm.buffer.count"; + /// ## Description -/// Measure of total memory capacity of buffers. +/// +/// Measure of total memory capacity of buffers /// ## Metadata /// | | | /// |:-|:- @@ -1083,9 +1886,12 @@ pub const JVM_BUFFER_COUNT: &str = "jvm.buffer.count"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::JVM_BUFFER_POOL_NAME`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const JVM_BUFFER_MEMORY_LIMIT: &str = "jvm.buffer.memory.limit"; + /// ## Description -/// Deprecated, use `jvm.buffer.memory.used` instead. +/// +/// Deprecated, use `jvm.buffer.memory.used` instead /// ## Metadata /// | | | /// |:-|:- @@ -1097,10 +1903,13 @@ pub const JVM_BUFFER_MEMORY_LIMIT: &str = "jvm.buffer.memory.limit"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::JVM_BUFFER_POOL_NAME`] | `Recommended` -#[deprecated] +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `jvm.buffer.memory.used`.")] pub const JVM_BUFFER_MEMORY_USAGE: &str = "jvm.buffer.memory.usage"; + /// ## Description -/// Measure of memory used by buffers. +/// +/// Measure of memory used by buffers /// ## Metadata /// | | | /// |:-|:- @@ -1112,9 +1921,12 @@ pub const JVM_BUFFER_MEMORY_USAGE: &str = "jvm.buffer.memory.usage"; /// | Name | Requirement | /// |:-|:- | /// | [`crate::attribute::JVM_BUFFER_POOL_NAME`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const JVM_BUFFER_MEMORY_USED: &str = "jvm.buffer.memory.used"; + /// ## Description -/// Number of classes currently loaded. +/// +/// Number of classes currently loaded /// ## Metadata /// | | | /// |:-|:- @@ -1122,8 +1934,10 @@ pub const JVM_BUFFER_MEMORY_USED: &str = "jvm.buffer.memory.used"; /// | Unit: | `{class}` | /// | Status: | `Stable` | pub const JVM_CLASS_COUNT: &str = "jvm.class.count"; + /// ## Description -/// Number of classes loaded since JVM start. +/// +/// Number of classes loaded since JVM start /// ## Metadata /// | | | /// |:-|:- @@ -1131,8 +1945,10 @@ pub const JVM_CLASS_COUNT: &str = "jvm.class.count"; /// | Unit: | `{class}` | /// | Status: | `Stable` | pub const JVM_CLASS_LOADED: &str = "jvm.class.loaded"; + /// ## Description -/// Number of classes unloaded since JVM start. 
+/// +/// Number of classes unloaded since JVM start /// ## Metadata /// | | | /// |:-|:- @@ -1140,8 +1956,10 @@ pub const JVM_CLASS_LOADED: &str = "jvm.class.loaded"; /// | Unit: | `{class}` | /// | Status: | `Stable` | pub const JVM_CLASS_UNLOADED: &str = "jvm.class.unloaded"; + /// ## Description -/// Number of processors available to the Java virtual machine. +/// +/// Number of processors available to the Java virtual machine /// ## Metadata /// | | | /// |:-|:- @@ -1149,10 +1967,14 @@ pub const JVM_CLASS_UNLOADED: &str = "jvm.class.unloaded"; /// | Unit: | `{cpu}` | /// | Status: | `Stable` | pub const JVM_CPU_COUNT: &str = "jvm.cpu.count"; + /// ## Description +/// /// Recent CPU utilization for the process as reported by the JVM. /// -/// The value range is \[0.0,1.0\]. This utilization is not defined as being for the specific interval since last measurement (unlike `system.cpu.utilization`). [Reference](https://docs.oracle.com/en/java/javase/17/docs/api/jdk.management/com/sun/management/OperatingSystemMXBean.html#getProcessCpuLoad()). +/// ## Notes +/// +/// The value range is \\[0.0,1.0\\]. This utilization is not defined as being for the specific interval since last measurement (unlike `system.cpu.utilization`). [Reference](https://docs.oracle.com/en/java/javase/17/docs/api/jdk.management/com/sun/management/OperatingSystemMXBean.html#getProcessCpuLoad()) /// ## Metadata /// | | | /// |:-|:- @@ -1160,8 +1982,10 @@ pub const JVM_CPU_COUNT: &str = "jvm.cpu.count"; /// | Unit: | `1` | /// | Status: | `Stable` | pub const JVM_CPU_RECENT_UTILIZATION: &str = "jvm.cpu.recent_utilization"; + /// ## Description -/// CPU time used by the process as reported by the JVM. +/// +/// CPU time used by the process as reported by the JVM /// ## Metadata /// | | | /// |:-|:- @@ -1169,8 +1993,10 @@ pub const JVM_CPU_RECENT_UTILIZATION: &str = "jvm.cpu.recent_utilization"; /// | Unit: | `s` | /// | Status: | `Stable` | pub const JVM_CPU_TIME: &str = "jvm.cpu.time"; + /// ## Description -/// Duration of JVM garbage collection actions. 
+/// +/// Duration of JVM garbage collection actions /// ## Metadata /// | | | /// |:-|:- @@ -1184,121 +2010,350 @@ pub const JVM_CPU_TIME: &str = "jvm.cpu.time"; /// | [`crate::attribute::JVM_GC_ACTION`] | `Recommended` /// | [`crate::attribute::JVM_GC_NAME`] | `Recommended` pub const JVM_GC_DURATION: &str = "jvm.gc.duration"; + +/// ## Description +/// +/// Measure of memory committed +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Stable` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` +/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` +pub const JVM_MEMORY_COMMITTED: &str = "jvm.memory.committed"; + +/// ## Description +/// +/// Measure of initial memory requested +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` +/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const JVM_MEMORY_INIT: &str = "jvm.memory.init"; + +/// ## Description +/// +/// Measure of max obtainable memory +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Stable` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` +/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` +pub const JVM_MEMORY_LIMIT: &str = "jvm.memory.limit"; + +/// ## Description +/// +/// Measure of memory used +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Stable` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` +/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` +pub const JVM_MEMORY_USED: &str = "jvm.memory.used"; + +/// ## Description +/// +/// Measure of memory used, as measured after the most recent garbage collection event on this pool +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `By` | +/// | Status: | `Stable` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` +/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` +pub const JVM_MEMORY_USED_AFTER_LAST_GC: &str = "jvm.memory.used_after_last_gc"; + +/// ## Description +/// +/// Average CPU load of the whole system for the last minute as reported by the JVM. +/// +/// ## Notes +/// +/// The value range is \\[0,n\\], where n is the number of CPU cores - or a negative number if the value is not available. This utilization is not defined as being for the specific interval since last measurement (unlike `system.cpu.utilization`). 
[Reference](https://docs.oracle.com/en/java/javase/17/docs/api/java.management/java/lang/management/OperatingSystemMXBean.html#getSystemLoadAverage()) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `{run_queue_item}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const JVM_SYSTEM_CPU_LOAD_1M: &str = "jvm.system.cpu.load_1m"; + +/// ## Description +/// +/// Recent CPU utilization for the whole system as reported by the JVM. +/// +/// ## Notes +/// +/// The value range is \\[0.0,1.0\\]. This utilization is not defined as being for the specific interval since last measurement (unlike `system.cpu.utilization`). [Reference](https://docs.oracle.com/en/java/javase/17/docs/api/jdk.management/com/sun/management/OperatingSystemMXBean.html#getCpuLoad()) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `1` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const JVM_SYSTEM_CPU_UTILIZATION: &str = "jvm.system.cpu.utilization"; + +/// ## Description +/// +/// Number of executing platform threads +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `updowncounter` | +/// | Unit: | `{thread}` | +/// | Status: | `Stable` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::JVM_THREAD_DAEMON`] | `Recommended` +/// | [`crate::attribute::JVM_THREAD_STATE`] | `Recommended` +pub const JVM_THREAD_COUNT: &str = "jvm.thread.count"; + +/// ## Description +/// +/// Total CPU time consumed +/// +/// ## Notes +/// +/// Total CPU time consumed by the specific Node on all available CPU cores +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CPU_TIME: &str = "k8s.node.cpu.time"; + +/// ## Description +/// +/// Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +/// +/// ## Notes +/// +/// CPU usage of the specific Node on all available CPU cores, averaged over the sample window +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `{cpu}` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_CPU_USAGE: &str = "k8s.node.cpu.usage"; + /// ## Description -/// Measure of memory committed. +/// +/// Memory usage of the Node +/// +/// ## Notes +/// +/// Total memory usage of the Node /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `updowncounter` | +/// | Instrument: | `gauge` | /// | Unit: | `By` | -/// | Status: | `Stable` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` -/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` -pub const JVM_MEMORY_COMMITTED: &str = "jvm.memory.committed"; +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_MEMORY_USAGE: &str = "k8s.node.memory.usage"; + /// ## Description -/// Measure of initial memory requested. 
+/// +/// Node network errors /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `updowncounter` | -/// | Unit: | `By` | +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | /// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` -/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` -pub const JVM_MEMORY_INIT: &str = "jvm.memory.init"; +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_NETWORK_ERRORS: &str = "k8s.node.network.errors"; + /// ## Description -/// Measure of max obtainable memory. +/// +/// Network bytes for the Node /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `updowncounter` | +/// | Instrument: | `counter` | /// | Unit: | `By` | -/// | Status: | `Stable` | +/// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` -/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` -pub const JVM_MEMORY_LIMIT: &str = "jvm.memory.limit"; +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_NETWORK_IO: &str = "k8s.node.network.io"; + /// ## Description -/// Measure of memory used. +/// +/// The time the Node has been running +/// +/// ## Notes +/// +/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +/// The actual accuracy would depend on the instrumentation and operating system /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `updowncounter` | -/// | Unit: | `By` | -/// | Status: | `Stable` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` -/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` -pub const JVM_MEMORY_USED: &str = "jvm.memory.used"; +/// | Instrument: | `gauge` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_NODE_UPTIME: &str = "k8s.node.uptime"; + /// ## Description -/// Measure of memory used, as measured after the most recent garbage collection event on this pool. +/// +/// Total CPU time consumed +/// +/// ## Notes +/// +/// Total CPU time consumed by the specific Pod on all available CPU cores /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `updowncounter` | -/// | Unit: | `By` | -/// | Status: | `Stable` | -/// -/// ## Attributes -/// | Name | Requirement | -/// |:-|:- | -/// | [`crate::attribute::JVM_MEMORY_POOL_NAME`] | `Recommended` -/// | [`crate::attribute::JVM_MEMORY_TYPE`] | `Recommended` -pub const JVM_MEMORY_USED_AFTER_LAST_GC: &str = "jvm.memory.used_after_last_gc"; +/// | Instrument: | `counter` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_CPU_TIME: &str = "k8s.pod.cpu.time"; + /// ## Description -/// Average CPU load of the whole system for the last minute as reported by the JVM. /// -/// The value range is \[0,n\], where n is the number of CPU cores - or a negative number if the value is not available. 
This utilization is not defined as being for the specific interval since last measurement (unlike `system.cpu.utilization`). [Reference](https://docs.oracle.com/en/java/javase/17/docs/api/java.management/java/lang/management/OperatingSystemMXBean.html#getSystemLoadAverage()). +/// Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs +/// +/// ## Notes +/// +/// CPU usage of the specific Pod on all available CPU cores, averaged over the sample window /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | -/// | Unit: | `{run_queue_item}` | +/// | Unit: | `{cpu}` | /// | Status: | `Experimental` | -pub const JVM_SYSTEM_CPU_LOAD_1M: &str = "jvm.system.cpu.load_1m"; +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_CPU_USAGE: &str = "k8s.pod.cpu.usage"; + /// ## Description -/// Recent CPU utilization for the whole system as reported by the JVM. /// -/// The value range is \[0.0,1.0\]. This utilization is not defined as being for the specific interval since last measurement (unlike `system.cpu.utilization`). [Reference](https://docs.oracle.com/en/java/javase/17/docs/api/jdk.management/com/sun/management/OperatingSystemMXBean.html#getCpuLoad()). +/// Memory usage of the Pod +/// +/// ## Notes +/// +/// Total memory usage of the Pod /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | -/// | Unit: | `1` | +/// | Unit: | `By` | /// | Status: | `Experimental` | -pub const JVM_SYSTEM_CPU_UTILIZATION: &str = "jvm.system.cpu.utilization"; +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_MEMORY_USAGE: &str = "k8s.pod.memory.usage"; + /// ## Description -/// Number of executing platform threads. +/// +/// Pod network errors /// ## Metadata /// | | | /// |:-|:- -/// | Instrument: | `updowncounter` | -/// | Unit: | `{thread}` | -/// | Status: | `Stable` | +/// | Instrument: | `counter` | +/// | Unit: | `{error}` | +/// | Status: | `Experimental` | /// /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::JVM_THREAD_DAEMON`] | `Recommended` -/// | [`crate::attribute::JVM_THREAD_STATE`] | `Recommended` -pub const JVM_THREAD_COUNT: &str = "jvm.thread.count"; +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_NETWORK_ERRORS: &str = "k8s.pod.network.errors"; + +/// ## Description +/// +/// Network bytes for the Pod +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `By` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const K8S_POD_NETWORK_IO: &str = "k8s.pod.network.io"; + +/// ## Description +/// +/// The time the Pod has been running +/// +/// ## Notes +/// +/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. 
+/// The actual accuracy would depend on the instrumentation and operating system
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `s` |
+/// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
+pub const K8S_POD_UPTIME: &str = "k8s.pod.uptime";
+
 /// ## Description
+///
 /// Number of connections that are currently active on the server.
 ///
+/// ## Notes
+///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
 /// | | |
@@ -1310,14 +2365,18 @@ pub const JVM_THREAD_COUNT: &str = "jvm.thread.count";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
 pub const KESTREL_ACTIVE_CONNECTIONS: &str = "kestrel.active_connections";
+
 /// ## Description
+///
 /// Number of TLS handshakes that are currently in progress on the server.
 ///
+/// ## Notes
+///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
 /// | | |
@@ -1329,14 +2388,18 @@ pub const KESTREL_ACTIVE_CONNECTIONS: &str = "kestrel.active_connections";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
 pub const KESTREL_ACTIVE_TLS_HANDSHAKES: &str = "kestrel.active_tls_handshakes";
+
 /// ## Description
+///
 /// The duration of connections on the server.
 ///
+/// ## Notes
+///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
 /// | | |
@@ -1348,18 +2411,22 @@ pub const KESTREL_ACTIVE_TLS_HANDSHAKES: &str = "kestrel.active_tls_handshakes";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: if and only if an error has occurred.
-/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
-/// | [`crate::attribute::TLS_PROTOCOL_VERSION`] | `Unspecified`
+/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: if and only if an error has occurred.
+/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Recommended`
+/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
+/// | [`crate::attribute::TLS_PROTOCOL_VERSION`] | `Recommended`
 pub const KESTREL_CONNECTION_DURATION: &str = "kestrel.connection.duration";
+
 /// ## Description
+///
 /// Number of connections that are currently queued and are waiting to start.
 ///
+/// ## Notes
+///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
 /// | | |
@@ -1371,14 +2438,18 @@ pub const KESTREL_CONNECTION_DURATION: &str = "kestrel.connection.duration";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
 pub const KESTREL_QUEUED_CONNECTIONS: &str = "kestrel.queued_connections";
+
 /// ## Description
+///
 /// Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start.
 ///
+/// ## Notes
+///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
 /// | | |
@@ -1390,16 +2461,20 @@ pub const KESTREL_QUEUED_CONNECTIONS: &str = "kestrel.queued_connections";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_PROTOCOL_NAME`] | `Recommended`
+/// | [`crate::attribute::NETWORK_PROTOCOL_VERSION`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
 pub const KESTREL_QUEUED_REQUESTS: &str = "kestrel.queued_requests";
+
 /// ## Description
+///
 /// Number of connections rejected by the server.
 ///
+/// ## Notes
+///
 /// Connections are rejected when the currently active count exceeds the value configured with `MaxConcurrentConnections`.
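+///
+/// For illustration only (not part of the generated definitions): recent
+/// versions of the `opentelemetry` crate let an instrument be built directly
+/// from this constant. A minimal sketch, assuming a configured global meter
+/// provider and the `build()`-style instrument builders:
+///
+/// ```ignore
+/// use opentelemetry::{global, KeyValue};
+///
+/// let meter = global::meter("kestrel-bridge");
+/// // Counter named after this semantic convention.
+/// let rejected = meter.u64_counter(KESTREL_REJECTED_CONNECTIONS).build();
+/// rejected.add(1, &[KeyValue::new("network.transport", "tcp")]);
+/// ```
+///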
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
@@ -1412,14 +2487,18 @@ pub const KESTREL_QUEUED_REQUESTS: &str = "kestrel.queued_requests";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
 pub const KESTREL_REJECTED_CONNECTIONS: &str = "kestrel.rejected_connections";
+
 /// ## Description
+///
 /// The duration of TLS handshakes on the server.
 ///
+/// ## Notes
+///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
 /// ## Metadata
 /// | | |
@@ -1431,16 +2510,20 @@ pub const KESTREL_REJECTED_CONNECTIONS: &str = "kestrel.rejected_connections";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: if and only if an error has occurred.
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
-/// | [`crate::attribute::TLS_PROTOCOL_VERSION`] | `Unspecified`
+/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: if and only if an error has occurred.
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
+/// | [`crate::attribute::TLS_PROTOCOL_VERSION`] | `Recommended`
 pub const KESTREL_TLS_HANDSHAKE_DURATION: &str = "kestrel.tls_handshake.duration";
+
 /// ## Description
+///
 /// Number of connections that are currently upgraded (WebSockets).
 ///
+/// ## Notes
+///
 /// The counter only tracks HTTP/1.1 connections.
 ///
 /// Meter name: `Microsoft.AspNetCore.Server.Kestrel`; Added in: ASP.NET Core 8.0
@@ -1454,16 +2537,20 @@ pub const KESTREL_TLS_HANDSHAKE_DURATION: &str = "kestrel.tls_handshake.duration
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
-/// | [`crate::attribute::SERVER_ADDRESS`] | `Unspecified`
-/// | [`crate::attribute::SERVER_PORT`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TYPE`] | `Recommended`: if the transport is `tcp` or `udp`
+/// | [`crate::attribute::SERVER_ADDRESS`] | `Recommended`
+/// | [`crate::attribute::SERVER_PORT`] | `Recommended`
 pub const KESTREL_UPGRADED_CONNECTIONS: &str = "kestrel.upgraded_connections";
+
 /// ## Description
+///
 /// Number of messages that were delivered to the application.
 ///
+/// ## Notes
+///
 /// Records the number of messages pulled from the broker or number of messages dispatched to the application in push-based scenarios.
-/// The metric SHOULD be reported once per message delivery. For example, if receiving and processing operations are both instrumented for a single message delivery, this counter is incremented when the message is received and not reported when it is processed. +/// The metric SHOULD be reported once per message delivery. For example, if receiving and processing operations are both instrumented for a single message delivery, this counter is incremented when the message is received and not reported when it is processed /// ## Metadata /// | | | /// |:-|:- @@ -1474,21 +2561,26 @@ pub const KESTREL_UPGRADED_CONNECTIONS: &str = "kestrel.upgraded_connections"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. +/// | [`crate::attribute::MESSAGING_CONSUMER_GROUP_NAME`] | `Conditionally_required`: if applicable. +/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally_required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. +/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Recommended` +/// | [`crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME`] | `Conditionally_required`: if applicable. +/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally_required`: if available. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` /// | [`crate::attribute::MESSAGING_SYSTEM`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::MESSAGING_CONSUMER_GROUP_NAME`] | `Conditionally required`: if applicable. -/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. -/// | [`crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME`] | `Conditionally required`: if applicable. -/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally required`: if available. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Unspecified` -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_CLIENT_CONSUMED_MESSAGES: &str = "messaging.client.consumed.messages"; + /// ## Description +/// /// Duration of messaging operation initiated by a producer or consumer client. /// -/// This metric SHOULD NOT be used to report processing duration - processing duration is reported in `messaging.process.duration` metric. +/// ## Notes +/// +/// This metric SHOULD NOT be used to report processing duration - processing duration is reported in `messaging.process.duration` metric /// ## Metadata /// | | | /// |:-|:- @@ -1499,22 +2591,23 @@ pub const MESSAGING_CLIENT_CONSUMED_MESSAGES: &str = "messaging.client.consumed. /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. 
+/// | [`crate::attribute::MESSAGING_CONSUMER_GROUP_NAME`] | `Conditionally_required`: if applicable. +/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally_required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. +/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Recommended` +/// | [`crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME`] | `Conditionally_required`: if applicable. +/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally_required`: if available. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` +/// | [`crate::attribute::MESSAGING_OPERATION_TYPE`] | `Conditionally_required`: If applicable. /// | [`crate::attribute::MESSAGING_SYSTEM`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::MESSAGING_CONSUMER_GROUP_NAME`] | `Conditionally required`: if applicable. -/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. -/// | [`crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME`] | `Conditionally required`: if applicable. -/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally required`: if available. -/// | [`crate::attribute::MESSAGING_OPERATION_TYPE`] | `Conditionally required`: If applicable. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Unspecified` -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_CLIENT_OPERATION_DURATION: &str = "messaging.client.operation.duration"; + /// ## Description -/// Number of messages producer attempted to publish to the broker. /// -/// This metric MUST NOT count messages that were created haven't yet been attempted to be published. +/// Deprecated. Use `messaging.client.sent.messages` instead /// ## Metadata /// | | | /// |:-|:- @@ -1525,19 +2618,53 @@ pub const MESSAGING_CLIENT_OPERATION_DURATION: &str = "messaging.client.operatio /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. +/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally_required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. +/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Recommended` +/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally_required`: if available. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` /// | [`crate::attribute::MESSAGING_SYSTEM`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. 
-/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally required`: if available. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Unspecified` -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.client.sent.messages`.")] pub const MESSAGING_CLIENT_PUBLISHED_MESSAGES: &str = "messaging.client.published.messages"; + +/// ## Description +/// +/// Number of messages producer attempted to send to the broker. +/// +/// ## Notes +/// +/// This metric MUST NOT count messages that were created but haven't yet been sent +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `{message}` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. +/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally_required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. +/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Recommended` +/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally_required`: if available. +/// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` +/// | [`crate::attribute::MESSAGING_SYSTEM`] | `Required` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +pub const MESSAGING_CLIENT_SENT_MESSAGES: &str = "messaging.client.sent.messages"; + /// ## Description +/// /// Duration of processing operation. /// -/// This metric MUST be reported for operations with `messaging.operation.type` that matches `process`. +/// ## Notes +/// +/// This metric MUST be reported for operations with `messaging.operation.type` that matches `process` /// ## Metadata /// | | | /// |:-|:- @@ -1548,19 +2675,22 @@ pub const MESSAGING_CLIENT_PUBLISHED_MESSAGES: &str = "messaging.client.publishe /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. +/// | [`crate::attribute::MESSAGING_CONSUMER_GROUP_NAME`] | `Conditionally_required`: if applicable. +/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally_required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. +/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Recommended` +/// | [`crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME`] | `Conditionally_required`: if applicable. +/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally_required`: if available. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` /// | [`crate::attribute::MESSAGING_SYSTEM`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. 
-/// | [`crate::attribute::MESSAGING_CONSUMER_GROUP_NAME`] | `Conditionally required`: if applicable. -/// | [`crate::attribute::MESSAGING_DESTINATION_NAME`] | `Conditionally required`: if and only if `messaging.destination.name` is known to have low cardinality. Otherwise, `messaging.destination.template` MAY be populated. -/// | [`crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME`] | `Conditionally required`: if applicable. -/// | [`crate::attribute::MESSAGING_DESTINATION_TEMPLATE`] | `Conditionally required`: if available. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::MESSAGING_DESTINATION_PARTITION_ID`] | `Unspecified` -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const MESSAGING_PROCESS_DURATION: &str = "messaging.process.duration"; + /// ## Description -/// Deprecated. Use `messaging.client.consumed.messages` instead. +/// +/// Deprecated. Use `messaging.client.consumed.messages` instead /// ## Metadata /// | | | /// |:-|:- @@ -1571,14 +2701,17 @@ pub const MESSAGING_PROCESS_DURATION: &str = "messaging.process.duration"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` -#[deprecated] +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.client.consumed.messages`.")] pub const MESSAGING_PROCESS_MESSAGES: &str = "messaging.process.messages"; + /// ## Description -/// Deprecated. Use `messaging.client.operation.duration` instead. +/// +/// Deprecated. Use `messaging.client.operation.duration` instead /// ## Metadata /// | | | /// |:-|:- @@ -1589,14 +2722,17 @@ pub const MESSAGING_PROCESS_MESSAGES: &str = "messaging.process.messages"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` -#[deprecated] +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.client.operation.duration`.")] pub const MESSAGING_PUBLISH_DURATION: &str = "messaging.publish.duration"; + /// ## Description -/// Deprecated. Use `messaging.client.produced.messages` instead. +/// +/// Deprecated. 
Use `messaging.client.produced.messages` instead /// ## Metadata /// | | | /// |:-|:- @@ -1607,14 +2743,17 @@ pub const MESSAGING_PUBLISH_DURATION: &str = "messaging.publish.duration"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` -#[deprecated] +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.client.produced.messages`.")] pub const MESSAGING_PUBLISH_MESSAGES: &str = "messaging.publish.messages"; + /// ## Description -/// Deprecated. Use `messaging.client.operation.duration` instead. +/// +/// Deprecated. Use `messaging.client.operation.duration` instead /// ## Metadata /// | | | /// |:-|:- @@ -1625,14 +2764,17 @@ pub const MESSAGING_PUBLISH_MESSAGES: &str = "messaging.publish.messages"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` -#[deprecated] +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.client.operation.duration`.")] pub const MESSAGING_RECEIVE_DURATION: &str = "messaging.receive.duration"; + /// ## Description -/// Deprecated. Use `messaging.client.consumed.messages` instead. +/// +/// Deprecated. Use `messaging.client.consumed.messages` instead /// ## Metadata /// | | | /// |:-|:- @@ -1643,15 +2785,20 @@ pub const MESSAGING_RECEIVE_DURATION: &str = "messaging.receive.duration"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | +/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally_required`: If and only if the messaging operation has failed. /// | [`crate::attribute::MESSAGING_OPERATION_NAME`] | `Required` -/// | [`crate::attribute::ERROR_TYPE`] | `Conditionally required`: If and only if the messaging operation has failed. -/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally required`: If available. -/// | [`crate::attribute::SERVER_PORT`] | `Unspecified` -#[deprecated] +/// | [`crate::attribute::SERVER_ADDRESS`] | `Conditionally_required`: If available. +/// | [`crate::attribute::SERVER_PORT`] | `Recommended` +#[cfg(feature = "semconv_experimental")] +#[deprecated(note = "Replaced by `messaging.client.consumed.messages`.")] pub const MESSAGING_RECEIVE_MESSAGES: &str = "messaging.receive.messages"; + /// ## Description +/// /// Event loop maximum delay. 
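+///
+/// As an illustrative sketch (not part of the generated definitions), a
+/// bridge written in Rust could publish this gauge through the
+/// `opentelemetry` metrics API, assuming a configured global meter provider:
+///
+/// ```ignore
+/// use opentelemetry::global;
+///
+/// let meter = global::meter("nodejs-runtime-bridge");
+/// let delay_max = meter.f64_gauge(NODEJS_EVENTLOOP_DELAY_MAX).build();
+/// // Value taken from `histogram.max`, converted from nanoseconds to seconds.
+/// delay_max.record(0.015, &[]);
+/// ```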
/// +/// ## Notes +/// /// Value can be retrieved from value `histogram.max` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1659,10 +2806,15 @@ pub const MESSAGING_RECEIVE_MESSAGES: &str = "messaging.receive.messages"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_MAX: &str = "nodejs.eventloop.delay.max"; + /// ## Description +/// /// Event loop mean delay. /// +/// ## Notes +/// /// Value can be retrieved from value `histogram.mean` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1670,10 +2822,15 @@ pub const NODEJS_EVENTLOOP_DELAY_MAX: &str = "nodejs.eventloop.delay.max"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_MEAN: &str = "nodejs.eventloop.delay.mean"; + /// ## Description +/// /// Event loop minimum delay. /// +/// ## Notes +/// /// Value can be retrieved from value `histogram.min` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1681,10 +2838,15 @@ pub const NODEJS_EVENTLOOP_DELAY_MEAN: &str = "nodejs.eventloop.delay.mean"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_MIN: &str = "nodejs.eventloop.delay.min"; + /// ## Description +/// /// Event loop 50 percentile delay. /// +/// ## Notes +/// /// Value can be retrieved from value `histogram.percentile(50)` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1692,10 +2854,15 @@ pub const NODEJS_EVENTLOOP_DELAY_MIN: &str = "nodejs.eventloop.delay.min"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_P50: &str = "nodejs.eventloop.delay.p50"; + /// ## Description +/// /// Event loop 90 percentile delay. /// +/// ## Notes +/// /// Value can be retrieved from value `histogram.percentile(90)` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1703,10 +2870,15 @@ pub const NODEJS_EVENTLOOP_DELAY_P50: &str = "nodejs.eventloop.delay.p50"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_P90: &str = "nodejs.eventloop.delay.p90"; + /// ## Description +/// /// Event loop 99 percentile delay. 
/// +/// ## Notes +/// /// Value can be retrieved from value `histogram.percentile(99)` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1714,10 +2886,15 @@ pub const NODEJS_EVENTLOOP_DELAY_P90: &str = "nodejs.eventloop.delay.p90"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_P99: &str = "nodejs.eventloop.delay.p99"; + /// ## Description +/// /// Event loop standard deviation delay. /// +/// ## Notes +/// /// Value can be retrieved from value `histogram.stddev` of [`perf_hooks.monitorEventLoopDelay([options])`](https://nodejs.org/api/perf_hooks.html#perf_hooksmonitoreventloopdelayoptions) /// ## Metadata /// | | | @@ -1725,20 +2902,49 @@ pub const NODEJS_EVENTLOOP_DELAY_P99: &str = "nodejs.eventloop.delay.p99"; /// | Instrument: | `gauge` | /// | Unit: | `s` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_DELAY_STDDEV: &str = "nodejs.eventloop.delay.stddev"; + +/// ## Description +/// +/// Cumulative duration of time the event loop has been in each state. +/// +/// ## Notes +/// +/// Value can be retrieved from [`performance.eventLoopUtilization([utilization1[, utilization2]])`](https://nodejs.org/api/perf_hooks.html#performanceeventlooputilizationutilization1-utilization2) +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `counter` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +/// +/// ## Attributes +/// | Name | Requirement | +/// |:-|:- | +/// | [`crate::attribute::NODEJS_EVENTLOOP_STATE`] | `Required` +#[cfg(feature = "semconv_experimental")] +pub const NODEJS_EVENTLOOP_TIME: &str = "nodejs.eventloop.time"; + /// ## Description +/// /// Event loop utilization. /// -/// The value range is \[0.0,1.0\] and can be retrieved from value [`performance.eventLoopUtilization([utilization1[, utilization2]])`](https://nodejs.org/api/perf_hooks.html#performanceeventlooputilizationutilization1-utilization2) +/// ## Notes +/// +/// The value range is \[0.0, 1.0\] and can be retrieved from [`performance.eventLoopUtilization([utilization1[, utilization2]])`](https://nodejs.org/api/perf_hooks.html#performanceeventlooputilizationutilization1-utilization2) /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `gauge` | /// | Unit: | `1` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const NODEJS_EVENTLOOP_UTILIZATION: &str = "nodejs.eventloop.utilization"; + /// ## Description -/// Number of times the process has been context switched. +/// +/// Number of times the process has been context switched /// ## Metadata /// | | | /// |:-|:- @@ -1749,10 +2955,13 @@ pub const NODEJS_EVENTLOOP_UTILIZATION: &str = "nodejs.eventloop.utilization"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::PROCESS_CONTEXT_SWITCH_TYPE`] | `Unspecified` +/// | [`crate::attribute::PROCESS_CONTEXT_SWITCH_TYPE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_CONTEXT_SWITCHES: &str = "process.context_switches"; + /// ## Description -/// Total CPU seconds broken down by different states. 
+/// +/// Total CPU seconds broken down by different states /// ## Metadata /// | | | /// |:-|:- @@ -1763,10 +2972,13 @@ pub const PROCESS_CONTEXT_SWITCHES: &str = "process.context_switches"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::CPU_MODE`] | `Unspecified` +/// | [`crate::attribute::CPU_MODE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_CPU_TIME: &str = "process.cpu.time"; + /// ## Description -/// Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process. +/// +/// Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process /// ## Metadata /// | | | /// |:-|:- @@ -1777,10 +2989,13 @@ pub const PROCESS_CPU_TIME: &str = "process.cpu.time"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::CPU_MODE`] | `Unspecified` +/// | [`crate::attribute::CPU_MODE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_CPU_UTILIZATION: &str = "process.cpu.utilization"; + /// ## Description -/// Disk bytes transferred. +/// +/// Disk bytes transferred /// ## Metadata /// | | | /// |:-|:- @@ -1791,28 +3006,37 @@ pub const PROCESS_CPU_UTILIZATION: &str = "process.cpu.utilization"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Unspecified` +/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_DISK_IO: &str = "process.disk.io"; + /// ## Description -/// The amount of physical memory in use. +/// +/// The amount of physical memory in use /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const PROCESS_MEMORY_USAGE: &str = "process.memory.usage"; + /// ## Description -/// The amount of committed virtual memory. +/// +/// The amount of committed virtual memory /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual"; + /// ## Description -/// Network bytes transferred. +/// +/// Network bytes transferred /// ## Metadata /// | | | /// |:-|:- @@ -1823,19 +3047,25 @@ pub const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Unspecified` +/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_NETWORK_IO: &str = "process.network.io"; + /// ## Description -/// Number of file descriptors in use by the process. +/// +/// Number of file descriptors in use by the process /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{count}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: &str = "process.open_file_descriptor.count"; + /// ## Description -/// Number of page faults the process has made. 
+/// +/// Number of page faults the process has made /// ## Metadata /// | | | /// |:-|:- @@ -1846,34 +3076,64 @@ pub const PROCESS_OPEN_FILE_DESCRIPTOR_COUNT: &str = "process.open_file_descript /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::PROCESS_PAGING_FAULT_TYPE`] | `Unspecified` +/// | [`crate::attribute::PROCESS_PAGING_FAULT_TYPE`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const PROCESS_PAGING_FAULTS: &str = "process.paging.faults"; + /// ## Description -/// Process threads count. +/// +/// Process threads count /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{thread}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const PROCESS_THREAD_COUNT: &str = "process.thread.count"; + +/// ## Description +/// +/// The time the process has been running. +/// +/// ## Notes +/// +/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available. +/// The actual accuracy would depend on the instrumentation and operating system +/// ## Metadata +/// | | | +/// |:-|:- +/// | Instrument: | `gauge` | +/// | Unit: | `s` | +/// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] +pub const PROCESS_UPTIME: &str = "process.uptime"; + /// ## Description +/// /// Measures the duration of outbound RPC. /// +/// ## Notes +/// /// While streaming RPCs may record this metric as start-of-batch -/// to end-of-batch, it's hard to interpret in practice. +/// to end-of-batch, it's hard to interpret in practice. /// -/// **Streaming**: N/A. +/// **Streaming**: N/A /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_DURATION: &str = "rpc.client.duration"; + /// ## Description +/// /// Measures the size of RPC request messages (uncompressed). /// +/// ## Notes +/// /// **Streaming**: Recorded per message in a streaming batch /// ## Metadata /// | | | @@ -1881,10 +3141,15 @@ pub const RPC_CLIENT_DURATION: &str = "rpc.client.duration"; /// | Instrument: | `histogram` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_REQUEST_SIZE: &str = "rpc.client.request.size"; + /// ## Description +/// /// Measures the number of messages received per RPC. /// +/// ## Notes +/// /// Should be 1 for all non-streaming RPCs. /// /// **Streaming**: This metric is required for server and client streaming RPCs @@ -1894,10 +3159,15 @@ pub const RPC_CLIENT_REQUEST_SIZE: &str = "rpc.client.request.size"; /// | Instrument: | `histogram` | /// | Unit: | `{count}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_REQUESTS_PER_RPC: &str = "rpc.client.requests_per_rpc"; + /// ## Description +/// /// Measures the size of RPC response messages (uncompressed). /// +/// ## Notes +/// /// **Streaming**: Recorded per response in a streaming batch /// ## Metadata /// | | | @@ -1905,10 +3175,15 @@ pub const RPC_CLIENT_REQUESTS_PER_RPC: &str = "rpc.client.requests_per_rpc"; /// | Instrument: | `histogram` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_RESPONSE_SIZE: &str = "rpc.client.response.size"; + /// ## Description +/// /// Measures the number of messages sent per RPC. 
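+///
+/// A minimal sketch (illustrative, not part of the generated definitions) of
+/// recording this histogram with the `opentelemetry` metrics API, assuming a
+/// configured global meter provider:
+///
+/// ```ignore
+/// use opentelemetry::global;
+///
+/// let meter = global::meter("rpc-client");
+/// let per_rpc = meter.u64_histogram(RPC_CLIENT_RESPONSES_PER_RPC).build();
+/// // One data point per RPC; the value SHOULD be 1 for non-streaming calls.
+/// per_rpc.record(1, &[]);
+/// ```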
/// +/// ## Notes +/// /// Should be 1 for all non-streaming RPCs. /// /// **Streaming**: This metric is required for server and client streaming RPCs @@ -1918,24 +3193,34 @@ pub const RPC_CLIENT_RESPONSE_SIZE: &str = "rpc.client.response.size"; /// | Instrument: | `histogram` | /// | Unit: | `{count}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_CLIENT_RESPONSES_PER_RPC: &str = "rpc.client.responses_per_rpc"; + /// ## Description +/// /// Measures the duration of inbound RPC. /// +/// ## Notes +/// /// While streaming RPCs may record this metric as start-of-batch -/// to end-of-batch, it's hard to interpret in practice. +/// to end-of-batch, it's hard to interpret in practice. /// -/// **Streaming**: N/A. +/// **Streaming**: N/A /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `histogram` | /// | Unit: | `ms` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_DURATION: &str = "rpc.server.duration"; + /// ## Description +/// /// Measures the size of RPC request messages (uncompressed). /// +/// ## Notes +/// /// **Streaming**: Recorded per message in a streaming batch /// ## Metadata /// | | | @@ -1943,10 +3228,15 @@ pub const RPC_SERVER_DURATION: &str = "rpc.server.duration"; /// | Instrument: | `histogram` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_REQUEST_SIZE: &str = "rpc.server.request.size"; + /// ## Description +/// /// Measures the number of messages received per RPC. /// +/// ## Notes +/// /// Should be 1 for all non-streaming RPCs. /// /// **Streaming** : This metric is required for server and client streaming RPCs @@ -1956,10 +3246,15 @@ pub const RPC_SERVER_REQUEST_SIZE: &str = "rpc.server.request.size"; /// | Instrument: | `histogram` | /// | Unit: | `{count}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_REQUESTS_PER_RPC: &str = "rpc.server.requests_per_rpc"; + /// ## Description +/// /// Measures the size of RPC response messages (uncompressed). /// +/// ## Notes +/// /// **Streaming**: Recorded per response in a streaming batch /// ## Metadata /// | | | @@ -1967,10 +3262,15 @@ pub const RPC_SERVER_REQUESTS_PER_RPC: &str = "rpc.server.requests_per_rpc"; /// | Instrument: | `histogram` | /// | Unit: | `By` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_RESPONSE_SIZE: &str = "rpc.server.response.size"; + /// ## Description +/// /// Measures the number of messages sent per RPC. /// +/// ## Notes +/// /// Should be 1 for all non-streaming RPCs. /// /// **Streaming**: This metric is required for server and client streaming RPCs @@ -1980,10 +3280,15 @@ pub const RPC_SERVER_RESPONSE_SIZE: &str = "rpc.server.response.size"; /// | Instrument: | `histogram` | /// | Unit: | `{count}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const RPC_SERVER_RESPONSES_PER_RPC: &str = "rpc.server.responses_per_rpc"; + /// ## Description +/// /// Number of connections that are currently active on the server. 
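+///
+/// Illustrative only (not part of the generated definitions): an
+/// updowncounter built from this constant with the `opentelemetry` metrics
+/// API can track the live connection count:
+///
+/// ```ignore
+/// use opentelemetry::global;
+///
+/// let meter = global::meter("signalr-bridge");
+/// let active = meter.i64_up_down_counter(SIGNALR_SERVER_ACTIVE_CONNECTIONS).build();
+/// active.add(1, &[]); // connection opened
+/// active.add(-1, &[]); // connection closed
+/// ```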
/// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -1995,12 +3300,16 @@ pub const RPC_SERVER_RESPONSES_PER_RPC: &str = "rpc.server.responses_per_rpc"; /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::SIGNALR_CONNECTION_STATUS`] | `Unspecified` -/// | [`crate::attribute::SIGNALR_TRANSPORT`] | `Unspecified` +/// | [`crate::attribute::SIGNALR_CONNECTION_STATUS`] | `Recommended` +/// | [`crate::attribute::SIGNALR_TRANSPORT`] | `Recommended` pub const SIGNALR_SERVER_ACTIVE_CONNECTIONS: &str = "signalr.server.active_connections"; + /// ## Description +/// /// The duration of connections on the server. /// +/// ## Notes +/// /// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core 8.0 /// ## Metadata /// | | | @@ -2012,11 +3321,13 @@ pub const SIGNALR_SERVER_ACTIVE_CONNECTIONS: &str = "signalr.server.active_conne /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::SIGNALR_CONNECTION_STATUS`] | `Unspecified` -/// | [`crate::attribute::SIGNALR_TRANSPORT`] | `Unspecified` +/// | [`crate::attribute::SIGNALR_CONNECTION_STATUS`] | `Recommended` +/// | [`crate::attribute::SIGNALR_TRANSPORT`] | `Recommended` pub const SIGNALR_SERVER_CONNECTION_DURATION: &str = "signalr.server.connection.duration"; + /// ## Description -/// Reports the current frequency of the CPU in Hz. +/// +/// Reports the current frequency of the CPU in Hz /// ## Metadata /// | | | /// |:-|:- @@ -2027,28 +3338,37 @@ pub const SIGNALR_SERVER_CONNECTION_DURATION: &str = "signalr.server.connection. /// ## Attributes /// | Name | Requirement | /// |:-|:- | -/// | [`crate::attribute::SYSTEM_CPU_LOGICAL_NUMBER`] | `Unspecified` +/// | [`crate::attribute::SYSTEM_CPU_LOGICAL_NUMBER`] | `Recommended` +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_CPU_FREQUENCY: &str = "system.cpu.frequency"; + /// ## Description -/// Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking. +/// +/// Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{cpu}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_CPU_LOGICAL_COUNT: &str = "system.cpu.logical.count"; + /// ## Description -/// Reports the number of actual physical processor cores on the hardware. +/// +/// Reports the number of actual physical processor cores on the hardware /// ## Metadata /// | | | /// |:-|:- /// | Instrument: | `updowncounter` | /// | Unit: | `{cpu}` | /// | Status: | `Experimental` | +#[cfg(feature = "semconv_experimental")] pub const SYSTEM_CPU_PHYSICAL_COUNT: &str = "system.cpu.physical.count"; + /// ## Description -/// Seconds each logical CPU spent on each mode. 
+///
+/// Seconds each logical CPU spent on each mode
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2059,11 +3379,14 @@ pub const SYSTEM_CPU_PHYSICAL_COUNT: &str = "system.cpu.physical.count";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::CPU_MODE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_CPU_LOGICAL_NUMBER`] | `Unspecified`
+/// | [`crate::attribute::CPU_MODE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_CPU_LOGICAL_NUMBER`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_CPU_TIME: &str = "system.cpu.time";
+
 /// ## Description
-/// Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs.
+///
+/// Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2074,11 +3397,12 @@ pub const SYSTEM_CPU_TIME: &str = "system.cpu.time";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::CPU_MODE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_CPU_LOGICAL_NUMBER`] | `Unspecified`
+/// | [`crate::attribute::CPU_MODE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_CPU_LOGICAL_NUMBER`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_CPU_UTILIZATION: &str = "system.cpu.utilization";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2089,18 +3413,23 @@ pub const SYSTEM_CPU_UTILIZATION: &str = "system.cpu.utilization";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_DISK_IO: &str = "system.disk.io";
+
 /// ## Description
-/// Time disk spent activated.
 ///
-/// The real elapsed time ("wall clock") used in the I/O path (time from operations running in parallel are not counted). Measured as:
+/// Time disk spent activated
+///
+/// ## Notes
+///
+/// The real elapsed time ("wall clock") used in the I/O path (time from operations running in parallel are not counted). Measured as:
 ///
 /// - Linux: Field 13 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats)
 /// - Windows: The complement of
-/// ["Disk\% Idle Time"](https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained)
-/// performance counter: `uptime * (100 - "Disk\% Idle Time") / 100`
+/// ["Disk% Idle Time"](https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained)
+/// performance counter: `uptime * (100 - "Disk\% Idle Time") / 100`
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2111,10 +3440,28 @@ pub const SYSTEM_DISK_IO: &str = "system.disk.io";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_DISK_IO_TIME: &str = "system.disk.io_time";
+
+/// ## Description
+///
+/// The total storage capacity of the disk
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `updowncounter` |
+/// | Unit: | `By` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
+pub const SYSTEM_DISK_LIMIT: &str = "system.disk.limit";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2125,16 +3472,21 @@ pub const SYSTEM_DISK_IO_TIME: &str = "system.disk.io_time";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_DISK_MERGED: &str = "system.disk.merged";
+
 /// ## Description
-/// Sum of the time each operation took to complete.
+///
+/// Sum of the time each operation took to complete
+///
+/// ## Notes
 ///
 /// Because it is the sum of time each request took, parallel-issued requests each contribute to make the count grow. Measured as:
 ///
-/// - Linux: Fields 7 & 11 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats)
-/// - Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec" perf counter (similar for Writes)
+/// - Linux: Fields 7 & 11 from [procfs-diskstats](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats)
+/// - Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec" perf counter (similar for Writes)
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2145,11 +3497,12 @@ pub const SYSTEM_DISK_MERGED: &str = "system.disk.merged";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_DISK_OPERATION_TIME: &str = "system.disk.operation_time";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2160,11 +3513,39 @@ pub const SYSTEM_DISK_OPERATION_TIME: &str = "system.disk.operation_time";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::DISK_IO_DIRECTION`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_DISK_OPERATIONS: &str = "system.disk.operations";
+
+/// ## Description
+///
+/// The total storage capacity of the filesystem
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `updowncounter` |
+/// | Unit: | `By` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_MODE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_MOUNTPOINT`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_TYPE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
+pub const SYSTEM_FILESYSTEM_LIMIT: &str = "system.filesystem.limit";
+
 /// ## Description
-/// .
+///
+/// Reports a filesystem's space usage across different states.
+///
+/// ## Notes
+///
+/// The sum of all `system.filesystem.usage` values over the different `system.filesystem.state` attributes
+/// SHOULD equal the total storage capacity of the filesystem, that is `system.filesystem.limit`
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2175,14 +3556,15 @@ pub const SYSTEM_DISK_OPERATIONS: &str = "system.disk.operations";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_MODE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_MOUNTPOINT`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_STATE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_TYPE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_MODE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_MOUNTPOINT`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_STATE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_TYPE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_FILESYSTEM_USAGE: &str = "system.filesystem.usage";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2193,33 +3575,43 @@ pub const SYSTEM_FILESYSTEM_USAGE: &str = "system.filesystem.usage";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_MODE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_MOUNTPOINT`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_STATE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_FILESYSTEM_TYPE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_MODE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_MOUNTPOINT`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_STATE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_FILESYSTEM_TYPE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_FILESYSTEM_UTILIZATION: &str = "system.filesystem.utilization";
+
 /// ## Description
-/// An estimate of how much memory is available for starting new applications, without causing swapping.
+///
+/// An estimate of how much memory is available for starting new applications, without causing swapping
+///
+/// ## Notes
 ///
 /// This is an alternative to `system.memory.usage` metric with `state=free`.
-/// Linux starting from 3.14 exports "available" memory. It takes "free" memory as a baseline, and then factors in kernel-specific values.
-/// This is supposed to be more accurate than just "free" memory.
+/// Linux starting from 3.14 exports "available" memory. It takes "free" memory as a baseline, and then factors in kernel-specific values.
+/// This is supposed to be more accurate than just "free" memory.
 /// For reference, see the calculations [here](https://superuser.com/a/980821).
-/// See also `MemAvailable` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html).
+/// See also `MemAvailable` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html)
 /// ## Metadata
 /// | | |
 /// |:-|:-
 /// | Instrument: | `updowncounter` |
 /// | Unit: | `By` |
 /// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_LINUX_MEMORY_AVAILABLE: &str = "system.linux.memory.available";
+
 /// ## Description
+///
 /// Reports the memory used by the Linux kernel for managing caches of frequently used objects.
 ///
+/// ## Notes
+///
 /// The sum over the `reclaimable` and `unreclaimable` state values in `linux.memory.slab.usage` SHOULD be equal to the total slab memory available on the system.
 /// Note that the total slab memory is not constant and may vary over time.
-/// See also the [Slab allocator](https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics) and `Slab` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html).
+/// See also the [Slab allocator](https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics) and `Slab` in [/proc/meminfo](https://man7.org/linux/man-pages/man5/proc.5.html)
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2230,36 +3622,51 @@ pub const SYSTEM_LINUX_MEMORY_AVAILABLE: &str = "system.linux.memory.available";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::LINUX_MEMORY_SLAB_STATE`] | `Unspecified`
+/// | [`crate::attribute::LINUX_MEMORY_SLAB_STATE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_LINUX_MEMORY_SLAB_USAGE: &str = "system.linux.memory.slab.usage";
+
 /// ## Description
+///
 /// Total memory available in the system.
 ///
-/// Its value SHOULD equal the sum of `system.memory.state` over all states.
+/// ## Notes
+///
+/// Its value SHOULD equal the sum of `system.memory.state` over all states
 /// ## Metadata
 /// | | |
 /// |:-|:-
 /// | Instrument: | `updowncounter` |
 /// | Unit: | `By` |
 /// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_MEMORY_LIMIT: &str = "system.memory.limit";
+
 /// ## Description
+///
 /// Shared memory used (mostly by tmpfs).
 ///
+/// ## Notes
+///
 /// Equivalent of `shared` from [`free` command](https://man7.org/linux/man-pages/man1/free.1.html) or
-/// `Shmem` from [`/proc/meminfo`](https://man7.org/linux/man-pages/man5/proc.5.html)"
+/// `Shmem` from [`/proc/meminfo`](https://man7.org/linux/man-pages/man5/proc.5.html)"
 /// ## Metadata
 /// | | |
 /// |:-|:-
 /// | Instrument: | `updowncounter` |
 /// | Unit: | `By` |
 /// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_MEMORY_SHARED: &str = "system.memory.shared";
+
 /// ## Description
+///
 /// Reports memory in use by state.
 ///
+/// ## Notes
+///
 /// The sum over all `system.memory.state` values SHOULD equal the total memory
-/// available on the system, that is `system.memory.limit`.
+/// available on the system, that is `system.memory.limit`
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2270,10 +3677,11 @@ pub const SYSTEM_MEMORY_SHARED: &str = "system.memory.shared";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_MEMORY_STATE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_MEMORY_STATE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_MEMORY_USAGE: &str = "system.memory.usage";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2284,10 +3692,11 @@ pub const SYSTEM_MEMORY_USAGE: &str = "system.memory.usage";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_MEMORY_STATE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_MEMORY_STATE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_MEMORY_UTILIZATION: &str = "system.memory.utilization";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2298,12 +3707,17 @@ pub const SYSTEM_MEMORY_UTILIZATION: &str = "system.memory.utilization";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_NETWORK_STATE`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended`
+/// | [`crate::attribute::NETWORK_TRANSPORT`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_NETWORK_STATE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_NETWORK_CONNECTIONS: &str = "system.network.connections";
+
 /// ## Description
-/// Count of packets that are dropped or discarded even though there was no error.
+///
+/// Count of packets that are dropped or discarded even though there was no error
+///
+/// ## Notes
 ///
 /// Measured as:
 ///
@@ -2320,17 +3734,22 @@ pub const SYSTEM_NETWORK_CONNECTIONS: &str = "system.network.connections";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended`
+/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_NETWORK_DROPPED: &str = "system.network.dropped";
+
 /// ## Description
-/// Count of network errors detected.
+///
+/// Count of network errors detected
+///
+/// ## Notes
 ///
 /// Measured as:
 ///
 /// - Linux: the `errs` column in `/proc/dev/net` ([source](https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html)).
 /// - Windows: [`InErrors`/`OutErrors`](https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2)
-/// from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2).
+/// from [`GetIfEntry2`](https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2)
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2341,11 +3760,12 @@ pub const SYSTEM_NETWORK_DROPPED: &str = "system.network.dropped";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended`
+/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_NETWORK_ERRORS: &str = "system.network.errors";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2356,11 +3776,12 @@ pub const SYSTEM_NETWORK_ERRORS: &str = "system.network.errors";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_INTERFACE_NAME`] | `Recommended`
+/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_NETWORK_IO: &str = "system.network.io";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2371,11 +3792,12 @@ pub const SYSTEM_NETWORK_IO: &str = "system.network.io";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_DEVICE`] | `Unspecified`
+/// | [`crate::attribute::NETWORK_IO_DIRECTION`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_NETWORK_PACKETS: &str = "system.network.packets";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2386,10 +3808,11 @@ pub const SYSTEM_NETWORK_PACKETS: &str = "system.network.packets";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_PAGING_TYPE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_PAGING_TYPE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_PAGING_FAULTS: &str = "system.paging.faults";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2400,11 +3823,14 @@ pub const SYSTEM_PAGING_FAULTS: &str = "system.paging.faults";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_PAGING_DIRECTION`] | `Unspecified`
-/// | [`crate::attribute::SYSTEM_PAGING_TYPE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_PAGING_DIRECTION`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_PAGING_TYPE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_PAGING_OPERATIONS: &str = "system.paging.operations";
+
 /// ## Description
-/// Unix swap or windows pagefile usage.
+///
+/// Unix swap or windows pagefile usage
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2415,10 +3841,12 @@ pub const SYSTEM_PAGING_OPERATIONS: &str = "system.paging.operations";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_PAGING_STATE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_PAGING_STATE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_PAGING_USAGE: &str = "system.paging.usage";
+
 /// ## Description
-/// .
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2429,10 +3857,14 @@ pub const SYSTEM_PAGING_USAGE: &str = "system.paging.usage";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_PAGING_STATE`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_DEVICE`] | `Recommended`
+/// | [`crate::attribute::SYSTEM_PAGING_STATE`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_PAGING_UTILIZATION: &str = "system.paging.utilization";
+
 /// ## Description
-/// Total number of processes in each state.
+///
+/// Total number of processes in each state
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2443,21 +3875,46 @@ pub const SYSTEM_PAGING_UTILIZATION: &str = "system.paging.utilization";
 /// ## Attributes
 /// | Name | Requirement |
 /// |:-|:- |
-/// | [`crate::attribute::SYSTEM_PROCESS_STATUS`] | `Unspecified`
+/// | [`crate::attribute::SYSTEM_PROCESS_STATUS`] | `Recommended`
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_PROCESS_COUNT: &str = "system.process.count";
+
 /// ## Description
-/// Total number of processes created over uptime of the host.
+///
+/// Total number of processes created over uptime of the host
 /// ## Metadata
 /// | | |
 /// |:-|:-
 /// | Instrument: | `counter` |
 /// | Unit: | `{process}` |
 /// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
 pub const SYSTEM_PROCESS_CREATED: &str = "system.process.created";
+
+/// ## Description
+///
+/// The time the system has been running
+///
+/// ## Notes
+///
+/// Instrumentations SHOULD use a gauge with type `double` and measure uptime in seconds as a floating point number with the highest precision available.
+/// The actual accuracy would depend on the instrumentation and operating system
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `s` |
+/// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
+pub const SYSTEM_UPTIME: &str = "system.uptime";
+
 /// ## Description
+///
 /// Garbage collection duration.
 ///
-/// The values can be retrieve from [`perf_hooks.PerformanceObserver(...).observe({ entryTypes: ['gc'] })`](https://nodejs.org/api/perf_hooks.html#performanceobserverobserveoptions)
+/// ## Notes
+///
+/// The values can be retrieve from [`perf_hooks.PerformanceObserver(...).observe({ entryTypes: ['gc'] })`](https://nodejs.org/api/perf_hooks.html#performanceobserverobserveoptions)
 /// ## Metadata
 /// | | |
 /// |:-|:-
@@ -2469,10 +3926,15 @@ pub const SYSTEM_PROCESS_CREATED: &str = "system.process.created";
 /// | Name | Requirement |
 /// |:-|:- |
 /// | [`crate::attribute::V8JS_GC_TYPE`] | `Required`
+#[cfg(feature = "semconv_experimental")]
 pub const V8JS_GC_DURATION: &str = "v8js.gc.duration";
+
 /// ## Description
+///
 /// Heap space available size.
 ///
+/// ## Notes
+///
 /// Value can be retrieved from value `space_available_size` of [`v8.getHeapSpaceStatistics()`](https://nodejs.org/api/v8.html#v8getheapspacestatistics)
 /// ## Metadata
 /// | | |
@@ -2485,10 +3947,15 @@ pub const V8JS_GC_DURATION: &str = "v8js.gc.duration";
 /// | Name | Requirement |
 /// |:-|:- |
 /// | [`crate::attribute::V8JS_HEAP_SPACE_NAME`] | `Required`
+#[cfg(feature = "semconv_experimental")]
 pub const V8JS_HEAP_SPACE_AVAILABLE_SIZE: &str = "v8js.heap.space.available_size";
+
 /// ## Description
+///
 /// Committed size of a heap space.
 ///
+/// ## Notes
+///
 /// Value can be retrieved from value `physical_space_size` of [`v8.getHeapSpaceStatistics()`](https://nodejs.org/api/v8.html#v8getheapspacestatistics)
 /// ## Metadata
 /// | | |
@@ -2501,10 +3968,15 @@ pub const V8JS_HEAP_SPACE_AVAILABLE_SIZE: &str = "v8js.heap.space.available_size
 /// | Name | Requirement |
 /// |:-|:- |
 /// | [`crate::attribute::V8JS_HEAP_SPACE_NAME`] | `Required`
+#[cfg(feature = "semconv_experimental")]
 pub const V8JS_HEAP_SPACE_PHYSICAL_SIZE: &str = "v8js.heap.space.physical_size";
+
 /// ## Description
+///
 /// Total heap memory size pre-allocated.
 ///
+/// ## Notes
+///
 /// The value can be retrieved from value `space_size` of [`v8.getHeapSpaceStatistics()`](https://nodejs.org/api/v8.html#v8getheapspacestatistics)
 /// ## Metadata
 /// | | |
@@ -2517,10 +3989,15 @@ pub const V8JS_HEAP_SPACE_PHYSICAL_SIZE: &str = "v8js.heap.space.physical_size";
 /// | Name | Requirement |
 /// |:-|:- |
 /// | [`crate::attribute::V8JS_HEAP_SPACE_NAME`] | `Required`
+#[cfg(feature = "semconv_experimental")]
 pub const V8JS_MEMORY_HEAP_LIMIT: &str = "v8js.memory.heap.limit";
+
 /// ## Description
+///
 /// Heap Memory size allocated.
 ///
+/// ## Notes
+///
 /// The value can be retrieved from value `space_used_size` of [`v8.getHeapSpaceStatistics()`](https://nodejs.org/api/v8.html#v8getheapspacestatistics)
 /// ## Metadata
 /// | | |
@@ -2533,4 +4010,183 @@ pub const V8JS_MEMORY_HEAP_LIMIT: &str = "v8js.memory.heap.limit";
 /// | Name | Requirement |
 /// |:-|:- |
 /// | [`crate::attribute::V8JS_HEAP_SPACE_NAME`] | `Required`
+#[cfg(feature = "semconv_experimental")]
 pub const V8JS_MEMORY_HEAP_USED: &str = "v8js.memory.heap.used";
+
+/// ## Description
+///
+/// The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `updowncounter` |
+/// | Unit: | `{change}` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_CHANGE_STATE`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_CHANGE_COUNT: &str = "vcs.change.count";
+
+/// ## Description
+///
+/// The time duration a change (pull request/merge request/changelist) has been in a given state
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `s` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_CHANGE_STATE`] | `Required`
+/// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_CHANGE_DURATION: &str = "vcs.change.duration";
+
+/// ## Description
+///
+/// The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `s` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_CHANGE_TIME_TO_APPROVAL: &str = "vcs.change.time_to_approval";
+
+/// ## Description
+///
+/// The number of unique contributors to a repository
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `{contributor}` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_CONTRIBUTOR_COUNT: &str = "vcs.contributor.count";
+
+/// ## Description
+///
+/// The number of refs of type branch or tag in a repository
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `updowncounter` |
+/// | Unit: | `{ref}` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_REF_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_REF_COUNT: &str = "vcs.ref.count";
+
+/// ## Description
+///
+/// The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute
+///
+/// ## Notes
+///
+/// This metric should be reported for each `vcs.line_change.type` value. For example if a ref added 3 lines and removed 2 lines,
+/// instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers).
+/// If number of lines added/removed should be calculated from the start of time, then `vcs.ref.base.name` SHOULD be set to an empty string
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `{line}` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_CHANGE_ID`] | `Conditionally_required`: if a change is associate with the ref.
+/// | [`crate::attribute::VCS_LINE_CHANGE_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REF_BASE_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REF_BASE_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REF_HEAD_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_REF_LINES_DELTA: &str = "vcs.ref.lines_delta";
+
+/// ## Description
+///
+/// The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute
+///
+/// ## Notes
+///
+/// This metric should be reported for each `vcs.revision_delta.direction` value. For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`,
+/// instrumentation SHOULD report two measurements: 3 and 2 (both positive numbers) and `vcs.ref.base.name` is set to `trunk`
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `{revision}` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_CHANGE_ID`] | `Conditionally_required`: if a change is associate with the ref.
+/// | [`crate::attribute::VCS_REF_BASE_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REF_BASE_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REF_HEAD_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+/// | [`crate::attribute::VCS_REVISION_DELTA_DIRECTION`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_REF_REVISIONS_DELTA: &str = "vcs.ref.revisions_delta";
+
+/// ## Description
+///
+/// Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `gauge` |
+/// | Unit: | `s` |
+/// | Status: | `Experimental` |
+///
+/// ## Attributes
+/// | Name | Requirement |
+/// |:-|:- |
+/// | [`crate::attribute::VCS_REF_HEAD_NAME`] | `Required`
+/// | [`crate::attribute::VCS_REF_HEAD_TYPE`] | `Required`
+/// | [`crate::attribute::VCS_REPOSITORY_URL_FULL`] | `Required`
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_REF_TIME: &str = "vcs.ref.time";
+
+/// ## Description
+///
+/// The number of repositories in an organization
+/// ## Metadata
+/// | | |
+/// |:-|:-
+/// | Instrument: | `updowncounter` |
+/// | Unit: | `{repository}` |
+/// | Status: | `Experimental` |
+#[cfg(feature = "semconv_experimental")]
+pub const VCS_REPOSITORY_COUNT: &str = "vcs.repository.count";
diff --git a/opentelemetry-semantic-conventions/src/resource.rs b/opentelemetry-semantic-conventions/src/resource.rs
index 32da1299f5..170f5b10ea 100644
--- a/opentelemetry-semantic-conventions/src/resource.rs
+++ b/opentelemetry-semantic-conventions/src/resource.rs
@@ -1,7 +1,7 @@
 // DO NOT EDIT, this is an auto-generated file
 //
 // If you want to update the file:
-// - Edit the template at scripts/templates/semantic_attributes.rs.j2
+// - Edit the template at scripts/templates/registry/rust/resource.rs.j2
 // - Run the script at scripts/generate-consts-from-spec.sh
 
 //! # Resource Semantic Conventions
@@ -19,127 +19,394 @@
 //! use opentelemetry_semantic_conventions as semconv;
 //!
 //! let _tracer = TracerProvider::builder()
-//!     .with_config(config().with_resource(Resource::new(vec![
-//!         KeyValue::new(semconv::resource::SERVICE_NAME, "my-service"),
-//!         KeyValue::new(semconv::resource::SERVICE_NAMESPACE, "my-namespace"),
-//!     ])))
+//!     .with_config(config().with_resource(Resource::builder_empty().with_service_name("my-service").build()))
 //!     .build();
 //! ```
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::ANDROID_OS_API_LEVEL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_CLUSTER_ARN;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_CONTAINER_ARN;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_LAUNCHTYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_TASK_ARN;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_TASK_FAMILY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_TASK_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_ECS_TASK_REVISION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_EKS_CLUSTER_ARN;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_LOG_GROUP_ARNS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_LOG_GROUP_NAMES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_LOG_STREAM_ARNS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_LOG_STREAM_NAMES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::BROWSER_BRANDS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::BROWSER_LANGUAGE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::BROWSER_MOBILE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::BROWSER_PLATFORM;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_ACCOUNT_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_AVAILABILITY_ZONE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_PLATFORM;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_PROVIDER;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_REGION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_RESOURCE_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_APP_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_APP_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_ORG_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_ORG_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_PROCESS_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_PROCESS_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_SPACE_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_SPACE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_SYSTEM_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CLOUDFOUNDRY_SYSTEM_INSTANCE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_COMMAND;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_COMMAND_ARGS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_COMMAND_LINE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_IMAGE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_IMAGE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_IMAGE_REPO_DIGESTS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_IMAGE_TAGS;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::CONTAINER_LABEL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CONTAINER_RUNTIME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DEPLOYMENT_ENVIRONMENT_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DEVICE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DEVICE_MANUFACTURER;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DEVICE_MODEL_IDENTIFIER;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DEVICE_MODEL_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_INSTANCE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_MAX_MEMORY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GCP_CLOUD_RUN_JOB_EXECUTION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GCP_CLOUD_RUN_JOB_TASK_INDEX;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GCP_GCE_INSTANCE_HOSTNAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GCP_GCE_INSTANCE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HEROKU_APP_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HEROKU_RELEASE_COMMIT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HEROKU_RELEASE_CREATION_TIMESTAMP;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_ARCH;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_CPU_CACHE_L2_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_CPU_FAMILY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_CPU_MODEL_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_CPU_MODEL_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_CPU_STEPPING;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_CPU_VENDOR_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_IMAGE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_IMAGE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_IMAGE_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_IP;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_MAC;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::HOST_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CLUSTER_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CLUSTER_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CONTAINER_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CONTAINER_RESTART_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CONTAINER_STATUS_LAST_TERMINATED_REASON;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CRONJOB_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_CRONJOB_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_DAEMONSET_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_DAEMONSET_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_DEPLOYMENT_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_DEPLOYMENT_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_JOB_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_JOB_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_NAMESPACE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_NODE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_NODE_UID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::K8S_POD_ANNOTATION;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::K8S_POD_LABEL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_POD_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_POD_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_REPLICASET_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_REPLICASET_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_STATEFULSET_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::K8S_STATEFULSET_UID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OCI_MANIFEST_DIGEST;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OS_BUILD_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OS_DESCRIPTION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OS_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OS_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::OS_VERSION;
+
 pub use crate::attribute::OTEL_SCOPE_NAME;
+
 pub use crate::attribute::OTEL_SCOPE_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_COMMAND;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_COMMAND_ARGS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_COMMAND_LINE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_EXECUTABLE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_EXECUTABLE_PATH;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::PROCESS_LINUX_CGROUP;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_OWNER;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_PARENT_PID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_PID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_RUNTIME_DESCRIPTION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_RUNTIME_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::PROCESS_RUNTIME_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::SERVICE_INSTANCE_ID;
+
 pub use crate::attribute::SERVICE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::SERVICE_NAMESPACE;
+
 pub use crate::attribute::SERVICE_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::TELEMETRY_DISTRO_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::TELEMETRY_DISTRO_VERSION;
+
 pub use crate::attribute::TELEMETRY_SDK_LANGUAGE;
+
 pub use crate::attribute::TELEMETRY_SDK_NAME;
+
 pub use crate::attribute::TELEMETRY_SDK_VERSION;
+
 pub use crate::attribute::USER_AGENT_ORIGINAL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::WEBENGINE_DESCRIPTION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::WEBENGINE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::WEBENGINE_VERSION;
diff --git a/opentelemetry-semantic-conventions/src/trace.rs b/opentelemetry-semantic-conventions/src/trace.rs
index 0cf5e2f648..a37ce1dead 100644
--- a/opentelemetry-semantic-conventions/src/trace.rs
+++ b/opentelemetry-semantic-conventions/src/trace.rs
@@ -1,7 +1,7 @@
 // DO NOT EDIT, this is an auto-generated file
 //
 // If you want to update the file:
-// - Edit the template at scripts/templates/semantic_attributes.rs.j2
+// - Edit the template at scripts/templates/registry/rust/attributes.rs.j2
 // - Run the script at scripts/generate-consts-from-spec.sh
 
 //! # Trace Semantic Conventions
@@ -27,163 +27,499 @@
 //!     ])
 //!     .start(&tracer);
 //! ```
-pub use crate::attribute::AWS_DYNAMODB_ATTRIBUTES_TO_GET;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_ATTRIBUTE_DEFINITIONS;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AWS_DYNAMODB_ATTRIBUTES_TO_GET;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_CONSISTENT_READ;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_CONSUMED_CAPACITY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_EXCLUSIVE_START_TABLE;
-pub use crate::attribute::AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_GLOBAL_SECONDARY_INDEX_UPDATES;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AWS_DYNAMODB_GLOBAL_SECONDARY_INDEXES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_INDEX_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_ITEM_COLLECTION_METRICS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_LIMIT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_LOCAL_SECONDARY_INDEXES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_PROJECTION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_PROVISIONED_READ_CAPACITY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_PROVISIONED_WRITE_CAPACITY;
-pub use crate::attribute::AWS_DYNAMODB_SCANNED_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_SCAN_FORWARD;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AWS_DYNAMODB_SCANNED_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_SEGMENT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_SELECT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_TABLE_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_TABLE_NAMES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_DYNAMODB_TOTAL_SEGMENTS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_LAMBDA_INVOKED_ARN;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_REQUEST_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_S3_BUCKET;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_S3_COPY_SOURCE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_S3_DELETE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_S3_KEY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_S3_PART_NUMBER;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AWS_S3_UPLOAD_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::AZ_NAMESPACE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::AZ_SERVICE_REQUEST_ID;
+
 pub use crate::attribute::CLIENT_ADDRESS;
+
 pub use crate::attribute::CLIENT_PORT;
-pub use crate::attribute::CLOUDEVENTS_EVENT_ID;
-pub use crate::attribute::CLOUDEVENTS_EVENT_SOURCE;
-pub use crate::attribute::CLOUDEVENTS_EVENT_SPEC_VERSION;
-pub use crate::attribute::CLOUDEVENTS_EVENT_SUBJECT;
-pub use crate::attribute::CLOUDEVENTS_EVENT_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::CLOUD_RESOURCE_ID;
-pub use crate::attribute::CODE_COLUMN;
-pub use crate::attribute::CODE_FILEPATH;
-pub use crate::attribute::CODE_FUNCTION;
-pub use crate::attribute::CODE_LINENO;
-pub use crate::attribute::CODE_NAMESPACE;
-pub use crate::attribute::CODE_STACKTRACE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_CASSANDRA_CONSISTENCY_LEVEL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_CASSANDRA_COORDINATOR_DC;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_CASSANDRA_COORDINATOR_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_CASSANDRA_IDEMPOTENCE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_CASSANDRA_PAGE_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_CASSANDRA_SPECULATIVE_EXECUTION_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_COLLECTION_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_COSMOSDB_CLIENT_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_COSMOSDB_CONNECTION_MODE;
-pub use crate::attribute::DB_COSMOSDB_OPERATION_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_COSMOSDB_CONSISTENCY_LEVEL;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_COSMOSDB_REGIONS_CONTACTED;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_COSMOSDB_REQUEST_CHARGE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_COSMOSDB_REQUEST_CONTENT_LENGTH;
-pub use crate::attribute::DB_COSMOSDB_STATUS_CODE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_COSMOSDB_SUB_STATUS_CODE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_ELASTICSEARCH_NODE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_ELASTICSEARCH_PATH_PARTS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_NAMESPACE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_OPERATION_BATCH_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_OPERATION_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_OPERATION_PARAMETER;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_QUERY_SUMMARY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_QUERY_TEXT;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_RESPONSE_RETURNED_ROWS;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::DB_RESPONSE_STATUS_CODE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::DB_SYSTEM;
+
 pub use crate::attribute::ERROR_TYPE;
-pub use crate::attribute::EVENT_NAME;
+
 pub use crate::attribute::EXCEPTION_ESCAPED;
+
 pub use crate::attribute::EXCEPTION_MESSAGE;
+
 pub use crate::attribute::EXCEPTION_STACKTRACE;
+
 pub use crate::attribute::EXCEPTION_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_COLDSTART;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_CRON;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_DOCUMENT_COLLECTION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_DOCUMENT_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_DOCUMENT_OPERATION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_DOCUMENT_TIME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_INVOCATION_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_INVOKED_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_INVOKED_PROVIDER;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_INVOKED_REGION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_TIME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FAAS_TRIGGER;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::FEATURE_FLAG_CONTEXT_ID;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::FEATURE_FLAG_EVALUATION_ERROR_MESSAGE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::FEATURE_FLAG_EVALUATION_REASON;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FEATURE_FLAG_KEY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FEATURE_FLAG_PROVIDER_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::FEATURE_FLAG_SET_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::FEATURE_FLAG_VARIANT;
-pub use crate::attribute::GEN_AI_COMPLETION;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::FEATURE_FLAG_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_OPENAI_REQUEST_SEED;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_OPENAI_REQUEST_SERVICE_TIER;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_OPENAI_RESPONSE_SERVICE_TIER;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_OPERATION_NAME;
-pub use crate::attribute::GEN_AI_PROMPT;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::GEN_AI_REQUEST_ENCODING_FORMATS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_FREQUENCY_PENALTY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_MAX_TOKENS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_MODEL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_PRESENCE_PENALTY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_STOP_SEQUENCES;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_TEMPERATURE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_TOP_K;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_REQUEST_TOP_P;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_RESPONSE_FINISH_REASONS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_RESPONSE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_RESPONSE_MODEL;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_SYSTEM;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_USAGE_INPUT_TOKENS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GEN_AI_USAGE_OUTPUT_TOKENS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GRAPHQL_DOCUMENT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GRAPHQL_OPERATION_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::GRAPHQL_OPERATION_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::HTTP_REQUEST_BODY_SIZE;
+
+pub use crate::attribute::HTTP_REQUEST_HEADER;
+
 pub use crate::attribute::HTTP_REQUEST_METHOD;
+
 pub use crate::attribute::HTTP_REQUEST_METHOD_ORIGINAL;
+
 pub use crate::attribute::HTTP_REQUEST_RESEND_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::HTTP_REQUEST_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::HTTP_RESPONSE_BODY_SIZE;
+
+pub use crate::attribute::HTTP_RESPONSE_HEADER;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::HTTP_RESPONSE_SIZE;
+
 pub use crate::attribute::HTTP_RESPONSE_STATUS_CODE;
+
 pub use crate::attribute::HTTP_ROUTE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_BATCH_MESSAGE_COUNT;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_CLIENT_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_CONSUMER_GROUP_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_DESTINATION_ANONYMOUS;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_DESTINATION_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_DESTINATION_PARTITION_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_DESTINATION_SUBSCRIPTION_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_DESTINATION_TEMPLATE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_DESTINATION_TEMPORARY;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_MESSAGE_BODY_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_MESSAGE_CONVERSATION_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_MESSAGE_ENVELOPE_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_MESSAGE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_OPERATION_NAME;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_OPERATION_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::MESSAGING_SYSTEM;
+
 pub use crate::attribute::NETWORK_LOCAL_ADDRESS;
+
 pub use crate::attribute::NETWORK_LOCAL_PORT;
+
 pub use crate::attribute::NETWORK_PEER_ADDRESS;
+
 pub use crate::attribute::NETWORK_PEER_PORT;
+
 pub use crate::attribute::NETWORK_PROTOCOL_NAME;
+
 pub use crate::attribute::NETWORK_PROTOCOL_VERSION;
+
 pub use crate::attribute::NETWORK_TRANSPORT;
+
 pub use crate::attribute::NETWORK_TYPE;
-pub use crate::attribute::OPENTRACING_REF_TYPE;
-pub use crate::attribute::OTEL_STATUS_CODE;
-pub use crate::attribute::OTEL_STATUS_DESCRIPTION;
-pub use crate::attribute::PEER_SERVICE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::PROCESS_COMMAND_ARGS;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::PROCESS_EXECUTABLE_NAME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::PROCESS_EXECUTABLE_PATH;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::PROCESS_EXIT_CODE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::PROCESS_PID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_CONNECT_RPC_ERROR_CODE;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::RPC_CONNECT_RPC_REQUEST_METADATA;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::RPC_CONNECT_RPC_RESPONSE_METADATA;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::RPC_GRPC_REQUEST_METADATA;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::RPC_GRPC_RESPONSE_METADATA;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_GRPC_STATUS_CODE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_JSONRPC_ERROR_CODE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_JSONRPC_ERROR_MESSAGE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_JSONRPC_REQUEST_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_JSONRPC_VERSION;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_MESSAGE_COMPRESSED_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_MESSAGE_ID;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_MESSAGE_TYPE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_MESSAGE_UNCOMPRESSED_SIZE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_METHOD;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_SERVICE;
+
+#[cfg(feature = "semconv_experimental")]
 pub use crate::attribute::RPC_SYSTEM;
+
 pub use crate::attribute::SERVER_ADDRESS;
+
 pub use crate::attribute::SERVER_PORT;
-pub use crate::attribute::THREAD_ID;
-pub use crate::attribute::THREAD_NAME;
+
 pub use crate::attribute::URL_FULL;
+
 pub use crate::attribute::URL_PATH;
+
 pub use crate::attribute::URL_QUERY;
+
 pub use crate::attribute::URL_SCHEME;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::URL_TEMPLATE;
+
 pub use crate::attribute::USER_AGENT_ORIGINAL;
+
+#[cfg(feature = "semconv_experimental")]
+pub use crate::attribute::USER_AGENT_SYNTHETIC_TYPE;
diff --git a/opentelemetry-stdout/CHANGELOG.md b/opentelemetry-stdout/CHANGELOG.md
index 061de5c524..046262458d 100644
--- a/opentelemetry-stdout/CHANGELOG.md
+++ b/opentelemetry-stdout/CHANGELOG.md
@@ -2,6 +2,39 @@
 
 ## vNext
 
+- Bump MSRV to 1.75.0.
+- *Breaking*: the time fields `StartTime` and `EndTime` are now printed on the aggregation (Sum, Gauge, Histogram, ExpoHistogram) with 2 tabs; previously they were printed on each aggregation data point, with 3 tabs. See [#2377](https://github.com/open-telemetry/opentelemetry-rust/pull/2377) and [#2411](https://github.com/open-telemetry/opentelemetry-rust/pull/2411).
+
+## 0.27.0
+
+Released 2024-Nov-11
+
+- Update `opentelemetry` dependency version to 0.27
+- Update `opentelemetry_sdk` dependency version to 0.27
+
+- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179)
+- **BREAKING**
+  - **Replaced**
+    - ([#2217](https://github.com/open-telemetry/opentelemetry-rust/pull/2217)): The `MetricsExporterBuilder` interface is modified from `with_temporality_selector` to `with_temporality`; an example can be seen below:
+      Previous Signature:
+      ```rust
+      MetricsExporterBuilder::default().with_temporality_selector(DeltaTemporalitySelector::new())
+      ```
+      Updated Signature:
+      ```rust
+      MetricsExporterBuilder::default().with_temporality(opentelemetry_sdk::metrics::Temporality::Delta)
+      ```
+  - **Renamed**
+    - ([#2255](https://github.com/open-telemetry/opentelemetry-rust/pull/2255)): de-pluralize Metric types.
+      - `MetricsExporter` -> `MetricExporter`
+      - `MetricsExporterBuilder` -> `MetricExporterBuilder`
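+
+      For illustration, combining the replaced signature with the renamed builder, a
+      delta-temporality stdout exporter would now be constructed roughly like this
+      (a minimal sketch based on the entries above, not an exhaustive migration guide):
+      ```rust
+      use opentelemetry_sdk::metrics::Temporality;
+
+      // Renamed type (`MetricExporterBuilder`) plus the new `with_temporality` setter.
+      let exporter = opentelemetry_stdout::MetricExporterBuilder::default()
+          .with_temporality(Temporality::Delta)
+          .build();
+      ```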
+
+## v0.26.0
+Released 2024-Sep-30
+
+- Update `opentelemetry` dependency version to 0.26
+- Update `opentelemetry_sdk` dependency version to 0.26
+
 ## v0.25.0
 
 - Update `opentelemetry` dependency version to 0.25
diff --git a/opentelemetry-stdout/Cargo.toml b/opentelemetry-stdout/Cargo.toml
index b28d224b4f..7061a5831b 100644
--- a/opentelemetry-stdout/Cargo.toml
+++ b/opentelemetry-stdout/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "opentelemetry-stdout"
-version = "0.25.0"
+version = "0.27.0"
 description = "An OpenTelemetry exporter for stdout"
 homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-stdout"
 repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-stdout"
@@ -13,7 +13,7 @@ categories = [
 keywords = ["opentelemetry", "tracing", "metrics", "logs"]
 license = "Apache-2.0"
 edition = "2021"
-rust-version = "1.65"
+rust-version = "1.75.0"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -23,7 +23,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 default = ["trace", "metrics", "logs"]
 trace = ["opentelemetry/trace", "opentelemetry_sdk/trace", "futures-util"]
 metrics = ["async-trait", "opentelemetry/metrics", "opentelemetry_sdk/metrics"]
-logs = ["opentelemetry/logs", "opentelemetry_sdk/logs", "async-trait", "thiserror", "opentelemetry_sdk/logs_level_enabled"]
+logs = ["opentelemetry/logs", "opentelemetry_sdk/logs", "async-trait", "thiserror", "opentelemetry_sdk/spec_unstable_logs_enabled"]
 populate-logs-event-name = []
 
 [dependencies]
@@ -31,10 +31,9 @@ async-trait = { workspace = true, optional = true }
 chrono = { version = "0.4.34", default-features = false, features = ["now"] }
 thiserror = { workspace = true, optional = true }
 futures-util = { workspace = true, optional = true }
-opentelemetry = { version = "0.25", path = "../opentelemetry" }
-opentelemetry_sdk = { version = "0.25", path = "../opentelemetry-sdk" }
+opentelemetry = { version = "0.27", path = "../opentelemetry" }
+opentelemetry_sdk = { version = "0.27", path = "../opentelemetry-sdk" }
 serde = { workspace = true, features = ["derive"] }
-serde_json = { workspace = true }
 ordered-float = { workspace = true }
 
 [dev-dependencies]
diff --git a/opentelemetry-stdout/README.md b/opentelemetry-stdout/README.md
index fe5957c7b1..6e2f1f4185 100644
--- a/opentelemetry-stdout/README.md
+++ b/opentelemetry-stdout/README.md
@@ -29,11 +29,10 @@ can easily instrument your applications or systems, no matter their language,
 infrastructure, or runtime environment. Crucially, the storage and
 visualization of telemetry is intentionally left to other tools.
 
-*Compiler support: [requires `rustc` 1.65+][msrv]*
+*[Supported Rust Versions](#supported-rust-versions)*
 
 [Prometheus]: https://prometheus.io
 [Jaeger]: https://www.jaegertracing.io
-[msrv]: #supported-rust-versions
 
 ### What does this crate contain?
 
@@ -50,7 +49,7 @@ See [docs](https://docs.rs/opentelemetry-stdout).
 ## Supported Rust Versions
 
 OpenTelemetry is built against the latest stable release. The minimum supported
-version is 1.65. The current OpenTelemetry version is not guaranteed to build
+version is 1.75.0. The current OpenTelemetry version is not guaranteed to build
 on Rust versions earlier than the minimum supported version.
 
 The current stable Rust compiler and the three most recent minor versions
diff --git a/opentelemetry-stdout/examples/basic.rs b/opentelemetry-stdout/examples/basic.rs
index 4289c74ec4..3934be9727 100644
--- a/opentelemetry-stdout/examples/basic.rs
+++ b/opentelemetry-stdout/examples/basic.rs
@@ -6,38 +6,34 @@ use opentelemetry::{global, KeyValue};
 
 #[cfg(feature = "trace")]
 use opentelemetry::trace::{Span, Tracer};
 
-#[cfg(feature = "metrics")]
-use opentelemetry_sdk::runtime;
-
 #[cfg(feature = "metrics")]
 use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider};
 
-use opentelemetry_sdk::trace::Config;
 #[cfg(feature = "trace")]
 use opentelemetry_sdk::trace::TracerProvider;
 use opentelemetry_sdk::Resource;
 
 static RESOURCE: Lazy<Resource> = Lazy::new(|| {
-    Resource::default().merge(&Resource::new(vec![KeyValue::new(
-        opentelemetry_semantic_conventions::resource::SERVICE_NAME,
-        "basic-stdout-example",
-    )]))
+    Resource::builder()
+        .with_service_name("basic-stdout-example")
+        .build()
 });
 
 #[cfg(feature = "trace")]
-fn init_trace() {
+fn init_trace() -> TracerProvider {
     let exporter = opentelemetry_stdout::SpanExporter::default();
     let provider = TracerProvider::builder()
         .with_simple_exporter(exporter)
-        .with_config(Config::default().with_resource(RESOURCE.clone()))
+        .with_resource(RESOURCE.clone())
         .build();
-    global::set_tracer_provider(provider);
+    global::set_tracer_provider(provider.clone());
+    provider
 }
 
 #[cfg(feature = "metrics")]
 fn init_metrics() -> opentelemetry_sdk::metrics::SdkMeterProvider {
-    let exporter = opentelemetry_stdout::MetricsExporter::default();
-    let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
+    let exporter = opentelemetry_stdout::MetricExporter::default();
+    let reader = PeriodicReader::builder(exporter).build();
     let provider = SdkMeterProvider::builder()
         .with_reader(reader)
         .with_resource(RESOURCE.clone())
@@ -64,16 +60,17 @@ fn init_logs() -> opentelemetry_sdk::logs::LoggerProvider {
 
 #[cfg(feature = "trace")]
 fn emit_span() {
-    use opentelemetry::trace::{
-        SpanContext, SpanId, TraceFlags, TraceId, TraceState, TracerProvider,
+    use opentelemetry::{
+        trace::{SpanContext, SpanId, TraceFlags, TraceId, TraceState},
+        InstrumentationScope,
     };
 
-    let tracer = global::tracer_provider()
-        .tracer_builder("stdout-example")
+    let scope = InstrumentationScope::builder("stdout-example")
         .with_version("v1")
-        .with_schema_url("schema_url")
         .with_attributes([KeyValue::new("scope_key", "scope_value")])
         .build();
+
+    let tracer = global::tracer_with_scope(scope);
     let mut span = tracer.start("example-span");
     span.set_attribute(KeyValue::new("attribute_key1", "attribute_value1"));
     span.set_attribute(KeyValue::new("attribute_key2", "attribute_value2"));
@@ -114,7 +111,7 @@ fn emit_span() {
 #[cfg(feature = "metrics")]
 fn emit_metrics() {
     let meter = global::meter("stdout-example");
-    let c = meter.u64_counter("example_counter").init();
+    let c = meter.u64_counter("example_counter").build();
     c.add(
         1,
         &[
@@ -151,7 +148,7 @@ fn emit_metrics() {
         ],
     );
 
-    let h = meter.f64_histogram("example_histogram").init();
+    let h = meter.f64_histogram("example_histogram").build();
     h.record(
         1.0,
         &[
@@ -198,7 +195,7 @@ fn emit_log() {
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn Error>> {
     #[cfg(feature = "trace")]
-    init_trace();
+    let tracer_provider = init_trace();
 
     #[cfg(feature = "metrics")]
     let meter_provider = init_metrics();
@@ -216,7 +213,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     emit_metrics();
 
     #[cfg(feature = "trace")]
-    global::shutdown_tracer_provider();
+    tracer_provider.shutdown()?;
 
     #[cfg(feature = "metrics")]
     meter_provider.shutdown()?;
tracer_provider.shutdown()?; #[cfg(feature = "metrics")] meter_provider.shutdown()?; diff --git a/opentelemetry-stdout/src/common.rs b/opentelemetry-stdout/src/common.rs index 236accea7c..4da706f893 100644 --- a/opentelemetry-stdout/src/common.rs +++ b/opentelemetry-stdout/src/common.rs @@ -144,7 +144,9 @@ impl From for Value { opentelemetry::Array::String(s) => { Value::Array(s.into_iter().map(|s| Value::String(s.into())).collect()) } + _ => unreachable!("Nonexistent array type"), // Needs to be updated when new array types are added }, + _ => unreachable!("Nonexistent value type"), // Needs to be updated when new value types are added } } } @@ -169,6 +171,7 @@ impl From for Value { .collect(), ), opentelemetry::logs::AnyValue::Bytes(b) => Value::BytesValue(*b), + _ => unreachable!("Nonexistent value type"), } } } @@ -230,12 +233,12 @@ pub(crate) struct Scope { dropped_attributes_count: u64, } -impl From for Scope { - fn from(value: opentelemetry_sdk::Scope) -> Self { +impl From for Scope { + fn from(value: opentelemetry::InstrumentationScope) -> Self { Scope { - name: value.name, - version: value.version, - attributes: value.attributes.into_iter().map(Into::into).collect(), + name: value.name().to_owned().into(), + version: value.version().map(ToOwned::to_owned).map(Into::into), + attributes: value.attributes().map(Into::into).collect(), dropped_attributes_count: 0, } } diff --git a/opentelemetry-stdout/src/lib.rs b/opentelemetry-stdout/src/lib.rs index deab6ac355..207eb1460c 100644 --- a/opentelemetry-stdout/src/lib.rs +++ b/opentelemetry-stdout/src/lib.rs @@ -25,7 +25,6 @@ //! use opentelemetry::{Context, KeyValue}; //! //! use opentelemetry_sdk::metrics::{SdkMeterProvider, PeriodicReader}; -//! use opentelemetry_sdk::runtime; //! use opentelemetry_sdk::trace::TracerProvider; //! //! use opentelemetry_sdk::logs::LoggerProvider; @@ -38,8 +37,8 @@ //! } //! //! fn init_metrics() -> SdkMeterProvider { -//! let exporter = opentelemetry_stdout::MetricsExporter::default(); -//! let reader = PeriodicReader::builder(exporter, runtime::Tokio).build(); +//! let exporter = opentelemetry_stdout::MetricExporter::default(); +//! let reader = PeriodicReader::builder(exporter).build(); //! SdkMeterProvider::builder().with_reader(reader).build() //! } //! diff --git a/opentelemetry-stdout/src/logs/exporter.rs b/opentelemetry-stdout/src/logs/exporter.rs index 48a8b1a120..2633f5a072 100644 --- a/opentelemetry-stdout/src/logs/exporter.rs +++ b/opentelemetry-stdout/src/logs/exporter.rs @@ -1,46 +1,50 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use core::fmt; -use opentelemetry::logs::LogResult; use opentelemetry_sdk::export::logs::LogBatch; +use opentelemetry_sdk::logs::LogResult; use opentelemetry_sdk::Resource; use std::sync::atomic; +use std::sync::atomic::Ordering; /// An OpenTelemetry exporter that writes Logs to stdout on export. 
pub struct LogExporter { resource: Resource, is_shutdown: atomic::AtomicBool, - resource_emitted: bool, + resource_emitted: atomic::AtomicBool, } impl Default for LogExporter { fn default() -> Self { LogExporter { - resource: Resource::default(), + resource: Resource::builder().build(), is_shutdown: atomic::AtomicBool::new(false), - resource_emitted: false, + resource_emitted: atomic::AtomicBool::new(false), } } } impl fmt::Debug for LogExporter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("LogsExporter") + f.write_str("LogExporter") } } #[async_trait] impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { /// Export spans to stdout - async fn export(&mut self, batch: LogBatch<'_>) -> LogResult<()> { + async fn export(&self, batch: LogBatch<'_>) -> LogResult<()> { if self.is_shutdown.load(atomic::Ordering::SeqCst) { return Err("exporter is shut down".into()); } else { println!("Logs"); - if self.resource_emitted { + if self + .resource_emitted + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_err() + { print_logs(batch); } else { - self.resource_emitted = true; println!("Resource"); if let Some(schema_url) = self.resource.schema_url() { println!("\t Resource SchemaUrl: {:?}", schema_url); @@ -69,34 +73,34 @@ fn print_logs(batch: LogBatch<'_>) { for (i, log) in batch.iter().enumerate() { println!("Log #{}", i); let (record, _library) = log; - if let Some(event_name) = record.event_name { + if let Some(event_name) = record.event_name() { println!("\t EventName: {:?}", event_name); } - if let Some(target) = &record.target { + if let Some(target) = record.target() { println!("\t Target (Scope): {:?}", target); } - if let Some(trace_context) = &record.trace_context { + if let Some(trace_context) = record.trace_context() { println!("\t TraceId: {:?}", trace_context.trace_id); println!("\t SpanId: {:?}", trace_context.span_id); } - if let Some(timestamp) = record.timestamp { + if let Some(timestamp) = record.timestamp() { let datetime: DateTime = timestamp.into(); println!("\t Timestamp: {}", datetime.format("%Y-%m-%d %H:%M:%S%.6f")); } - if let Some(timestamp) = record.observed_timestamp { + if let Some(timestamp) = record.observed_timestamp() { let datetime: DateTime = timestamp.into(); println!( "\t Observed Timestamp: {}", datetime.format("%Y-%m-%d %H:%M:%S%.6f") ); } - if let Some(severity) = record.severity_text { + if let Some(severity) = record.severity_text() { println!("\t SeverityText: {:?}", severity); } - if let Some(severity) = record.severity_number { + if let Some(severity) = record.severity_number() { println!("\t SeverityNumber: {:?}", severity); } - if let Some(body) = &record.body { + if let Some(body) = record.body() { println!("\t Body: {:?}", body); } diff --git a/opentelemetry-stdout/src/metrics/exporter.rs b/opentelemetry-stdout/src/metrics/exporter.rs index fd39f8919e..54feb33c41 100644 --- a/opentelemetry-stdout/src/metrics/exporter.rs +++ b/opentelemetry-stdout/src/metrics/exporter.rs @@ -1,52 +1,44 @@ use async_trait::async_trait; use chrono::{DateTime, Utc}; use core::{f64, fmt}; -use opentelemetry::metrics::{MetricsError, Result}; use opentelemetry_sdk::metrics::{ data::{self, ScopeMetrics}, - exporter::PushMetricsExporter, - reader::{DefaultTemporalitySelector, TemporalitySelector}, - InstrumentKind, + exporter::PushMetricExporter, }; +use opentelemetry_sdk::metrics::{MetricError, MetricResult, Temporality}; use std::fmt::Debug; use std::sync::atomic; /// An OpenTelemetry exporter that writes to 
stdout on export. -pub struct MetricsExporter { +pub struct MetricExporter { is_shutdown: atomic::AtomicBool, - temporality_selector: Box, + temporality: Temporality, } -impl MetricsExporter { +impl MetricExporter { /// Create a builder to configure this exporter. - pub fn builder() -> MetricsExporterBuilder { - MetricsExporterBuilder::default() + pub fn builder() -> MetricExporterBuilder { + MetricExporterBuilder::default() } } -impl Default for MetricsExporter { +impl Default for MetricExporter { fn default() -> Self { - MetricsExporterBuilder::default().build() + MetricExporterBuilder::default().build() } } -impl fmt::Debug for MetricsExporter { +impl fmt::Debug for MetricExporter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("MetricsExporter") - } -} - -impl TemporalitySelector for MetricsExporter { - fn temporality(&self, kind: InstrumentKind) -> data::Temporality { - self.temporality_selector.temporality(kind) + f.write_str("MetricExporter") } } #[async_trait] -impl PushMetricsExporter for MetricsExporter { +impl PushMetricExporter for MetricExporter { /// Write Metrics to stdout - async fn export(&self, metrics: &mut data::ResourceMetrics) -> Result<()> { + async fn export(&self, metrics: &mut data::ResourceMetrics) -> MetricResult<()> { if self.is_shutdown.load(atomic::Ordering::SeqCst) { - Err(MetricsError::Other("exporter is shut down".into())) + Err(MetricError::Other("exporter is shut down".into())) } else { println!("Metrics"); println!("Resource"); @@ -62,31 +54,34 @@ impl PushMetricsExporter for MetricsExporter { } } - async fn force_flush(&self) -> Result<()> { + async fn force_flush(&self) -> MetricResult<()> { // exporter holds no state, nothing to flush Ok(()) } - fn shutdown(&self) -> Result<()> { + fn shutdown(&self) -> MetricResult<()> { self.is_shutdown.store(true, atomic::Ordering::SeqCst); Ok(()) } + + fn temporality(&self) -> Temporality { + self.temporality + } } fn print_metrics(metrics: &[ScopeMetrics]) { for (i, metric) in metrics.iter().enumerate() { println!("\tInstrumentation Scope #{}", i); - println!("\t\tName : {}", &metric.scope.name); - if let Some(version) = &metric.scope.version { + println!("\t\tName : {}", &metric.scope.name()); + if let Some(version) = &metric.scope.version() { println!("\t\tVersion : {:?}", version); } - if let Some(schema_url) = &metric.scope.schema_url { + if let Some(schema_url) = &metric.scope.schema_url() { println!("\t\tSchemaUrl: {:?}", schema_url); } metric .scope - .attributes - .iter() + .attributes() .enumerate() .for_each(|(index, kv)| { if index == 0 { @@ -142,46 +137,75 @@ fn print_metrics(metrics: &[ScopeMetrics]) { fn print_sum(sum: &data::Sum) { println!("\t\tSum DataPoints"); println!("\t\tMonotonic : {}", sum.is_monotonic); - if sum.temporality == data::Temporality::Cumulative { + if sum.temporality == Temporality::Cumulative { println!("\t\tTemporality : Cumulative"); } else { println!("\t\tTemporality : Delta"); } - print_data_points(&sum.data_points); + let datetime: DateTime = sum.start_time.into(); + println!( + "\t\tStartTime : {}", + datetime.format("%Y-%m-%d %H:%M:%S%.6f") + ); + let datetime: DateTime = sum.time.into(); + println!( + "\t\tEndTime : {}", + datetime.format("%Y-%m-%d %H:%M:%S%.6f") + ); + print_sum_data_points(&sum.data_points); } fn print_gauge(gauge: &data::Gauge) { println!("\t\tGauge DataPoints"); - print_data_points(&gauge.data_points); + if let Some(start_time) = gauge.start_time { + let datetime: DateTime = start_time.into(); + println!( + 
"\t\tStartTime : {}", + datetime.format("%Y-%m-%d %H:%M:%S%.6f") + ); + } + let datetime: DateTime = gauge.time.into(); + println!( + "\t\tEndTime : {}", + datetime.format("%Y-%m-%d %H:%M:%S%.6f") + ); + print_gauge_data_points(&gauge.data_points); } fn print_histogram(histogram: &data::Histogram) { - if histogram.temporality == data::Temporality::Cumulative { + if histogram.temporality == Temporality::Cumulative { println!("\t\tTemporality : Cumulative"); } else { println!("\t\tTemporality : Delta"); } + let datetime: DateTime = histogram.start_time.into(); + println!( + "\t\tStartTime : {}", + datetime.format("%Y-%m-%d %H:%M:%S%.6f") + ); + let datetime: DateTime = histogram.time.into(); + println!( + "\t\tEndTime : {}", + datetime.format("%Y-%m-%d %H:%M:%S%.6f") + ); println!("\t\tHistogram DataPoints"); print_hist_data_points(&histogram.data_points); } -fn print_data_points(data_points: &[data::DataPoint]) { +fn print_sum_data_points(data_points: &[data::SumDataPoint]) { for (i, data_point) in data_points.iter().enumerate() { println!("\t\tDataPoint #{}", i); - if let Some(start_time) = data_point.start_time { - let datetime: DateTime = start_time.into(); - println!( - "\t\t\tStartTime : {}", - datetime.format("%Y-%m-%d %H:%M:%S%.6f") - ); - } - if let Some(end_time) = data_point.time { - let datetime: DateTime = end_time.into(); - println!( - "\t\t\tEndTime : {}", - datetime.format("%Y-%m-%d %H:%M:%S%.6f") - ); + println!("\t\t\tValue : {:#?}", data_point.value); + println!("\t\t\tAttributes :"); + for kv in data_point.attributes.iter() { + println!("\t\t\t\t -> {}: {}", kv.key, kv.value.as_str()); } + } +} + +fn print_gauge_data_points(data_points: &[data::GaugeDataPoint]) { + for (i, data_point) in data_points.iter().enumerate() { + println!("\t\tDataPoint #{}", i); println!("\t\t\tValue : {:#?}", data_point.value); println!("\t\t\tAttributes :"); for kv in data_point.attributes.iter() { @@ -193,16 +217,6 @@ fn print_data_points(data_points: &[data::DataPoint]) { fn print_hist_data_points(data_points: &[data::HistogramDataPoint]) { for (i, data_point) in data_points.iter().enumerate() { println!("\t\tDataPoint #{}", i); - let datetime: DateTime = data_point.start_time.into(); - println!( - "\t\t\tStartTime : {}", - datetime.format("%Y-%m-%d %H:%M:%S%.6f") - ); - let datetime: DateTime = data_point.time.into(); - println!( - "\t\t\tEndTime : {}", - datetime.format("%Y-%m-%d %H:%M:%S%.6f") - ); println!("\t\t\tCount : {}", data_point.count); println!("\t\t\tSum : {:?}", data_point.sum); if let Some(min) = &data_point.min { @@ -222,33 +236,28 @@ fn print_hist_data_points(data_points: &[data::HistogramDataPoint]) /// Configuration for the stdout metrics exporter #[derive(Default)] -pub struct MetricsExporterBuilder { - temporality_selector: Option>, +pub struct MetricExporterBuilder { + temporality: Option, } -impl MetricsExporterBuilder { - /// Set the temporality exporter for the exporter - pub fn with_temporality_selector( - mut self, - selector: impl TemporalitySelector + 'static, - ) -> Self { - self.temporality_selector = Some(Box::new(selector)); +impl MetricExporterBuilder { + /// Set the [Temporality] of the exporter. 
+ pub fn with_temporality(mut self, temporality: Temporality) -> Self { + self.temporality = Some(temporality); + self } /// Create a metrics exporter with the current configuration - pub fn build(self) -> MetricsExporter { - MetricsExporter { - temporality_selector: self - .temporality_selector - .unwrap_or_else(|| Box::new(DefaultTemporalitySelector::new())), + pub fn build(self) -> MetricExporter { + MetricExporter { + temporality: self.temporality.unwrap_or_default(), is_shutdown: atomic::AtomicBool::new(false), } } } -impl fmt::Debug for MetricsExporterBuilder { +impl fmt::Debug for MetricExporterBuilder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("MetricsExporterBuilder") + f.write_str("MetricExporterBuilder") } } diff --git a/opentelemetry-stdout/src/trace/exporter.rs b/opentelemetry-stdout/src/trace/exporter.rs index a5c057ca46..e2e0fbace9 100644 --- a/opentelemetry-stdout/src/trace/exporter.rs +++ b/opentelemetry-stdout/src/trace/exporter.rs @@ -23,7 +23,7 @@ impl fmt::Debug for SpanExporter { impl Default for SpanExporter { fn default() -> Self { SpanExporter { - resource: Resource::default(), + resource: Resource::builder().build(), is_shutdown: atomic::AtomicBool::new(false), resource_emitted: false, } @@ -72,16 +72,18 @@ fn print_spans(batch: Vec<SpanData>) { for (i, span) in batch.into_iter().enumerate() { println!("Span #{}", i); println!("\tInstrumentation Scope"); - println!("\t\tName : {:?}", &span.instrumentation_lib.name); - if let Some(version) = &span.instrumentation_lib.version { + println!( + "\t\tName : {:?}", + &span.instrumentation_scope.name() + ); + if let Some(version) = &span.instrumentation_scope.version() { println!("\t\tVersion : {:?}", version); } - if let Some(schema_url) = &span.instrumentation_lib.schema_url { + if let Some(schema_url) = &span.instrumentation_scope.schema_url() { println!("\t\tSchemaUrl: {:?}", schema_url); } - span.instrumentation_lib - .attributes - .iter() + span.instrumentation_scope + .attributes() .enumerate() .for_each(|(index, kv)| { if index == 0 { diff --git a/opentelemetry-zipkin/CHANGELOG.md b/opentelemetry-zipkin/CHANGELOG.md index 56cb32b22a..2c92453ce2 100644 --- a/opentelemetry-zipkin/CHANGELOG.md +++ b/opentelemetry-zipkin/CHANGELOG.md @@ -2,6 +2,27 @@ ## vNext +- Bump MSRV to 1.75.0.
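With temporality now reported by the exporter itself via the new `temporality()` method on `PushMetricExporter`, rather than a separate `TemporalitySelector`, a custom push exporter reduces to four methods. A sketch against the trait shape visible in this diff; `CountingExporter` and its behavior are invented for illustration:

```rust
use async_trait::async_trait;
use opentelemetry_sdk::metrics::{
    data::ResourceMetrics, exporter::PushMetricExporter, MetricResult, Temporality,
};
use std::sync::atomic::{AtomicUsize, Ordering};

/// Toy exporter that counts export calls instead of printing them.
#[derive(Debug, Default)]
struct CountingExporter {
    exports: AtomicUsize,
}

#[async_trait]
impl PushMetricExporter for CountingExporter {
    async fn export(&self, _metrics: &mut ResourceMetrics) -> MetricResult<()> {
        self.exports.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }

    async fn force_flush(&self) -> MetricResult<()> {
        Ok(()) // nothing buffered in this sketch
    }

    fn shutdown(&self) -> MetricResult<()> {
        Ok(())
    }

    fn temporality(&self) -> Temporality {
        // Matches the stdout builder's `unwrap_or_default()` fallback above.
        Temporality::default()
    }
}
```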
+ +## 0.27.0 + +Released 2024-Nov-11 + +- Update `opentelemetry` dependency version to 0.27 +- Update `opentelemetry_sdk` dependency version to 0.27 +- Update `opentelemetry-http` dependency version to 0.27 +- Update `opentelemetry-semantic-conventions` dependency version to 0.27 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) + +## v0.26.0 +Released 2024-Sep-30 + +- Update `opentelemetry` dependency version to 0.26 +- Update `opentelemetry_sdk` dependency version to 0.26 +- Update `opentelemetry-http` dependency version to 0.26 +- Update `opentelemetry-semantic-conventions` dependency version to 0.26 + ## v0.25.0 - Update `opentelemetry` dependency version to 0.25 diff --git a/opentelemetry-zipkin/Cargo.toml b/opentelemetry-zipkin/Cargo.toml index 0e190e0c69..dba2fbc480 100644 --- a/opentelemetry-zipkin/Cargo.toml +++ b/opentelemetry-zipkin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry-zipkin" -version = "0.25.0" +version = "0.27.0" description = "Zipkin exporter for OpenTelemetry" homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-zipkin" repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-zipkin" @@ -13,7 +13,7 @@ categories = [ keywords = ["opentelemetry", "zipkin", "tracing", "async"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [package.metadata.docs.rs] all-features = true @@ -28,10 +28,10 @@ reqwest-rustls = ["reqwest", "reqwest/rustls-tls-native-roots"] [dependencies] async-trait = { workspace = true } once_cell = { workspace = true } -opentelemetry = { version = "0.25", path = "../opentelemetry" } -opentelemetry_sdk = { version = "0.25", path = "../opentelemetry-sdk", features = ["trace"] } -opentelemetry-http = { version = "0.25", path = "../opentelemetry-http" } -opentelemetry-semantic-conventions = { version = "0.25", path = "../opentelemetry-semantic-conventions" } +opentelemetry = { version = "0.27", path = "../opentelemetry" } +opentelemetry_sdk = { version = "0.27", path = "../opentelemetry-sdk", features = ["trace"] } +opentelemetry-http = { version = "0.27", path = "../opentelemetry-http" } +opentelemetry-semantic-conventions = { version = "0.27", path = "../opentelemetry-semantic-conventions" } serde_json = { workspace = true } serde = { workspace = true, features = ["derive"] } typed-builder = "0.18" diff --git a/opentelemetry-zipkin/README.md b/opentelemetry-zipkin/README.md index b629e28cd0..2220e90cd0 100644 --- a/opentelemetry-zipkin/README.md +++ b/opentelemetry-zipkin/README.md @@ -30,6 +30,8 @@ of telemetry is intentionally left to other tools. [`Zipkin`]: https://zipkin.io/ [`OpenTelemetry`]: https://crates.io/crates/opentelemetry +*[Supported Rust Versions](#supported-rust-versions)* + ## Quickstart First make sure you have a running version of the zipkin process you want to @@ -54,7 +56,7 @@ fn main() -> Result<(), Box> { // Traced app logic here... }); - global::shutdown_tracer_provider(); + provider.shutdown().expect("TracerProvider should shutdown successfully"); Ok(()) } @@ -108,7 +110,7 @@ available so be sure to match them appropriately. ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.65. The current OpenTelemetry version is not guaranteed to build on +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. 
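Putting the new return shape together, here is a minimal end-to-end sketch of the updated quickstart; the service name is illustrative, and the error bound on `main` is an assumption chosen for brevity:

```rust
use opentelemetry::{global, trace::Tracer};

fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    global::set_text_map_propagator(opentelemetry_zipkin::Propagator::new());

    // install_simple() now hands back the provider alongside the tracer,
    // so shutdown is driven by the caller instead of
    // global::shutdown_tracer_provider().
    let (tracer, provider) = opentelemetry_zipkin::new_pipeline()
        .with_service_name("quickstart-demo") // illustrative name
        .install_simple()?;

    tracer.in_span("doing_work", |_cx| {
        // Traced app logic here...
    });

    provider.shutdown()?; // flush remaining spans
    Ok(())
}
```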
The current stable Rust compiler and the three most recent minor versions before diff --git a/opentelemetry-zipkin/examples/zipkin.rs b/opentelemetry-zipkin/examples/zipkin.rs index 48c902a928..09b755f3fa 100644 --- a/opentelemetry-zipkin/examples/zipkin.rs +++ b/opentelemetry-zipkin/examples/zipkin.rs @@ -1,5 +1,5 @@ use opentelemetry::{ - global::{self, shutdown_tracer_provider}, + global::{self}, trace::{Span, Tracer}, }; use std::thread; @@ -13,7 +13,7 @@ fn bar() { } fn main() -> Result<(), Box> { - let tracer = opentelemetry_zipkin::new_pipeline() + let (tracer, provider) = opentelemetry_zipkin::new_pipeline() .with_service_name("trace-demo") .install_simple()?; @@ -23,6 +23,6 @@ fn main() -> Result<(), Box> { thread::sleep(Duration::from_millis(6)); }); - shutdown_tracer_provider(); + provider.shutdown()?; Ok(()) } diff --git a/opentelemetry-zipkin/src/exporter/mod.rs b/opentelemetry-zipkin/src/exporter/mod.rs index 88edcfb025..889061a40e 100644 --- a/opentelemetry-zipkin/src/exporter/mod.rs +++ b/opentelemetry-zipkin/src/exporter/mod.rs @@ -6,7 +6,7 @@ use async_trait::async_trait; use futures_core::future::BoxFuture; use http::Uri; use model::endpoint::Endpoint; -use opentelemetry::{global, trace::TraceError, KeyValue}; +use opentelemetry::{global, trace::TraceError, InstrumentationScope, KeyValue}; use opentelemetry_http::HttpClient; use opentelemetry_sdk::{ export::{trace, ExportError}, @@ -19,7 +19,6 @@ use opentelemetry_semantic_conventions as semcov; use std::borrow::Cow; use std::net::SocketAddr; use std::sync::Arc; -use std::time::Duration; /// Zipkin span exporter #[derive(Debug)] @@ -97,26 +96,32 @@ impl ZipkinPipelineBuilder { let service_name = self.service_name.take(); if let Some(service_name) = service_name { let config = if let Some(mut cfg) = self.trace_config.take() { - cfg.resource = Cow::Owned(Resource::new( - cfg.resource - .iter() - .filter(|(k, _v)| k.as_str() != semcov::resource::SERVICE_NAME) - .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) - .collect::>(), - )); + cfg.resource = Cow::Owned( + Resource::builder_empty() + .with_attributes( + cfg.resource + .iter() + .filter(|(k, _v)| k.as_str() != semcov::resource::SERVICE_NAME) + .map(|(k, v)| KeyValue::new(k.clone(), v.clone())) + .collect::>(), + ) + .build(), + ); cfg } else { - Config::default().with_resource(Resource::empty()) + #[allow(deprecated)] + Config::default().with_resource(Resource::builder_empty().build()) }; (config, Endpoint::new(service_name, self.service_addr)) } else { let service_name = SdkProvidedResourceDetector - .detect(Duration::from_secs(0)) + .detect() .get(semcov::resource::SERVICE_NAME.into()) .unwrap() .to_string(); ( - Config::default().with_resource(Resource::empty()), + #[allow(deprecated)] + Config::default().with_resource(Resource::builder_empty().build()), Endpoint::new(service_name, self.service_addr), ) } @@ -138,36 +143,43 @@ impl ZipkinPipelineBuilder { } /// Install the Zipkin trace exporter pipeline with a simple span processor. 
- pub fn install_simple(mut self) -> Result { + #[allow(deprecated)] + pub fn install_simple( + mut self, + ) -> Result<(Tracer, opentelemetry_sdk::trace::TracerProvider), TraceError> { let (config, endpoint) = self.init_config_and_endpoint(); let exporter = self.init_exporter_with_endpoint(endpoint)?; let mut provider_builder = TracerProvider::builder().with_simple_exporter(exporter); provider_builder = provider_builder.with_config(config); let provider = provider_builder.build(); - let tracer = - opentelemetry::trace::TracerProvider::tracer_builder(&provider, "opentelemetry-zipkin") - .with_version(env!("CARGO_PKG_VERSION")) - .with_schema_url(semcov::SCHEMA_URL) - .build(); - let _ = global::set_tracer_provider(provider); - Ok(tracer) + let scope = InstrumentationScope::builder("opentelemetry-zipkin") + .with_version(env!("CARGO_PKG_VERSION")) + .with_schema_url(semcov::SCHEMA_URL) + .build(); + let tracer = opentelemetry::trace::TracerProvider::tracer_with_scope(&provider, scope); + let _ = global::set_tracer_provider(provider.clone()); + Ok((tracer, provider)) } /// Install the Zipkin trace exporter pipeline with a batch span processor using the specified /// runtime. - pub fn install_batch(mut self, runtime: R) -> Result { + #[allow(deprecated)] + pub fn install_batch( + mut self, + runtime: R, + ) -> Result<(Tracer, opentelemetry_sdk::trace::TracerProvider), TraceError> { let (config, endpoint) = self.init_config_and_endpoint(); let exporter = self.init_exporter_with_endpoint(endpoint)?; let mut provider_builder = TracerProvider::builder().with_batch_exporter(exporter, runtime); provider_builder = provider_builder.with_config(config); let provider = provider_builder.build(); - let tracer = - opentelemetry::trace::TracerProvider::tracer_builder(&provider, "opentelemetry-zipkin") - .with_version(env!("CARGO_PKG_VERSION")) - .with_schema_url(semcov::SCHEMA_URL) - .build(); - let _ = global::set_tracer_provider(provider); - Ok(tracer) + let scope = InstrumentationScope::builder("opentelemetry-zipkin") + .with_version(env!("CARGO_PKG_VERSION")) + .with_schema_url(semcov::SCHEMA_URL) + .build(); + let tracer = opentelemetry::trace::TracerProvider::tracer_with_scope(&provider, scope); + let _ = global::set_tracer_provider(provider.clone()); + Ok((tracer, provider)) } /// Assign the service name under which to group traces. 
@@ -252,3 +264,9 @@ impl ExportError for Error { "zipkin" } } + +impl opentelemetry::trace::ExportError for Error { + fn exporter_name(&self) -> &'static str { + "zipkin" + } +} diff --git a/opentelemetry-zipkin/src/exporter/model/mod.rs b/opentelemetry-zipkin/src/exporter/model/mod.rs index b143848f31..a78708a2ae 100644 --- a/opentelemetry-zipkin/src/exporter/model/mod.rs +++ b/opentelemetry-zipkin/src/exporter/model/mod.rs @@ -46,11 +46,14 @@ pub(crate) fn into_zipkin_span(local_endpoint: Endpoint, span_data: SpanData) -> [ ( INSTRUMENTATION_LIBRARY_NAME, - Some(span_data.instrumentation_lib.name), + Some(span_data.instrumentation_scope.name().to_owned()), ), ( INSTRUMENTATION_LIBRARY_VERSION, - span_data.instrumentation_lib.version, + span_data + .instrumentation_scope + .version() + .map(ToOwned::to_owned), ), ] .into_iter() diff --git a/opentelemetry-zipkin/src/exporter/model/span.rs b/opentelemetry-zipkin/src/exporter/model/span.rs index 8c9c7fd5a1..51223be92b 100644 --- a/opentelemetry-zipkin/src/exporter/model/span.rs +++ b/opentelemetry-zipkin/src/exporter/model/span.rs @@ -165,7 +165,7 @@ mod tests { events: SpanEvents::default(), links: SpanLinks::default(), status, - instrumentation_lib: Default::default(), + instrumentation_scope: Default::default(), }; let local_endpoint = Endpoint::new("test".into(), None); let span = into_zipkin_span(local_endpoint, span_data); diff --git a/opentelemetry-zipkin/src/lib.rs b/opentelemetry-zipkin/src/lib.rs index 0e8db47dd3..8d414db8a8 100644 --- a/opentelemetry-zipkin/src/lib.rs +++ b/opentelemetry-zipkin/src/lib.rs @@ -26,13 +26,13 @@ //! //! fn main() -> Result<(), TraceError> { //! global::set_text_map_propagator(opentelemetry_zipkin::Propagator::new()); -//! let tracer = opentelemetry_zipkin::new_pipeline().install_simple()?; +//! let (tracer, provider) = opentelemetry_zipkin::new_pipeline().install_simple()?; //! //! tracer.in_span("doing_work", |cx| { //! // Traced app logic here... //! }); //! -//! global::shutdown_tracer_provider(); // sending remaining spans +//! provider.shutdown().expect("TracerProvider should shutdown successfully"); // sending remaining spans //! //! Ok(()) //! } @@ -131,7 +131,7 @@ //! //! fn main() -> Result<(), Box> { //! global::set_text_map_propagator(opentelemetry_zipkin::Propagator::new()); -//! let tracer = opentelemetry_zipkin::new_pipeline() +//! let (tracer, provider) = opentelemetry_zipkin::new_pipeline() //! .with_http_client( //! HyperClient( //! Client::builder(TokioExecutor::new()) @@ -148,7 +148,7 @@ //! .with_max_events_per_span(64) //! .with_max_attributes_per_span(16) //! .with_max_events_per_span(16) -//! .with_resource(Resource::new(vec![KeyValue::new("key", "value")])), +//! .with_resource(Resource::builder_empty().with_attribute(KeyValue::new("key", "value")).build()), //! ) //! .install_batch(opentelemetry_sdk::runtime::Tokio)?; //! @@ -156,7 +156,7 @@ //! // Traced app logic here... //! }); //! -//! global::shutdown_tracer_provider(); // sending remaining spans +//! provider.shutdown()?; // sending remaining spans //! //! Ok(()) //! } diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index 1f0c9be637..fa94f47be1 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -2,6 +2,77 @@ ## vNext +- Bump msrv to 1.75.0. 
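Both zipkin install paths above now mint their tracer from an `InstrumentationScope` instead of the removed `tracer_builder`. The same pattern works in isolation against any provider implementation; the scope name, version, schema URL, and attributes below are placeholders:

```rust
use opentelemetry::trace::TracerProvider;
use opentelemetry::{InstrumentationScope, KeyValue};

// Build a scope once, then mint a tracer from any provider with it.
fn tracer_for<P: TracerProvider>(provider: &P) -> P::Tracer {
    let scope = InstrumentationScope::builder("my-crate")
        .with_version("0.1.0")
        .with_schema_url("https://opentelemetry.io/schemas/1.17.0")
        .with_attributes([KeyValue::new("scope_key", "scope_value")])
        .build();
    provider.tracer_with_scope(scope)
}
```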
+ +## 0.27.1 + +Released 2024-Nov-27 + +## 0.27.0 + +Released 2024-Nov-11 + +- Bump MSRV to 1.70 [#2179](https://github.com/open-telemetry/opentelemetry-rust/pull/2179) +- Add `LogRecord::set_trace_context`; an optional method conditional on the `trace` feature for setting trace context on a log record. +- Removed unnecessary public methods named `as_any` from `AsyncInstrument` trait and the implementing instruments: `ObservableCounter`, `ObservableGauge`, and `ObservableUpDownCounter` [#2187](https://github.com/open-telemetry/opentelemetry-rust/pull/2187) +- Introduced `SyncInstrument` trait to replace the individual synchronous instrument traits (`SyncCounter`, `SyncGauge`, `SyncHistogram`, `SyncUpDownCounter`) which are meant for SDK implementation. [#2207](https://github.com/open-telemetry/opentelemetry-rust/pull/2207) +- Ensured that `observe` method on asynchronous instruments can only be called inside a callback. This was done by removing the implementation of `AsyncInstrument` trait for each of the asynchronous instruments. [#2210](https://github.com/open-telemetry/opentelemetry-rust/pull/2210) +- Removed `PartialOrd` and `Ord` implementations for `KeyValue`. [#2215](https://github.com/open-telemetry/opentelemetry-rust/pull/2215) +- **Breaking change for exporter authors:** Marked `KeyValue` related structs and enums as `non_exhaustive`. [#2228](https://github.com/open-telemetry/opentelemetry-rust/pull/2228) +- **Breaking change for log exporter authors:** Marked `AnyValue` enum as `non_exhaustive`. [#2230](https://github.com/open-telemetry/opentelemetry-rust/pull/2230) +- **Breaking change for Metrics users:** The `init` method used to create instruments has been renamed to `build`. Also, the `try_init()` method is removed from instrument builders. The return types of `InstrumentProvider` trait methods were modified to return the instrument struct instead of `Result`. [#2227](https://github.com/open-telemetry/opentelemetry-rust/pull/2227) + +Before: +```rust +let counter = meter.u64_counter("my_counter").init(); +``` + +Now: +```rust +let counter = meter.u64_counter("my_counter").build(); +``` +- **Breaking change**: [#2220](https://github.com/open-telemetry/opentelemetry-rust/pull/2220) + - Removed deprecated method `InstrumentationLibrary::new` + - Renamed `InstrumentationLibrary` to `InstrumentationScope` + - Renamed `InstrumentationLibraryBuilder` to `InstrumentationScopeBuilder` + - Removed deprecated methods `LoggerProvider::versioned_logger` and `TracerProvider::versioned_tracer` + - Removed methods `LoggerProvider::logger_builder`, `TracerProvider::tracer_builder` and `MeterProvider::versioned_meter` + - Replaced these methods with `LoggerProvider::logger_with_scope`, `TracerProvider::tracer_with_scope`, `MeterProvider::meter_with_scope` + - Replaced `global::meter_with_version` with `global::meter_with_scope` + - Added `global::tracer_with_scope` + - Refer to PR description for migration guide. +- **Breaking change**: replaced `InstrumentationScope` public attributes with getters [#2275](https://github.com/open-telemetry/opentelemetry-rust/pull/2275) + +- **Breaking change**: [#2260](https://github.com/open-telemetry/opentelemetry-rust/pull/2260) + - Removed `global::set_error_handler` and `global::handle_error`. + - `global::handle_error` usage inside the opentelemetry crates has been replaced with `global::otel_info`, `otel_warn`, `otel_debug` and `otel_error` macros based on the severity of the internal logs.
+  - The default behavior of `global::handle_error` was to log the error using `eprintln!`. With otel macros, the internal logs get emitted via `tracing` macros of matching severity. Users now need to configure a `tracing` layer/subscriber to capture these logs. +  - Refer to PR description for migration guide. Also refer to the [self-diagnostics](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/self-diagnostics) example to learn how to view internal logs in stdout using the `tracing::fmt` layer. + +- **Breaking change for exporter/processor authors:** [#2266](https://github.com/open-telemetry/opentelemetry-rust/pull/2266) +  - Moved `ExportError` trait from `opentelemetry::ExportError` to `opentelemetry_sdk::export::ExportError` +  - Created new trait `opentelemetry::trace::ExportError` for the trace API. This will eventually be consolidated with `ExportError` in the SDK. +  - Moved `LogError` enum from `opentelemetry::logs::LogError` to `opentelemetry_sdk::logs::LogError` +  - Moved `LogResult` type alias from `opentelemetry::logs::LogResult` to `opentelemetry_sdk::logs::LogResult` +  - Moved `MetricError` enum from `opentelemetry::metrics::MetricError` to `opentelemetry_sdk::metrics::MetricError` +  - Moved `MetricResult` type alias from `opentelemetry::metrics::MetricResult` to `opentelemetry_sdk::metrics::MetricResult` +  These changes shouldn't directly affect users of the OpenTelemetry crate, as these constructs are used in the SDK and exporters. If you are an author of an SDK component or plug-in, such as an exporter, please use these types from the SDK. Refer to [CHANGELOG.md](https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-sdk/CHANGELOG.md) for more details, under the same version section. +- **Breaking** [#2291](https://github.com/open-telemetry/opentelemetry-rust/pull/2291) Rename the `logs_level_enabled` flag to `spec_unstable_logs_enabled`. Please enable this updated flag if the feature is needed. This flag will be removed once the feature is stabilized in the specifications. + +## v0.26.0 +Released 2024-Sep-30 + +- **BREAKING** Public API changes: +  - **Removed**: `Key.bool()`, `Key.i64()`, `Key.f64()`, `Key.string()`, `Key.array()` [#2090](https://github.com/open-telemetry/opentelemetry-rust/issues/2090). These APIs were redundant as they didn't offer any additional functionality. The existing `KeyValue::new()` API covers all the scenarios offered by these APIs. + +  - **Removed**: `ObjectSafeMeterProvider` and `GlobalMeterProvider` [#2112](https://github.com/open-telemetry/opentelemetry-rust/pull/2112). These APIs were unnecessary and were mainly meant for internal use. + +  - **Modified**: `MeterProvider.meter()` and `MeterProvider.versioned_meter()` argument types have been updated to `&'static str` instead of `impl Into<Cow<'static, str>>` [#2112](https://github.com/open-telemetry/opentelemetry-rust/pull/2112). These APIs were modified to enforce the Meter `name`, `version`, and `schema_url` to be `&'static str`. + +  - **Renamed**: `NoopMeterCore` to `NoopMeter` + +- Added `with_boundaries` API to allow users to provide custom bounds for Histogram instruments.
[#2135](https://github.com/open-telemetry/opentelemetry-rust/pull/2135) + ## v0.25.0 - **BREAKING** [#1993](https://github.com/open-telemetry/opentelemetry-rust/pull/1993) Box complex types in AnyValue enum diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index 2cd03c778f..8175cf4412 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "opentelemetry" -version = "0.25.0" +version = "0.27.1" description = "OpenTelemetry API for Rust" homepage = "https://github.com/open-telemetry/opentelemetry-rust" repository = "https://github.com/open-telemetry/opentelemetry-rust" @@ -11,36 +11,37 @@ categories = [ "api-bindings", "asynchronous", ] -keywords = ["opentelemetry", "logging", "tracing", "metrics", "async"] +keywords = ["opentelemetry", "logging", "tracing", "metrics"] license = "Apache-2.0" edition = "2021" -rust-version = "1.65" +rust-version = "1.75.0" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -futures-core = { workspace = true } -futures-sink = "0.3" -once_cell = { workspace = true } +futures-core = { workspace = true, optional = true } +futures-sink = { version = "0.3", optional = true } pin-project-lite = { workspace = true, optional = true } -thiserror = { workspace = true } +thiserror = { workspace = true, optional = true} +tracing = {workspace = true, optional = true} # optional for opentelemetry internal logging [target.'cfg(all(target_arch = "wasm32", not(target_os = "wasi")))'.dependencies] js-sys = "0.3.63" [features] -default = ["trace", "metrics", "logs"] -trace = ["pin-project-lite"] +default = ["trace", "metrics", "logs", "internal-logs"] +trace = ["pin-project-lite", "futures-sink", "futures-core", "thiserror"] metrics = [] testing = ["trace", "metrics"] logs = [] -logs_level_enabled = ["logs"] +spec_unstable_logs_enabled = ["logs"] otel_unstable = [] +internal-logs = ["tracing"] [dev-dependencies] -opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["logs_level_enabled"]} # for documentation tests +opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["spec_unstable_logs_enabled"]} # for documentation tests criterion = { workspace = true } rand = { workspace = true } diff --git a/opentelemetry/README.md b/opentelemetry/README.md index 6aacb4925a..522c91e632 100644 --- a/opentelemetry/README.md +++ b/opentelemetry/README.md @@ -28,7 +28,7 @@ can easily instrument your applications or systems, no matter their language, infrastructure, or runtime environment. Crucially, the storage and visualization of telemetry is intentionally left to other tools. -*Compiler support: [requires `rustc` 1.65+][msrv]* +*[Supported Rust Versions](#supported-rust-versions)* [Prometheus]: https://prometheus.io [Jaeger]: https://www.jaegertracing.io @@ -56,7 +56,7 @@ Here's a breakdown of its components: Allows for the attachment of metadata (baggage) to telemetry, which can be used for sharing application-specific information across service boundaries. - **[Logs Bridge - API](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/bridge-api.md):** + API](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/api.md):** Allows to bridge existing logging mechanisms with OpenTelemetry logging. 
This is **NOT** meant for end users to call, instead it is meant to enable writing bridges/appenders for existing logging mechanisms such as @@ -133,7 +133,7 @@ See [docs](https://docs.rs/opentelemetry). ## Supported Rust Versions OpenTelemetry is built against the latest stable release. The minimum supported -version is 1.65. The current OpenTelemetry version is not guaranteed to build +version is 1.75.0. The current OpenTelemetry version is not guaranteed to build on Rust versions earlier than the minimum supported version. The current stable Rust compiler and the three most recent minor versions diff --git a/opentelemetry/benches/metrics.rs b/opentelemetry/benches/metrics.rs index 6f2102536e..f743fde7c1 100644 --- a/opentelemetry/benches/metrics.rs +++ b/opentelemetry/benches/metrics.rs @@ -22,7 +22,7 @@ use opentelemetry::{global, metrics::Counter, KeyValue}; fn create_counter() -> Counter { let meter = global::meter("benchmarks"); - meter.u64_counter("counter_bench").init() + meter.u64_counter("counter_bench").build() } fn criterion_benchmark(c: &mut Criterion) { diff --git a/opentelemetry/src/baggage.rs b/opentelemetry/src/baggage.rs index 37ba28e682..279613a287 100644 --- a/opentelemetry/src/baggage.rs +++ b/opentelemetry/src/baggage.rs @@ -15,16 +15,22 @@ //! //! [W3C Baggage]: https://w3c.github.io/baggage use crate::{Context, Key, KeyValue, Value}; -use once_cell::sync::Lazy; use std::collections::{hash_map, HashMap}; use std::fmt; +use std::sync::OnceLock; -static DEFAULT_BAGGAGE: Lazy = Lazy::new(Baggage::default); +static DEFAULT_BAGGAGE: OnceLock = OnceLock::new(); const MAX_KEY_VALUE_PAIRS: usize = 180; const MAX_BYTES_FOR_ONE_PAIR: usize = 4096; const MAX_LEN_OF_ALL_PAIRS: usize = 8192; +/// Returns the default baggage, ensuring it is initialized only once. +#[inline] +fn get_default_baggage() -> &'static Baggage { + DEFAULT_BAGGAGE.get_or_init(Baggage::default) +} + /// A set of name/value pairs describing user-defined properties. /// /// ### Baggage Names @@ -399,7 +405,7 @@ impl BaggageExt for Context { } fn baggage(&self) -> &Baggage { - self.get::().unwrap_or(&DEFAULT_BAGGAGE) + self.get::().unwrap_or(get_default_baggage()) } } diff --git a/opentelemetry/src/common.rs b/opentelemetry/src/common.rs index 6e3a6f6710..85fa6e7f9d 100644 --- a/opentelemetry/src/common.rs +++ b/opentelemetry/src/common.rs @@ -7,6 +7,7 @@ use std::{fmt, hash}; /// See the [attribute naming] spec for guidelines. /// /// [attribute naming]: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/general/attribute-naming.md +#[non_exhaustive] #[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct Key(OtelString); @@ -149,6 +150,7 @@ impl hash::Hash for OtelString { } /// A [Value::Array] containing homogeneous values. +#[non_exhaustive] #[derive(Clone, Debug, PartialEq)] pub enum Array { /// Array of bools @@ -212,6 +214,7 @@ into_array!( ); /// The value part of attribute [KeyValue] pairs. +#[non_exhaustive] #[derive(Clone, Debug, PartialEq)] pub enum Value { /// bool values @@ -227,7 +230,8 @@ pub enum Value { } /// Wrapper for string-like values -#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[non_exhaustive] +#[derive(Clone, PartialEq, Eq, Hash)] pub struct StringValue(OtelString); impl fmt::Debug for StringValue { @@ -372,6 +376,7 @@ impl fmt::Display for Value { /// A key-value pair describing an attribute. 
#[derive(Clone, Debug, PartialEq)] +#[non_exhaustive] pub struct KeyValue { /// The attribute name pub key: Key, @@ -394,15 +399,9 @@ impl KeyValue { } } -/// Marker trait for errors returned by exporters -pub trait ExportError: std::error::Error + Send + Sync + 'static { - /// The name of exporter that returned this error - fn exporter_name(&self) -> &'static str; -} - /// Information about a library or crate providing instrumentation. /// -/// An instrumentation library should be named to follow any naming conventions +/// An instrumentation scope should be named to follow any naming conventions /// of the instrumented library (e.g. 'middleware' for a web framework). /// /// See the [instrumentation libraries] spec for more information. @@ -410,37 +409,28 @@ pub trait ExportError: std::error::Error + Send + Sync + 'static { /// [instrumentation libraries]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/overview.md#instrumentation-libraries #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct InstrumentationLibrary { +pub struct InstrumentationScope { /// The library name. /// /// This should be the name of the crate providing the instrumentation. - pub name: Cow<'static, str>, + name: Cow<'static, str>, /// The library version. - /// - /// # Examples - /// - /// ``` - /// let library = opentelemetry::InstrumentationLibrary::builder("my-crate"). - /// with_version(env!("CARGO_PKG_VERSION")). - /// with_schema_url("https://opentelemetry.io/schemas/1.17.0"). - /// build(); - /// ``` - pub version: Option>, + version: Option>, - /// [Schema url] used by this library. + /// [Schema URL] used by this library. /// - /// [Schema url]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url - pub schema_url: Option>, + /// [Schema URL]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url + schema_url: Option>, /// Specifies the instrumentation scope attributes to associate with emitted telemetry. - pub attributes: Vec, + attributes: Vec, } -// Uniqueness for InstrumentationLibrary/InstrumentationScope does not depend on attributes -impl Eq for InstrumentationLibrary {} +// Uniqueness for InstrumentationScope does not depend on attributes +impl Eq for InstrumentationScope {} -impl PartialEq for InstrumentationLibrary { +impl PartialEq for InstrumentationScope { fn eq(&self, other: &Self) -> bool { self.name == other.name && self.version == other.version @@ -448,7 +438,7 @@ impl PartialEq for InstrumentationLibrary { } } -impl hash::Hash for InstrumentationLibrary { +impl hash::Hash for InstrumentationScope { fn hash(&self, state: &mut H) { self.name.hash(state); self.version.hash(state); @@ -456,39 +446,47 @@ impl hash::Hash for InstrumentationLibrary { } } -impl InstrumentationLibrary { - /// Deprecated, use [`InstrumentationLibrary::builder()`] - /// - /// Create an new instrumentation library. 
- #[deprecated(since = "0.23.0", note = "Please use builder() instead")] - pub fn new( - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, - ) -> InstrumentationLibrary { - InstrumentationLibrary { - name: name.into(), - version: version.map(Into::into), - schema_url: schema_url.map(Into::into), - attributes: attributes.unwrap_or_default(), - } - } - - /// Create a new builder to create an [InstrumentationLibrary] - pub fn builder>>(name: T) -> InstrumentationLibraryBuilder { - InstrumentationLibraryBuilder { +impl InstrumentationScope { + /// Create a new builder to create an [InstrumentationScope] + pub fn builder>>(name: T) -> InstrumentationScopeBuilder { + InstrumentationScopeBuilder { name: name.into(), version: None, schema_url: None, attributes: None, } } + + /// Returns the instrumentation library name. + #[inline] + pub fn name(&self) -> &str { + &self.name + } + + /// Returns the instrumentation library version. + #[inline] + pub fn version(&self) -> Option<&str> { + self.version.as_deref() + } + + /// Returns the [Schema URL] used by this library. + /// + /// [Schema URL]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/schemas/overview.md#schema-url + #[inline] + pub fn schema_url(&self) -> Option<&str> { + self.schema_url.as_deref() + } + + /// Returns the instrumentation scope attributes to associate with emitted telemetry. + #[inline] + pub fn attributes(&self) -> impl Iterator { + self.attributes.iter() + } } -/// Configuration options for [InstrumentationLibrary]. +/// Configuration options for [InstrumentationScope]. /// -/// An instrumentation library is a library or crate providing instrumentation. +/// An instrumentation scope is a library or crate providing instrumentation. /// It should be named to follow any naming conventions of the instrumented /// library (e.g. 'middleware' for a web framework). 
/// @@ -498,23 +496,20 @@ impl InstrumentationLibrary { /// /// [instrumentation libraries]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.9.0/specification/overview.md#instrumentation-libraries #[derive(Debug)] -pub struct InstrumentationLibraryBuilder { +pub struct InstrumentationScopeBuilder { name: Cow<'static, str>, - version: Option>, - schema_url: Option>, - attributes: Option>, } -impl InstrumentationLibraryBuilder { - /// Configure the version for the instrumentation library +impl InstrumentationScopeBuilder { + /// Configure the version for the instrumentation scope /// /// # Examples /// /// ``` - /// let library = opentelemetry::InstrumentationLibrary::builder("my-crate") + /// let scope = opentelemetry::InstrumentationScope::builder("my-crate") /// .with_version("v0.1.0") /// .build(); /// ``` @@ -523,12 +518,12 @@ impl InstrumentationLibraryBuilder { self } - /// Configure the Schema URL for the instrumentation library + /// Configure the Schema URL for the instrumentation scope /// /// # Examples /// /// ``` - /// let library = opentelemetry::InstrumentationLibrary::builder("my-crate") + /// let scope = opentelemetry::InstrumentationScope::builder("my-crate") /// .with_schema_url("https://opentelemetry.io/schemas/1.17.0") /// .build(); /// ``` @@ -537,14 +532,14 @@ impl InstrumentationLibraryBuilder { self } - /// Configure the attributes for the instrumentation library + /// Configure the attributes for the instrumentation scope /// /// # Examples /// /// ``` /// use opentelemetry::KeyValue; /// - /// let library = opentelemetry::InstrumentationLibrary::builder("my-crate") + /// let scope = opentelemetry::InstrumentationScope::builder("my-crate") /// .with_attributes([KeyValue::new("k", "v")]) /// .build(); /// ``` @@ -556,9 +551,9 @@ impl InstrumentationLibraryBuilder { self } - /// Create a new [InstrumentationLibrary] from this configuration - pub fn build(self) -> InstrumentationLibrary { - InstrumentationLibrary { + /// Create a new [InstrumentationScope] from this configuration + pub fn build(self) -> InstrumentationScope { + InstrumentationScope { name: self.name, version: self.version, schema_url: self.schema_url, diff --git a/opentelemetry/src/context.rs b/opentelemetry/src/context.rs index 67eae958f6..4398e7b589 100644 --- a/opentelemetry/src/context.rs +++ b/opentelemetry/src/context.rs @@ -327,9 +327,19 @@ impl Context { impl fmt::Debug for Context { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Context") - .field("entries", &self.entries.len()) - .finish() + let mut dbg = f.debug_struct("Context"); + let mut entries = self.entries.len(); + #[cfg(feature = "trace")] + { + if let Some(span) = &self.span { + dbg.field("span", &span.span_context()); + entries += 1; + } else { + dbg.field("span", &"None"); + } + } + + dbg.field("entries", &entries).finish() } } diff --git a/opentelemetry/src/global/error_handler.rs b/opentelemetry/src/global/error_handler.rs deleted file mode 100644 index 87149c1b39..0000000000 --- a/opentelemetry/src/global/error_handler.rs +++ /dev/null @@ -1,86 +0,0 @@ -use std::sync::PoisonError; -use std::sync::RwLock; - -#[cfg(feature = "logs")] -use crate::logs::LogError; -#[cfg(feature = "metrics")] -use crate::metrics::MetricsError; -use crate::propagation::PropagationError; -#[cfg(feature = "trace")] -use crate::trace::TraceError; -use once_cell::sync::Lazy; - -static GLOBAL_ERROR_HANDLER: Lazy>> = Lazy::new(|| RwLock::new(None)); - -/// Wrapper for error from both tracing and metrics 
part of open telemetry. -#[derive(thiserror::Error, Debug)] -#[non_exhaustive] -pub enum Error { - #[cfg(feature = "trace")] - #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] - #[error(transparent)] - /// Failed to export traces. - Trace(#[from] TraceError), - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - #[error(transparent)] - /// An issue raised by the metrics module. - Metric(#[from] MetricsError), - - #[cfg(feature = "logs")] - #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] - #[error(transparent)] - /// Failed to export logs. - Log(#[from] LogError), - - #[error(transparent)] - /// Error happens when injecting and extracting information using propagators. - Propagation(#[from] PropagationError), - - #[error("{0}")] - /// Other types of failures not covered by the variants above. - Other(String), -} - -impl From> for Error { - fn from(err: PoisonError) -> Self { - Error::Other(err.to_string()) - } -} - -struct ErrorHandler(Box); - -/// Handle error using the globally configured error handler. -/// -/// Writes to stderr if unset. -pub fn handle_error>(err: T) { - match GLOBAL_ERROR_HANDLER.read() { - Ok(handler) if handler.is_some() => (handler.as_ref().unwrap().0)(err.into()), - _ => match err.into() { - #[cfg(feature = "metrics")] - #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] - Error::Metric(err) => eprintln!("OpenTelemetry metrics error occurred. {}", err), - #[cfg(feature = "trace")] - #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] - Error::Trace(err) => eprintln!("OpenTelemetry trace error occurred. {}", err), - #[cfg(feature = "logs")] - #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] - Error::Log(err) => eprintln!("OpenTelemetry log error occurred. {}", err), - Error::Propagation(err) => { - eprintln!("OpenTelemetry propagation error occurred. {}", err) - } - Error::Other(err_msg) => eprintln!("OpenTelemetry error occurred. {}", err_msg), - }, - } -} - -/// Set global error handler. -pub fn set_error_handler(f: F) -> std::result::Result<(), Error> -where - F: Fn(Error) + Send + Sync + 'static, -{ - GLOBAL_ERROR_HANDLER - .write() - .map(|mut handler| *handler = Some(ErrorHandler(Box::new(f)))) - .map_err(Into::into) -} diff --git a/opentelemetry/src/global/internal_logging.rs b/opentelemetry/src/global/internal_logging.rs new file mode 100644 index 0000000000..e27f43c693 --- /dev/null +++ b/opentelemetry/src/global/internal_logging.rs @@ -0,0 +1,162 @@ +#![allow(unused_macros)] +/// +/// **Note**: These macros (`otel_info!`, `otel_warn!`, `otel_debug!`, and `otel_error!`) are intended to be used +/// **internally within OpenTelemetry code** or for **custom exporters and processors**. They are not designed +/// for general application logging and should not be used for that purpose. +/// +/// Macro for logging informational messages in OpenTelemetry. +/// +/// # Fields: +/// - `name`: The operation or action being logged. +/// - Additional optional key-value pairs can be passed as attributes. +/// +/// # Example: +/// ```rust +/// use opentelemetry::otel_info; +/// otel_info!(name: "sdk_start", version = "1.0.0", schema_url = "http://example.com"); +/// ``` +/// +// TODO: Remove `name` attribute duplication in logging macros below once `tracing::Fmt` supports displaying `name`. +// See issue: https://github.com/tokio-rs/tracing/issues/2774 +#[macro_export] +macro_rules! otel_info { + (name: $name:expr $(,)?) 
=> { + #[cfg(feature = "internal-logs")] + { + tracing::info!( name: $name, target: env!("CARGO_PKG_NAME"), name = $name, ""); + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = $name; // Compiler will optimize this out as it's unused. + } + }; + (name: $name:expr, $($key:ident = $value:expr),+ $(,)?) => { + #[cfg(feature = "internal-logs")] + { + tracing::info!(name: $name, target: env!("CARGO_PKG_NAME"), name = $name, $($key = $value),+, ""); + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = ($name, $($value),+); // Compiler will optimize this out as it's unused. + } + }; +} + +/// Macro for logging warning messages in OpenTelemetry. +/// +/// # Fields: +/// - `name`: The operation or action being logged. +/// - Additional optional key-value pairs can be passed as attributes. +/// +/// # Example: +/// ```rust +/// use opentelemetry::otel_warn; +/// otel_warn!(name: "export_warning", error_code = 404, version = "1.0.0"); +/// ``` +#[macro_export] +macro_rules! otel_warn { + (name: $name:expr $(,)?) => { + #[cfg(feature = "internal-logs")] + { + tracing::warn!(name: $name, target: env!("CARGO_PKG_NAME"), name = $name, ""); + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = $name; // Compiler will optimize this out as it's unused. + } + }; + (name: $name:expr, $($key:ident = $value:expr),+ $(,)?) => { + #[cfg(feature = "internal-logs")] + { + tracing::warn!(name: $name, + target: env!("CARGO_PKG_NAME"), + name = $name, + $($key = { + $value + }),+, + "" + ) + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = ($name, $($value),+); // Compiler will optimize this out as it's unused. + } + }; +} + +/// Macro for logging debug messages in OpenTelemetry. +/// +/// # Fields: +/// - `name`: The operation or action being logged. +/// - Additional optional key-value pairs can be passed as attributes. +/// +/// # Example: +/// ```rust +/// use opentelemetry::otel_debug; +/// otel_debug!(name: "debug_operation", debug_level = "high", version = "1.0.0"); +/// ``` +#[macro_export] +macro_rules! otel_debug { + (name: $name:expr $(,)?) => { + #[cfg(feature = "internal-logs")] + { + tracing::debug!(name: $name, target: env!("CARGO_PKG_NAME"), name = $name, ""); + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = $name; // Compiler will optimize this out as it's unused. + } + }; + (name: $name:expr, $($key:ident = $value:expr),+ $(,)?) => { + #[cfg(feature = "internal-logs")] + { + tracing::debug!(name: $name, target: env!("CARGO_PKG_NAME"), name = $name, $($key = $value),+, ""); + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = ($name, $($value),+); // Compiler will optimize this out as it's unused. + } + }; +} + +/// Macro for logging error messages in OpenTelemetry. +/// +/// # Fields: +/// - `name`: The operation or action being logged. +/// - Additional optional key-value pairs can be passed as attributes. +/// +/// # Example: +/// ```rust +/// use opentelemetry::otel_error; +/// otel_error!(name: "export_failure", error_code = 500, version = "1.0.0"); +/// ``` +#[macro_export] +macro_rules! otel_error { + (name: $name:expr $(,)?) => { + #[cfg(feature = "internal-logs")] + { + tracing::error!(name: $name, target: env!("CARGO_PKG_NAME"), name = $name, ""); + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = $name; // Compiler will optimize this out as it's unused. + } + }; + (name: $name:expr, $($key:ident = $value:expr),+ $(,)?) 
=> { + #[cfg(feature = "internal-logs")] + { + tracing::error!(name: $name, + target: env!("CARGO_PKG_NAME"), + name = $name, + $($key = { + $value + }),+, + "" + ) + } + #[cfg(not(feature = "internal-logs"))] + { + let _ = ($name, $($value),+); // Compiler will optimize this out as it's unused. + } + }; +} diff --git a/opentelemetry/src/global/metrics.rs b/opentelemetry/src/global/metrics.rs index 7826f9920e..457bc662a6 100644 --- a/opentelemetry/src/global/metrics.rs +++ b/opentelemetry/src/global/metrics.rs @@ -1,89 +1,16 @@ use crate::metrics::{self, Meter, MeterProvider}; -use crate::KeyValue; -use core::fmt; -use once_cell::sync::Lazy; -use std::{ - borrow::Cow, - sync::{Arc, RwLock}, -}; +use crate::{otel_error, otel_info, InstrumentationScope}; +use std::sync::{Arc, OnceLock, RwLock}; -/// The global `MeterProvider` singleton. -static GLOBAL_METER_PROVIDER: Lazy> = Lazy::new(|| { - RwLock::new(GlobalMeterProvider::new( - metrics::noop::NoopMeterProvider::new(), - )) -}); - -/// Allows a specific [MeterProvider] to be used generically by the -/// [GlobalMeterProvider] by mirroring the interface and boxing the return types. -trait ObjectSafeMeterProvider { - /// Creates a versioned named meter instance that is a trait object through the underlying - /// [MeterProvider]. - fn versioned_meter_cow( - &self, - name: Cow<'static, str>, - version: Option>, - schema_url: Option>, - attributes: Option>, - ) -> Meter; -} - -impl
<P>
ObjectSafeMeterProvider for P -where - P: MeterProvider, -{ - /// Return a versioned boxed tracer - fn versioned_meter_cow( - &self, - name: Cow<'static, str>, - version: Option>, - schema_url: Option>, - attributes: Option>, - ) -> Meter { - self.versioned_meter(name, version, schema_url, attributes) - } -} - -/// Represents the globally configured [`MeterProvider`] instance for this -/// application. -#[derive(Clone)] -pub struct GlobalMeterProvider { - provider: Arc, -} - -impl fmt::Debug for GlobalMeterProvider { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("GlobalMeterProvider").finish() - } -} +type GlobalMeterProvider = Arc; -impl MeterProvider for GlobalMeterProvider { - fn versioned_meter( - &self, - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, - ) -> Meter { - self.provider.versioned_meter_cow( - name.into(), - version.map(Into::into), - schema_url.map(Into::into), - attributes, - ) - } -} +/// The global `MeterProvider` singleton. +static GLOBAL_METER_PROVIDER: OnceLock> = OnceLock::new(); -impl GlobalMeterProvider { - /// Create a new global meter provider - fn new
<P>
(provider: P) -> Self - where - P: MeterProvider + Send + Sync + 'static, - { - GlobalMeterProvider { - provider: Arc::new(provider), - } - } +#[inline] +fn global_meter_provider() -> &'static RwLock { + GLOBAL_METER_PROVIDER + .get_or_init(|| RwLock::new(Arc::new(crate::metrics::noop::NoopMeterProvider::new()))) } /// Sets the given [`MeterProvider`] instance as the current global meter @@ -92,61 +19,55 @@ pub fn set_meter_provider
<P>
(new_provider: P) where P: metrics::MeterProvider + Send + Sync + 'static, { - let mut global_provider = GLOBAL_METER_PROVIDER - .write() - .expect("GLOBAL_METER_PROVIDER RwLock poisoned"); - *global_provider = GlobalMeterProvider::new(new_provider); + // Try to set the global meter provider. If the RwLock is poisoned, we'll log an error. + let mut global_provider = global_meter_provider().write(); + if let Ok(ref mut provider) = global_provider { + **provider = Arc::new(new_provider); + otel_info!(name: "MeterProvider.GlobalSet", message = "Global meter provider is set. Meters can now be created using global::meter() or global::meter_with_scope()."); + } else { + otel_error!(name: "MeterProvider.GlobalSetFailed", message = "Setting global meter provider failed. Meters created using global::meter() or global::meter_with_scope() will not function. Report this issue in OpenTelemetry repo."); + } } -/// Returns an instance of the currently configured global [`MeterProvider`] -/// through [`GlobalMeterProvider`]. +/// Returns an instance of the currently configured global [`MeterProvider`]. pub fn meter_provider() -> GlobalMeterProvider { - GLOBAL_METER_PROVIDER - .read() - .expect("GLOBAL_METER_PROVIDER RwLock poisoned") - .clone() + // Try to get the global meter provider. If the RwLock is poisoned, we'll log an error and return a NoopMeterProvider. + let global_provider = global_meter_provider().read(); + if let Ok(provider) = global_provider { + provider.clone() + } else { + otel_error!(name: "MeterProvider.GlobalGetFailed", message = "Getting global meter provider failed. Meters created using global::meter() or global::meter_with_scope() will not function. Report this issue in OpenTelemetry repo."); + Arc::new(crate::metrics::noop::NoopMeterProvider::new()) + } } -/// Creates a named [`Meter`] via the configured [`GlobalMeterProvider`]. -/// -/// If the name is an empty string, the provider will use a default name. +/// Creates a named [`Meter`] via the currently configured global [`MeterProvider`]. /// /// This is a more convenient way of expressing `global::meter_provider().meter(name)`. -pub fn meter(name: impl Into>) -> Meter { - meter_provider().meter(name.into()) +pub fn meter(name: &'static str) -> Meter { + meter_provider().meter(name) } -/// Creates a [`Meter`] with the name, version and schema url. -/// -/// - name SHOULD uniquely identify the instrumentation scope, such as the instrumentation library (e.g. io.opentelemetry.contrib.mongodb), package, module or class name. -/// - version specifies the version of the instrumentation scope if the scope has a version -/// - schema url specifies the Schema URL that should be recorded in the emitted telemetry. +/// Creates a [`Meter`] with the given instrumentation scope. 
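A minimal sketch of how the reworked global metrics entry points compose, assuming `opentelemetry_sdk`'s `SdkMeterProvider` and the `build()` instrument API introduced later in this diff; instrument and attribute names are illustrative:

```rust
use opentelemetry::{global, KeyValue};
use opentelemetry_sdk::metrics::SdkMeterProvider;

fn main() {
    // Until a provider is installed, global::meter() silently hands out
    // meters backed by the lazily initialized no-op provider.
    let provider = SdkMeterProvider::default();
    global::set_meter_provider(provider);

    // Note the new signature: the meter name is now a `&'static str`.
    let meter = global::meter("my-component");
    let counter = meter.u64_counter("requests").build();
    counter.add(1, &[KeyValue::new("route", "/health")]);
}
```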
/// -/// This is a convenient way of `global::meter_provider().versioned_meter(...)` +/// This is a simpler alternative to `global::meter_provider().meter_with_scope(...)` /// /// # Example /// /// ``` -/// use opentelemetry::global::meter_with_version; +/// use std::sync::Arc; +/// use opentelemetry::global::meter_with_scope; +/// use opentelemetry::InstrumentationScope; /// use opentelemetry::KeyValue; /// -/// let meter = meter_with_version( -/// "io.opentelemetry", -/// Some("0.17"), -/// Some("https://opentelemetry.io/schemas/1.2.0"), -/// Some(vec![KeyValue::new("key", "value")]), -/// ); +/// let scope = InstrumentationScope::builder("io.opentelemetry") +/// .with_version("0.17") +/// .with_schema_url("https://opentelemetry.io/schema/1.2.0") +/// .with_attributes(vec![(KeyValue::new("key", "value"))]) +/// .build(); +/// +/// let meter = meter_with_scope(scope); /// ``` -pub fn meter_with_version( - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, -) -> Meter { - meter_provider().versioned_meter( - name.into(), - version.map(Into::into), - schema_url.map(Into::into), - attributes, - ) +pub fn meter_with_scope(scope: InstrumentationScope) -> Meter { + meter_provider().meter_with_scope(scope) } diff --git a/opentelemetry/src/global/mod.rs b/opentelemetry/src/global/mod.rs index a1e7b1da72..182364a18c 100644 --- a/opentelemetry/src/global/mod.rs +++ b/opentelemetry/src/global/mod.rs @@ -50,16 +50,21 @@ //! ``` //! # #[cfg(feature="trace")] //! # { -//! use opentelemetry::trace::{Tracer, TracerProvider}; +//! use std::sync::Arc; +//! use opentelemetry::trace::Tracer; //! use opentelemetry::global; +//! use opentelemetry::InstrumentationScope; //! //! pub fn my_traced_library_function() { //! // End users of your library will configure their global tracer provider //! // so you can use the global tracer without any setup -//! let tracer = global::tracer_provider().tracer_builder("my-library-name"). -//! with_version(env!("CARGO_PKG_VERSION")). -//! with_schema_url("https://opentelemetry.io/schemas/1.17.0"). -//! build(); +//! +//! let scope = InstrumentationScope::builder("my_library-name") +//! .with_version(env!("CARGO_PKG_VERSION")) +//! .with_schema_url("https://opentelemetry.io/schemas/1.17.0") +//! .build(); +//! +//! let tracer = global::tracer_with_scope(scope); //! //! tracer.in_span("doing_library_work", |cx| { //! // Traced library logic here... @@ -96,7 +101,7 @@ //! let meter = global::meter("my-component"); //! // It is recommended to reuse the same counter instance for the //! // lifetime of the application -//! let counter = meter.u64_counter("my_counter").init(); +//! let counter = meter.u64_counter("my_counter").build(); //! //! // record measurements //! counter.add(1, &[KeyValue::new("mykey", "myvalue")]); @@ -125,7 +130,7 @@ //! [`MeterProvider`]: crate::metrics::MeterProvider //! 
[`set_meter_provider`]: crate::global::set_meter_provider -mod error_handler; +mod internal_logging; #[cfg(feature = "metrics")] mod metrics; #[cfg(feature = "trace")] @@ -133,7 +138,6 @@ mod propagation; #[cfg(feature = "trace")] mod trace; -pub use error_handler::{handle_error, set_error_handler, Error}; #[cfg(feature = "metrics")] #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] pub use metrics::*; diff --git a/opentelemetry/src/global/propagation.rs b/opentelemetry/src/global/propagation.rs index 30d5b8f86b..4a8a296820 100644 --- a/opentelemetry/src/global/propagation.rs +++ b/opentelemetry/src/global/propagation.rs @@ -1,19 +1,29 @@ use crate::propagation::TextMapPropagator; use crate::trace::noop::NoopTextMapPropagator; -use once_cell::sync::Lazy; -use std::sync::RwLock; +use std::sync::{OnceLock, RwLock}; /// The current global `TextMapPropagator` propagator. -static GLOBAL_TEXT_MAP_PROPAGATOR: Lazy>> = - Lazy::new(|| RwLock::new(Box::new(NoopTextMapPropagator::new()))); +static GLOBAL_TEXT_MAP_PROPAGATOR: OnceLock>> = + OnceLock::new(); /// The global default `TextMapPropagator` propagator. -static DEFAULT_TEXT_MAP_PROPAGATOR: Lazy = - Lazy::new(NoopTextMapPropagator::new); +static DEFAULT_TEXT_MAP_PROPAGATOR: OnceLock = OnceLock::new(); + +/// Ensures the `GLOBAL_TEXT_MAP_PROPAGATOR` is initialized with a `NoopTextMapPropagator`. +#[inline] +fn global_text_map_propagator() -> &'static RwLock> { + GLOBAL_TEXT_MAP_PROPAGATOR.get_or_init(|| RwLock::new(Box::new(NoopTextMapPropagator::new()))) +} + +/// Ensures the `DEFAULT_TEXT_MAP_PROPAGATOR` is initialized. +#[inline] +fn default_text_map_propagator() -> &'static NoopTextMapPropagator { + DEFAULT_TEXT_MAP_PROPAGATOR.get_or_init(NoopTextMapPropagator::new) +} /// Sets the given [`TextMapPropagator`] propagator as the current global propagator. pub fn set_text_map_propagator(propagator: P) { - let _lock = GLOBAL_TEXT_MAP_PROPAGATOR + let _lock = global_text_map_propagator() .write() .map(|mut global_propagator| *global_propagator = Box::new(propagator)); } @@ -23,8 +33,11 @@ pub fn get_text_map_propagator(mut f: F) -> T where F: FnMut(&dyn TextMapPropagator) -> T, { - GLOBAL_TEXT_MAP_PROPAGATOR + global_text_map_propagator() .read() .map(|propagator| f(&**propagator)) - .unwrap_or_else(|_| f(&*DEFAULT_TEXT_MAP_PROPAGATOR as &dyn TextMapPropagator)) + .unwrap_or_else(|_| { + let default_propagator = default_text_map_propagator(); + f(default_propagator as &dyn TextMapPropagator) + }) } diff --git a/opentelemetry/src/global/trace.rs b/opentelemetry/src/global/trace.rs index d3058dc533..8121e4fd9a 100644 --- a/opentelemetry/src/global/trace.rs +++ b/opentelemetry/src/global/trace.rs @@ -1,11 +1,10 @@ use crate::trace::{noop::NoopTracerProvider, SpanContext, Status}; -use crate::InstrumentationLibrary; +use crate::InstrumentationScope; use crate::{trace, trace::TracerProvider, Context, KeyValue}; -use once_cell::sync::Lazy; use std::borrow::Cow; use std::fmt; use std::mem; -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, OnceLock, RwLock}; use std::time::SystemTime; /// Allows a specific [`crate::trace::Span`] to be used generically by [`BoxedSpan`] @@ -305,10 +304,7 @@ where pub trait ObjectSafeTracerProvider { /// Creates a versioned named tracer instance that is a trait object through the underlying /// `TracerProvider`. 
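With the global error handler gone, internal SDK diagnostics are emitted through the `tracing` crate behind the `internal-logs` feature. A sketch of how a consumer might surface them, assuming `tracing-subscriber` 0.3:

```rust
use tracing::level_filters::LevelFilter;
use tracing_subscriber::{fmt, prelude::*};

fn main() {
    // Any `tracing` subscriber will do; events such as
    // "MeterProvider.GlobalSet" from this diff show up here.
    tracing_subscriber::registry()
        .with(fmt::layer())
        .with(LevelFilter::DEBUG)
        .init();
}
```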
- fn boxed_tracer( - &self, - library: Arc, - ) -> Box; + fn boxed_tracer(&self, scope: InstrumentationScope) -> Box; } impl ObjectSafeTracerProvider for P @@ -318,11 +314,8 @@ where P: trace::TracerProvider, { /// Return a versioned boxed tracer - fn boxed_tracer( - &self, - library: Arc, - ) -> Box { - Box::new(self.library_tracer(library)) + fn boxed_tracer(&self, scope: InstrumentationScope) -> Box { + Box::new(self.tracer_with_scope(scope)) } } @@ -360,17 +353,19 @@ impl trace::TracerProvider for GlobalTracerProvider { type Tracer = BoxedTracer; /// Create a tracer using the global provider. - fn library_tracer(&self, library: Arc) -> Self::Tracer { - BoxedTracer(self.provider.boxed_tracer(library)) + fn tracer_with_scope(&self, scope: InstrumentationScope) -> Self::Tracer { + BoxedTracer(self.provider.boxed_tracer(scope)) } } /// The global `Tracer` provider singleton. -static GLOBAL_TRACER_PROVIDER: Lazy> = Lazy::new(|| { - RwLock::new(GlobalTracerProvider::new( - trace::noop::NoopTracerProvider::new(), - )) -}); +static GLOBAL_TRACER_PROVIDER: OnceLock> = OnceLock::new(); + +#[inline] +fn global_tracer_provider() -> &'static RwLock { + GLOBAL_TRACER_PROVIDER + .get_or_init(|| RwLock::new(GlobalTracerProvider::new(NoopTracerProvider::new()))) +} /// Returns an instance of the currently configured global [`TracerProvider`] through /// [`GlobalTracerProvider`]. @@ -378,7 +373,7 @@ static GLOBAL_TRACER_PROVIDER: Lazy> = Lazy::new(|| /// [`TracerProvider`]: crate::trace::TracerProvider /// [`GlobalTracerProvider`]: crate::global::GlobalTracerProvider pub fn tracer_provider() -> GlobalTracerProvider { - GLOBAL_TRACER_PROVIDER + global_tracer_provider() .read() .expect("GLOBAL_TRACER_PROVIDER RwLock poisoned") .clone() @@ -395,6 +390,33 @@ pub fn tracer(name: impl Into>) -> BoxedTracer { tracer_provider().tracer(name.into()) } +/// Creates a [`Tracer`] with the given instrumentation scope +/// via the configured [`GlobalTracerProvider`]. +/// +/// This is a simpler alternative to `global::tracer_provider().tracer_with_scope(...)` +/// +/// # Example +/// +/// ``` +/// use std::sync::Arc; +/// use opentelemetry::global::tracer_with_scope; +/// use opentelemetry::InstrumentationScope; +/// use opentelemetry::KeyValue; +/// +/// let scope = InstrumentationScope::builder("io.opentelemetry") +/// .with_version("0.17") +/// .with_schema_url("https://opentelemetry.io/schema/1.2.0") +/// .with_attributes(vec![(KeyValue::new("key", "value"))]) +/// .build(); +/// +/// let tracer = tracer_with_scope(scope); +/// ``` +/// +/// [`Tracer`]: crate::trace::Tracer +pub fn tracer_with_scope(scope: InstrumentationScope) -> BoxedTracer { + tracer_provider().tracer_with_scope(scope) +} + /// Sets the given [`TracerProvider`] instance as the current global provider. /// /// It returns the [`TracerProvider`] instance that was previously mounted as global provider @@ -407,7 +429,7 @@ where T: trace::Tracer + Send + Sync + 'static, P: trace::TracerProvider + Send + Sync + 'static, { - let mut tracer_provider = GLOBAL_TRACER_PROVIDER + let mut tracer_provider = global_tracer_provider() .write() .expect("GLOBAL_TRACER_PROVIDER RwLock poisoned"); mem::replace( @@ -415,16 +437,3 @@ where GlobalTracerProvider::new(new_provider), ) } - -/// Shut down the current tracer provider. This will invoke the shutdown method on all span processors. 
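The deleted `shutdown_tracer_provider` helper (its removal continues just below) has no global replacement; callers are expected to keep a handle to the SDK provider and shut it down directly. A sketch assuming `opentelemetry_sdk`'s `trace::TracerProvider`:

```rust
use opentelemetry::global;
use opentelemetry_sdk::trace::TracerProvider as SdkTracerProvider;

fn main() {
    let provider = SdkTracerProvider::builder().build();
    // Keep a handle: shutdown now happens on the provider itself.
    global::set_tracer_provider(provider.clone());

    // ... application runs, spans are recorded ...

    // Flushes remaining spans through all registered span processors.
    let _ = provider.shutdown();
}
```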
-/// span processors should export remaining spans before return -pub fn shutdown_tracer_provider() { - let mut tracer_provider = GLOBAL_TRACER_PROVIDER - .write() - .expect("GLOBAL_TRACER_PROVIDER RwLock poisoned"); - - let _ = mem::replace( - &mut *tracer_provider, - GlobalTracerProvider::new(NoopTracerProvider::new()), - ); -} diff --git a/opentelemetry/src/lib.rs b/opentelemetry/src/lib.rs index 6e42f506de..10f8facef7 100644 --- a/opentelemetry/src/lib.rs +++ b/opentelemetry/src/lib.rs @@ -1,31 +1,11 @@ //! Implements the [`API`] component of [OpenTelemetry]. //! -//! *Compiler support: [requires `rustc` 1.64+][msrv]* +//! *[Supported Rust Versions](#supported-rust-versions)* //! //! [`API`]: https://opentelemetry.io/docs/specs/otel/overview/#api //! [OpenTelemetry]: https://opentelemetry.io/docs/what-is-opentelemetry/ -//! [msrv]: #supported-rust-versions //! -//! # Getting Started -//! -//! ```no_run -//! # #[cfg(feature = "trace")] -//! # { -//! use opentelemetry::{global, trace::{TraceContextExt, Tracer}, Context }; -//! -//! fn do_something() { -//! let tracer = global::tracer("my_component"); -//! let _guard = Context::current_with_span(tracer.start("my_span")).attach(); -//! // do work tracked by the now current span -//! } -//! # } -//! ``` -//! -//! See the [examples] directory for different integration patterns. -//! -//! [examples]: https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples -//! -//! # Traces +//! # Getting Started with Traces //! //! The [`trace`] module includes types for tracking the progression of a single //! request while it is handled by services that make up an application. A trace @@ -33,8 +13,6 @@ //! by individual services or components involved in a request as it flows //! through a system. //! -//! ### Creating and exporting spans -//! //! ``` //! # #[cfg(feature = "trace")] //! # { @@ -56,40 +34,111 @@ //! # } //! ``` //! +//! See the [examples](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples) directory for different integration patterns. +//! //! See the [`trace`] module docs for more information on creating and managing //! spans. //! //! [`Span`]: crate::trace::Span //! -//! # Metrics -//! -//! -//! The [`metrics`] module includes types for recording measurements about a -//! service at runtime. -//! -//! ### Creating instruments and recording measurements -//! +//! # Getting Started with Metrics +//! +//! The [`metrics`] module provides types for recording measurements about a +//! service at runtime. Below are the key steps to report measurements using +//! OpenTelemetry Metrics: +//! +//! 1. **Obtain a Meter:** Get a `Meter` from a `MeterProvider`. +//! 2. **Create Instruments:** Use the `Meter` to create one or more instruments +//! (e.g., counters, histograms). +//! 3. **Record Measurements:** Use the instruments to record measurement values +//! along with optional attributes. +//! +//! ## How Metrics work in OpenTelemetry +//! In OpenTelemetry, raw measurements recorded using instruments are +//! **aggregated in memory** to form metrics. These aggregated metrics are +//! periodically exported by the [`opentelemetry_sdk`] at fixed intervals (e.g., +//! every 60 seconds) via exporters such as [`opentelemetry-stdout`] or +//! [`opentelemetry-otlp`]. This reduces reporting overhead while ensuring +//! up-to-date data. The aggregation strategy and export interval can be +//! customized in the [`opentelemetry_sdk`] based on your use case. +//! +//! ## Choosing the Right Instrument +//! 
Selecting the correct instrument is critical for accurately representing +//! your metrics data: +//! +//! - Use **Counters** for values that only increase, such as the number of +//! requests served or errors encountered. +//! - Use **UpDownCounters** for values that can increase or decrease, such as +//! the number of active connections or the number of items in a queue. +//! - Use **Gauges** for values that can go up or down and represent the +//! current state, such as CPU usage or temperature. +//! - Use **Histograms** for measuring the distribution of a value, such as +//! response times or payload sizes. +//! +//! ### Observable Instruments +//! +//! Counters, UpDownCounters, and Gauges have Observable variants that allow +//! values to be reported through a callback function. Observable instruments +//! are ideal when the metric value is managed elsewhere and needs to be +//! observed by OpenTelemetry instrumentation. The callbacks are automatically +//! invoked by the OpenTelemetry SDK before every export (e.g., every 60 +//! seconds). +//! +//! For example: +//! - An **ObservableCounter** can monitor the number of page faults in a +//! process as reported by the operating system. +//! - An **ObservableUpDownCounter** can monitor the size of an in-memory queue +//! by reporting the size using the queue's `len()` method within the callback +//! function. +//! - An **ObservableGauge** can monitor the CPU temperature by using +//! temperature sensor APIs within the callback function. +//! +//! For detailed guidance, refer to [OpenTelemetry Metrics API - Instrumentation +//! Guidance](https://opentelemetry.io/docs/specs/otel/metrics/supplementary-guidelines/#instrument-selection). +//! +//! ## Best Practices +//! - **Re-use Instruments:** Instruments are designed for +//! reuse. Avoid creating new instruments repeatedly. +//! - **Clone for Sharing:** If the same instrument needs to be used across +//! multiple parts of your code, you can safely clone it to share. +//! +//! ## Example Usage //! ``` -//! # #[cfg(feature = "metrics")] -//! # { //! use opentelemetry::{global, KeyValue}; //! -//! // get a meter from a provider +//! // Get a meter from a provider. //! let meter = global::meter("my_service"); //! -//! // create an instrument -//! let counter = meter.u64_counter("my_counter").init(); +//! // Create an instrument (in this case, a Counter). +//! let counter = meter.u64_counter("request.count").build(); //! -//! // record a measurement +//! // Record a measurement by passing the value and a set of attributes. //! counter.add(1, &[KeyValue::new("http.client_ip", "83.164.160.102")]); -//! # } +//! +//! // Create an ObservableCounter and register a callback that reports the measurement. +//! let _observable_counter = meter +//! .u64_observable_counter("bytes_received") +//! .with_callback(|observer| { +//! observer.observe( +//! 100, +//! &[ +//! KeyValue::new("protocol", "udp"), +//! ], +//! ) +//! }) +//! .build(); //! ``` //! +//! See the +//! [examples](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/metrics-basic) +//! directory for a runnable example covering all instrument types. +//! +//! +//! See the [`metrics`] module docs for more information on creating and +//! managing instruments. +//! +//! -//! # Logs +//! # Getting Started with Logs //! //! The [`logs`] module contains the Logs Bridge API. It is not intended to be //! called by application developers directly. It is provided for logging @@ -103,23 +152,24 @@ //!
[`opentelemetry-appender-tracing`](https://crates.io/crates/opentelemetry-appender-tracing) //! crates. //! -//! ## Crate Feature Flags +//! # Crate Feature Flags //! //! The following core crate feature flags are available: //! //! * `trace`: Includes the trace API. //! * `metrics`: Includes the metrics API. //! * `logs`: Includes the logs bridge API. +//! * `internal-logs`: Includes internal logging for the OpenTelemetry library via `tracing`. //! -//! The default feature flags are ["trace", "metrics", "logs"] +//! The default feature flags are ["trace", "metrics", "logs", "internal-logs"]. //! //! The following feature flags provides additional configuration for `logs`: -//! * `logs_level_enabled`: Allow users to control the log level +//! * `spec_unstable_logs_enabled`: Allow users to control the log level //! //! The following feature flags enable APIs defined in OpenTelemetry specification that is in experimental phase: -//! * `otel_unstable`: Includes unstable APIs. +//! * `otel_unstable`: Includes unstable APIs. There are no features behind this flag at the moment. //! -//! ## Related Crates +//! # Related Crates //! //! In addition to `opentelemetry`, the [`open-telemetry/opentelemetry-rust`] //! repository contains several additional crates designed to be used with the @@ -134,6 +184,8 @@ //! trace information from [`http`] headers. //! - [`opentelemetry-otlp`] exporter for sending telemetry in the //! OTLP format. +//! - [`opentelemetry-stdout`] provides ability to output telemetry to stdout, +//! primarily used for learning/debugging purposes. //! - [`opentelemetry-prometheus`] provides a pipeline and exporter for sending //! metrics information to [`Prometheus`]. //! - [`opentelemetry-zipkin`] provides a pipeline and exporter for sending @@ -149,6 +201,7 @@ //! [`http`]: https://crates.io/crates/http //! [`open-telemetry/opentelemetry-rust`]: https://github.com/open-telemetry/opentelemetry-rust //! [`opentelemetry_sdk`]: https://crates.io/crates/opentelemetry_sdk +//! [`opentelemetry-stdout`]: https://crates.io/crates/opentelemetry_stdout //! [`opentelemetry-http`]: https://crates.io/crates/opentelemetry-http //! [`opentelemetry-otlp`]: https://crates.io/crates/opentelemetry-otlp //! [`opentelemetry-prometheus`]: https://crates.io/crates/opentelemetry-prometheus @@ -156,10 +209,10 @@ //! [`Prometheus`]: https://prometheus.io //! [`Zipkin`]: https://zipkin.io //! -//! ## Supported Rust Versions +//! # Supported Rust Versions //! //! OpenTelemetry is built against the latest stable release. The minimum -//! supported version is 1.64. The current OpenTelemetry version is not +//! supported version is 1.70. The current OpenTelemetry version is not //! guaranteed to build on Rust versions earlier than the minimum supported //! version. //! 
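Because the Logs Bridge API targets appender authors rather than applications, an end-to-end sketch may help; it assumes the `opentelemetry-appender-tracing` and `tracing-subscriber` crates referenced above, at versions matching this release:

```rust
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_sdk::logs::LoggerProvider;
use tracing_subscriber::prelude::*;

fn main() {
    // Wire `tracing` events into OpenTelemetry via the bridge layer.
    let provider = LoggerProvider::builder().build();
    let bridge = OpenTelemetryTracingBridge::new(&provider);
    tracing_subscriber::registry().with(bridge).init();

    tracing::info!(user = "alice", "user logged in");
}
```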
@@ -204,14 +257,15 @@ mod common; pub mod testing; pub use common::{ - Array, ExportError, InstrumentationLibrary, InstrumentationLibraryBuilder, Key, KeyValue, - StringValue, Value, + Array, InstrumentationScope, InstrumentationScopeBuilder, Key, KeyValue, StringValue, Value, }; #[cfg(feature = "metrics")] #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] pub mod metrics; +#[cfg(feature = "trace")] +#[cfg_attr(docsrs, doc(cfg(feature = "trace")))] pub mod propagation; #[cfg(feature = "trace")] diff --git a/opentelemetry/src/logs/logger.rs b/opentelemetry/src/logs/logger.rs index fd4e18e043..ae04946eec 100644 --- a/opentelemetry/src/logs/logger.rs +++ b/opentelemetry/src/logs/logger.rs @@ -1,12 +1,11 @@ -use std::{borrow::Cow, sync::Arc}; +use std::borrow::Cow; -use crate::{logs::LogRecord, InstrumentationLibrary, InstrumentationLibraryBuilder, KeyValue}; +use crate::{logs::LogRecord, InstrumentationScope}; -#[cfg(feature = "logs_level_enabled")] +#[cfg(feature = "spec_unstable_logs_enabled")] use super::Severity; /// The interface for emitting [`LogRecord`]s. - pub trait Logger { /// Specifies the `LogRecord` type associated with this logger. type LogRecord: LogRecord; @@ -20,7 +19,7 @@ pub trait Logger { /// [`Context`]: crate::Context fn emit(&self, record: Self::LogRecord); - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] /// Check if the given log level is enabled. fn event_enabled(&self, level: Severity, target: &str) -> bool; } @@ -30,128 +29,36 @@ pub trait LoggerProvider { /// The [`Logger`] type that this provider will return. type Logger: Logger; - /// Deprecated, use [`LoggerProvider::logger_builder()`] - /// - /// Returns a new versioned logger with a given name. - /// - /// The `name` should be the application name or the name of the library - /// providing instrumentation. If the name is empty, then an - /// implementation-defined default name may be used instead. - /// Create a new versioned `Logger` instance. - #[deprecated(since = "0.23.0", note = "Please use logger_builder() instead")] - fn versioned_logger( - &self, - name: impl Into>, - version: Option>, - schema_url: Option>, - attributes: Option>, - ) -> Self::Logger { - let mut builder = self.logger_builder(name); - if let Some(v) = version { - builder = builder.with_version(v); - } - if let Some(s) = schema_url { - builder = builder.with_schema_url(s); - } - if let Some(a) = attributes { - builder = builder.with_attributes(a); - } - builder.build() - } - - /// Returns a new builder for creating a [`Logger`] instance - /// - /// The `name` should be the application name or the name of the library - /// providing instrumentation. If the name is empty, then an - /// implementation-defined default name may be used instead. + /// Returns a new logger with the given instrumentation scope. 
/// /// # Examples /// /// ``` - /// use opentelemetry::InstrumentationLibrary; - /// use crate::opentelemetry::logs::LoggerProvider; + /// use opentelemetry::InstrumentationScope; + /// use opentelemetry::logs::LoggerProvider; /// use opentelemetry_sdk::logs::LoggerProvider as SdkLoggerProvider; /// /// let provider = SdkLoggerProvider::builder().build(); /// /// // logger used in applications/binaries - /// let logger = provider.logger_builder("my_app").build(); + /// let logger = provider.logger("my_app"); /// /// // logger used in libraries/crates that optionally includes version and schema url - /// let logger = provider.logger_builder("my_library") + /// let scope = InstrumentationScope::builder(env!("CARGO_PKG_NAME")) /// .with_version(env!("CARGO_PKG_VERSION")) /// .with_schema_url("https://opentelemetry.io/schema/1.0.0") /// .build(); - /// ``` - fn logger_builder(&self, name: impl Into>) -> LoggerBuilder<'_, Self> { - LoggerBuilder { - provider: self, - library_builder: InstrumentationLibrary::builder(name), - } - } - - /// Returns a new versioned logger with the given instrumentation library. - /// - /// # Examples - /// - /// ``` - /// use opentelemetry::InstrumentationLibrary; - /// use crate::opentelemetry::logs::LoggerProvider; - /// use opentelemetry_sdk::logs::LoggerProvider as SdkLoggerProvider; /// - /// let provider = SdkLoggerProvider::builder().build(); - /// - /// // logger used in applications/binaries - /// let logger = provider.logger("my_app"); - /// - /// // logger used in libraries/crates that optionally includes version and schema url - /// let library = std::sync::Arc::new( - /// InstrumentationLibrary::builder(env!("CARGO_PKG_NAME")) - /// .with_version(env!("CARGO_PKG_VERSION")) - /// .with_schema_url("https://opentelemetry.io/schema/1.0.0") - /// .build(), - /// ); - /// let logger = provider.library_logger(library); + /// let logger = provider.logger_with_scope(scope); /// ``` - fn library_logger(&self, library: Arc) -> Self::Logger; + fn logger_with_scope(&self, scope: InstrumentationScope) -> Self::Logger; /// Returns a new logger with the given name. /// /// The `name` should be the application name or the name of the library - /// providing instrumentation. If the name is empty, then an - /// implementation-defined default name may be used instead. + /// providing instrumentation. 
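For appender authors, the `Logger` returned here is what actually emits data. A sketch of the create/populate/emit flow, using the `LogRecord` setters defined by this API; the body and attribute values are illustrative:

```rust
use opentelemetry::logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity};
use opentelemetry_sdk::logs::LoggerProvider as SdkLoggerProvider;

fn emit_one() {
    let provider = SdkLoggerProvider::builder().build();
    let logger = provider.logger("my_appender");

    // Appenders create a record, populate it, then hand it off.
    let mut record = logger.create_log_record();
    record.set_severity_number(Severity::Info);
    record.set_body(AnyValue::String("something happened".into()));
    record.add_attribute("component", AnyValue::String("db".into()));
    logger.emit(record);
}
```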
fn logger(&self, name: impl Into>) -> Self::Logger { - self.logger_builder(name).build() - } -} - -#[derive(Debug)] -pub struct LoggerBuilder<'a, T: LoggerProvider + ?Sized> { - provider: &'a T, - library_builder: InstrumentationLibraryBuilder, -} - -impl<'a, T: LoggerProvider + ?Sized> LoggerBuilder<'a, T> { - pub fn with_version(mut self, version: impl Into>) -> Self { - self.library_builder = self.library_builder.with_version(version); - self - } - - pub fn with_schema_url(mut self, schema_url: impl Into>) -> Self { - self.library_builder = self.library_builder.with_schema_url(schema_url); - self - } - - pub fn with_attributes(mut self, attributes: I) -> Self - where - I: IntoIterator, - { - self.library_builder = self.library_builder.with_attributes(attributes); - self - } - - pub fn build(self) -> T::Logger { - self.provider - .library_logger(Arc::new(self.library_builder.build())) + let scope = InstrumentationScope::builder(name).build(); + self.logger_with_scope(scope) } } diff --git a/opentelemetry/src/logs/mod.rs b/opentelemetry/src/logs/mod.rs index f0bbe0d660..e57684bc0f 100644 --- a/opentelemetry/src/logs/mod.rs +++ b/opentelemetry/src/logs/mod.rs @@ -2,11 +2,6 @@ /// This API is not intended to be called by application developers directly. /// It is provided for logging library authors to build log appenders, that /// bridges existing logging systems with OpenTelemetry. -use crate::ExportError; - -use std::{sync::PoisonError, time::Duration}; -use thiserror::Error; - mod logger; mod noop; mod record; @@ -14,54 +9,3 @@ mod record; pub use logger::{Logger, LoggerProvider}; pub use noop::NoopLoggerProvider; pub use record::{AnyValue, LogRecord, Severity}; - -/// Describe the result of operations in log SDK. -pub type LogResult = Result; - -#[derive(Error, Debug)] -#[non_exhaustive] -/// Errors returned by the log SDK. -pub enum LogError { - /// Export failed with the error returned by the exporter. - #[error("Exporter {} encountered the following errors: {0}", .0.exporter_name())] - ExportFailed(Box), - - /// Export failed to finish after certain period and processor stopped the export. - #[error("Exporter timed out after {} seconds", .0.as_secs())] - ExportTimedOut(Duration), - - /// Other errors propagated from log SDK that weren't covered above. - #[error(transparent)] - Other(#[from] Box), -} - -impl From for LogError -where - T: ExportError, -{ - fn from(err: T) -> Self { - LogError::ExportFailed(Box::new(err)) - } -} - -impl From for LogError { - fn from(err_msg: String) -> Self { - LogError::Other(Box::new(Custom(err_msg))) - } -} - -impl From<&'static str> for LogError { - fn from(err_msg: &'static str) -> Self { - LogError::Other(Box::new(Custom(err_msg.into()))) - } -} - -impl From> for LogError { - fn from(err: PoisonError) -> Self { - LogError::Other(err.to_string().into()) - } -} -/// Wrap type for string -#[derive(Error, Debug)] -#[error("{0}")] -struct Custom(String); diff --git a/opentelemetry/src/logs/noop.rs b/opentelemetry/src/logs/noop.rs index 8c31328e5d..a9706fa5a4 100644 --- a/opentelemetry/src/logs/noop.rs +++ b/opentelemetry/src/logs/noop.rs @@ -1,8 +1,8 @@ -use std::{borrow::Cow, sync::Arc, time::SystemTime}; +use std::{borrow::Cow, time::SystemTime}; use crate::{ logs::{AnyValue, LogRecord, Logger, LoggerProvider, Severity}, - InstrumentationLibrary, Key, KeyValue, + InstrumentationScope, Key, }; /// A no-op implementation of a [`LoggerProvider`]. 
@@ -19,17 +19,7 @@ impl NoopLoggerProvider { impl LoggerProvider for NoopLoggerProvider { type Logger = NoopLogger; - fn library_logger(&self, _library: Arc) -> Self::Logger { - NoopLogger(()) - } - - fn versioned_logger( - &self, - _name: impl Into>, - _version: Option>, - _schema_url: Option>, - _attributes: Option>, - ) -> Self::Logger { + fn logger_with_scope(&self, _scope: InstrumentationScope) -> Self::Logger { NoopLogger(()) } } @@ -88,7 +78,7 @@ impl Logger for NoopLogger { NoopLogRecord {} } fn emit(&self, _record: Self::LogRecord) {} - #[cfg(feature = "logs_level_enabled")] + #[cfg(feature = "spec_unstable_logs_enabled")] fn event_enabled(&self, _level: super::Severity, _target: &str) -> bool { false } diff --git a/opentelemetry/src/logs/record.rs b/opentelemetry/src/logs/record.rs index 2e171ef0a1..d53caa7cc1 100644 --- a/opentelemetry/src/logs/record.rs +++ b/opentelemetry/src/logs/record.rs @@ -1,4 +1,8 @@ -use crate::{Array, Key, StringValue, Value}; +use crate::{Key, StringValue}; + +#[cfg(feature = "trace")] +use crate::trace::{SpanId, TraceFlags, TraceId}; + use std::{borrow::Cow, collections::HashMap, time::SystemTime}; /// SDK implemented trait for managing log records @@ -41,10 +45,32 @@ pub trait LogRecord { where K: Into, V: Into; + + /// Sets the trace context of the log. + #[cfg(feature = "trace")] + fn set_trace_context( + &mut self, + trace_id: TraceId, + span_id: SpanId, + trace_flags: Option, + ) { + let _ = trace_id; + let _ = span_id; + let _ = trace_flags; + } } /// Value types for representing arbitrary values in a log record. +/// Note: The `tracing` and `log` crates only support basic types that can be +/// converted to these core variants: `i64`, `f64`, `StringValue`, and `bool`. +/// Any complex and custom types are supported through their Debug implementation, +/// and converted to String. More complex types (`Bytes`, `ListAny`, and `Map`) are +/// included here to meet specification requirements and are available to support +/// custom appenders that may be implemented for other logging crates. +/// These types allow for handling dynamic data structures, so keep in mind the +/// potential performance overhead of using boxed vectors and maps in appenders. #[derive(Debug, Clone, PartialEq)] +#[non_exhaustive] pub enum AnyValue { /// An integer value Int(i64), @@ -108,23 +134,6 @@ impl, V: Into> FromIterator<(K, V)> for AnyValue { } } -impl From for AnyValue { - fn from(value: Value) -> Self { - match value { - Value::Bool(b) => b.into(), - Value::I64(i) => i.into(), - Value::F64(f) => f.into(), - Value::String(s) => s.into(), - Value::Array(a) => match a { - Array::Bool(b) => AnyValue::from_iter(b), - Array::F64(f) => AnyValue::from_iter(f), - Array::I64(i) => AnyValue::from_iter(i), - Array::String(s) => AnyValue::from_iter(s), - }, - } - } -} - /// A normalized severity value. #[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)] pub enum Severity { diff --git a/opentelemetry/src/metrics/instruments/counter.rs b/opentelemetry/src/metrics/instruments/counter.rs index 3972580628..8d72657686 100644 --- a/opentelemetry/src/metrics/instruments/counter.rs +++ b/opentelemetry/src/metrics/instruments/counter.rs @@ -1,20 +1,17 @@ -use crate::{ - metrics::{AsyncInstrument, AsyncInstrumentBuilder, InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::KeyValue; use core::fmt; -use std::any::Any; use std::sync::Arc; -/// An SDK implemented instrument that records increasing values. -pub trait SyncCounter { - /// Records an increment to the counter. 
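The new `set_trace_context` default method added to `LogRecord` above lets an appender stamp the active span's identifiers onto a record, correlating logs with traces (requires the `trace` feature). A sketch:

```rust
use opentelemetry::logs::{LogRecord, Logger};
use opentelemetry::trace::TraceContextExt;
use opentelemetry::Context;

fn emit_correlated<L: Logger>(logger: &L) {
    // Grab the span context that is current at emit time.
    let span_context = Context::current().span().span_context().clone();

    let mut record = logger.create_log_record();
    if span_context.is_valid() {
        // Copies trace id, span id, and flags onto the log record.
        record.set_trace_context(
            span_context.trace_id(),
            span_context.span_id(),
            Some(span_context.trace_flags()),
        );
    }
    logger.emit(record);
}
```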
- fn add(&self, value: T, attributes: &[KeyValue]); -} +use super::SyncInstrument; /// An instrument that records increasing values. +/// +/// [`Counter`] can be cloned to create multiple handles to the same instrument. If a [`Counter`] needs to be shared, +/// users are recommended to clone the [`Counter`] instead of creating duplicate [`Counter`]s for the same metric. Creating +/// duplicate [`Counter`]s for the same metric could lower SDK performance. #[derive(Clone)] -pub struct Counter(Arc + Send + Sync>); +#[non_exhaustive] +pub struct Counter(Arc + Send + Sync>); impl fmt::Debug for Counter where @@ -27,44 +24,30 @@ where impl Counter { /// Create a new counter. - pub fn new(inner: Arc + Send + Sync>) -> Self { + pub fn new(inner: Arc + Send + Sync>) -> Self { Counter(inner) } /// Records an increment to the counter. pub fn add(&self, value: T, attributes: &[KeyValue]) { - self.0.add(value, attributes) - } -} - -impl TryFrom>> for Counter { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Counter>) -> Result { - builder - .instrument_provider - .u64_counter(builder.name, builder.description, builder.unit) - } -} - -impl TryFrom>> for Counter { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Counter>) -> Result { - builder - .instrument_provider - .f64_counter(builder.name, builder.description, builder.unit) + self.0.measure(value, attributes) } } /// An async instrument that records increasing values. #[derive(Clone)] -pub struct ObservableCounter(Arc>); +#[non_exhaustive] +pub struct ObservableCounter { + _marker: std::marker::PhantomData, +} impl ObservableCounter { /// Create a new observable counter. - pub fn new(inner: Arc>) -> Self { - ObservableCounter(inner) + #[allow(clippy::new_without_default)] + pub fn new() -> Self { + ObservableCounter { + _marker: std::marker::PhantomData, + } } } @@ -76,59 +59,3 @@ impl fmt::Debug for ObservableCounter { )) } } - -impl ObservableCounter { - /// Records an increment to the counter. - /// - /// It is only valid to call this within a callback. If called outside of the - /// registered callback it should have no effect on the instrument, and an - /// error will be reported via the error handler. - pub fn observe(&self, value: T, attributes: &[KeyValue]) { - self.0.observe(value, attributes) - } - - /// Used for SDKs to downcast instruments in callbacks. 
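The cloning guidance added here can be made concrete; this sketch shares one `Counter` across threads instead of re-creating it (names are illustrative):

```rust
use opentelemetry::{global, KeyValue};

fn main() {
    let meter = global::meter("my-service");
    let counter = meter.u64_counter("jobs.completed").build();

    // Cloning is cheap (an Arc bump); prefer it over re-creating the
    // same instrument, which can degrade SDK performance.
    let worker_counter = counter.clone();
    let handle = std::thread::spawn(move || {
        worker_counter.add(1, &[KeyValue::new("worker", "a")]);
    });
    counter.add(1, &[KeyValue::new("worker", "main")]);
    handle.join().unwrap();
}
```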
- pub fn as_any(&self) -> Arc { - self.0.as_any() - } -} - -impl AsyncInstrument for ObservableCounter { - fn observe(&self, measurement: T, attributes: &[KeyValue]) { - self.0.observe(measurement, attributes) - } - - fn as_any(&self) -> Arc { - self.0.as_any() - } -} - -impl TryFrom, u64>> for ObservableCounter { - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableCounter, u64>, - ) -> Result { - builder.meter.instrument_provider.u64_observable_counter( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) - } -} - -impl TryFrom, f64>> for ObservableCounter { - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableCounter, f64>, - ) -> Result { - builder.meter.instrument_provider.f64_observable_counter( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) - } -} diff --git a/opentelemetry/src/metrics/instruments/gauge.rs b/opentelemetry/src/metrics/instruments/gauge.rs index 274134aaba..0b2bb4d82c 100644 --- a/opentelemetry/src/metrics/instruments/gauge.rs +++ b/opentelemetry/src/metrics/instruments/gauge.rs @@ -1,20 +1,17 @@ -use crate::{ - metrics::{AsyncInstrument, AsyncInstrumentBuilder, InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::KeyValue; use core::fmt; -use std::any::Any; use std::sync::Arc; -/// An SDK implemented instrument that records independent values -pub trait SyncGauge { - /// Records an independent value. - fn record(&self, value: T, attributes: &[KeyValue]); -} +use super::SyncInstrument; /// An instrument that records independent values +/// +/// [`Gauge`] can be cloned to create multiple handles to the same instrument. If a [`Gauge`] needs to be shared, +/// users are recommended to clone the [`Gauge`] instead of creating duplicate [`Gauge`]s for the same metric. Creating +/// duplicate [`Gauge`]s for the same metric could lower SDK performance. #[derive(Clone)] -pub struct Gauge(Arc + Send + Sync>); +#[non_exhaustive] +pub struct Gauge(Arc + Send + Sync>); impl fmt::Debug for Gauge where @@ -27,49 +24,22 @@ where impl Gauge { /// Create a new gauge. - pub fn new(inner: Arc + Send + Sync>) -> Self { + pub fn new(inner: Arc + Send + Sync>) -> Self { Gauge(inner) } /// Records an independent value. pub fn record(&self, value: T, attributes: &[KeyValue]) { - self.0.record(value, attributes) - } -} - -impl TryFrom>> for Gauge { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Gauge>) -> Result { - builder - .instrument_provider - .u64_gauge(builder.name, builder.description, builder.unit) - } -} - -impl TryFrom>> for Gauge { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Gauge>) -> Result { - builder - .instrument_provider - .f64_gauge(builder.name, builder.description, builder.unit) - } -} - -impl TryFrom>> for Gauge { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Gauge>) -> Result { - builder - .instrument_provider - .i64_gauge(builder.name, builder.description, builder.unit) + self.0.measure(value, attributes) } } /// An async instrument that records independent readings. #[derive(Clone)] -pub struct ObservableGauge(Arc>); +#[non_exhaustive] +pub struct ObservableGauge { + _marker: std::marker::PhantomData, +} impl fmt::Debug for ObservableGauge where @@ -83,80 +53,12 @@ where } } -impl ObservableGauge { - /// Records the state of the instrument. - /// - /// It is only valid to call this within a callback. 
If called outside of the - /// registered callback it should have no effect on the instrument, and an - /// error will be reported via the error handler. - pub fn observe(&self, measurement: T, attributes: &[KeyValue]) { - self.0.observe(measurement, attributes) - } - - /// Used by SDKs to downcast instruments in callbacks. - pub fn as_any(&self) -> Arc { - self.0.as_any() - } -} - -impl AsyncInstrument for ObservableGauge { - fn observe(&self, measurement: M, attributes: &[KeyValue]) { - self.observe(measurement, attributes) - } - - fn as_any(&self) -> Arc { - self.0.as_any() - } -} - impl ObservableGauge { /// Create a new gauge - pub fn new(inner: Arc>) -> Self { - ObservableGauge(inner) - } -} - -impl TryFrom, u64>> for ObservableGauge { - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableGauge, u64>, - ) -> Result { - builder.meter.instrument_provider.u64_observable_gauge( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) - } -} - -impl TryFrom, f64>> for ObservableGauge { - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableGauge, f64>, - ) -> Result { - builder.meter.instrument_provider.f64_observable_gauge( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) - } -} - -impl TryFrom, i64>> for ObservableGauge { - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableGauge, i64>, - ) -> Result { - builder.meter.instrument_provider.i64_observable_gauge( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) + #[allow(clippy::new_without_default)] + pub fn new() -> Self { + ObservableGauge { + _marker: std::marker::PhantomData, + } } } diff --git a/opentelemetry/src/metrics/instruments/histogram.rs b/opentelemetry/src/metrics/instruments/histogram.rs index 167da10f7c..73c7d0bc96 100644 --- a/opentelemetry/src/metrics/instruments/histogram.rs +++ b/opentelemetry/src/metrics/instruments/histogram.rs @@ -1,19 +1,17 @@ -use crate::{ - metrics::{InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::KeyValue; use core::fmt; use std::sync::Arc; -/// An SDK implemented instrument that records a distribution of values. -pub trait SyncHistogram { - /// Adds an additional value to the distribution. - fn record(&self, value: T, attributes: &[KeyValue]); -} +use super::SyncInstrument; /// An instrument that records a distribution of values. +/// +/// [`Histogram`] can be cloned to create multiple handles to the same instrument. If a [`Histogram`] needs to be shared, +/// users are recommended to clone the [`Histogram`] instead of creating duplicate [`Histogram`]s for the same metric. Creating +/// duplicate [`Histogram`]s for the same metric could lower SDK performance. #[derive(Clone)] -pub struct Histogram(Arc + Send + Sync>); +#[non_exhaustive] +pub struct Histogram(Arc + Send + Sync>); impl fmt::Debug for Histogram where @@ -26,32 +24,12 @@ where impl Histogram { /// Create a new histogram. - pub fn new(inner: Arc + Send + Sync>) -> Self { + pub fn new(inner: Arc + Send + Sync>) -> Self { Histogram(inner) } /// Adds an additional value to the distribution. 
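Recording into a histogram, including the custom bucket boundaries introduced by the `HistogramBuilder` changes further below; the boundary values here are illustrative:

```rust
use opentelemetry::{global, KeyValue};

fn main() {
    let meter = global::meter("my-service");
    // Boundaries must be finite, strictly increasing, and duplicate-free,
    // per the HistogramBuilder documentation in this diff.
    let histogram = meter
        .f64_histogram("http.server.request.duration")
        .with_unit("s")
        .with_boundaries(vec![0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0])
        .build();

    histogram.record(0.042, &[KeyValue::new("http.request.method", "GET")]);
}
```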
pub fn record(&self, value: T, attributes: &[KeyValue]) { - self.0.record(value, attributes) - } -} - -impl TryFrom>> for Histogram { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Histogram>) -> Result { - builder - .instrument_provider - .f64_histogram(builder.name, builder.description, builder.unit) - } -} - -impl TryFrom>> for Histogram { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, Histogram>) -> Result { - builder - .instrument_provider - .u64_histogram(builder.name, builder.description, builder.unit) + self.0.measure(value, attributes) } } diff --git a/opentelemetry/src/metrics/instruments/mod.rs b/opentelemetry/src/metrics/instruments/mod.rs index 3ee530453a..48d238ef5b 100644 --- a/opentelemetry/src/metrics/instruments/mod.rs +++ b/opentelemetry/src/metrics/instruments/mod.rs @@ -1,12 +1,15 @@ -use crate::metrics::{Meter, MetricsError, Result}; +use gauge::{Gauge, ObservableGauge}; + +use crate::metrics::Meter; use crate::KeyValue; use core::fmt; -use std::any::Any; use std::borrow::Cow; use std::marker; -use std::sync::Arc; -use super::InstrumentProvider; +use super::{ + Counter, Histogram, InstrumentProvider, ObservableCounter, ObservableUpDownCounter, + UpDownCounter, +}; pub(super) mod counter; pub(super) mod gauge; @@ -19,31 +22,45 @@ pub trait AsyncInstrument: Send + Sync { /// /// It is only valid to call this within a callback. fn observe(&self, measurement: T, attributes: &[KeyValue]); +} - /// Used for SDKs to downcast instruments in callbacks. - fn as_any(&self) -> Arc; +/// An SDK implemented instrument that records measurements synchronously. +pub trait SyncInstrument: Send + Sync { + /// Records a measurement synchronously. + fn measure(&self, measurement: T, attributes: &[KeyValue]); } -/// Configuration for building a sync instrument. -pub struct InstrumentBuilder<'a, T> { - instrument_provider: &'a dyn InstrumentProvider, - name: Cow<'static, str>, - description: Option>, - unit: Option>, +/// Configuration for building a Histogram. +#[non_exhaustive] // We expect to add more configuration fields in the future +pub struct HistogramBuilder<'a, T> { + /// Instrument provider is used to create the instrument. + pub instrument_provider: &'a dyn InstrumentProvider, + + /// Name of the Histogram. + pub name: Cow<'static, str>, + + /// Description of the Histogram. + pub description: Option>, + + /// Unit of the Histogram. + pub unit: Option>, + + /// Bucket boundaries for the histogram. + pub boundaries: Option>, + + // boundaries: Vec, _marker: marker::PhantomData, } -impl<'a, T> InstrumentBuilder<'a, T> -where - T: TryFrom, -{ +impl<'a, T> HistogramBuilder<'a, T> { /// Create a new instrument builder pub(crate) fn new(meter: &'a Meter, name: Cow<'static, str>) -> Self { - InstrumentBuilder { + HistogramBuilder { instrument_provider: meter.instrument_provider.as_ref(), name, description: None, unit: None, + boundaries: None, _marker: marker::PhantomData, } } @@ -66,24 +83,128 @@ where self } - /// Validate the instrument configuration and creates a new instrument. - pub fn try_init(self) -> Result { - T::try_from(self) + /// Set the boundaries for this histogram. + /// + /// Setting boundaries is optional. 
By default, the boundaries are set to: + /// + /// `[0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, + /// 2500.0, 5000.0, 7500.0, 10000.0]` + /// + /// # Notes + /// - Boundaries must not contain `f64::NAN`, `f64::INFINITY` or + /// `f64::NEG_INFINITY` + /// - Values must be in strictly increasing order (e.g., each value must be + /// greater than the previous). + /// - Boundaries must not contain duplicate values. + /// + /// If invalid boundaries are provided, the instrument will not report + /// measurements. + /// Providing an empty `vec![]` means no bucket information will be + /// calculated. + /// + /// # Warning + /// Using more buckets can improve the accuracy of percentile calculations in backends. + /// However, this comes at a cost, including increased memory, CPU, and network usage. + /// Choose the number of buckets carefully, considering your application's performance + /// and resource requirements. + pub fn with_boundaries(mut self, boundaries: Vec) -> Self { + self.boundaries = Some(boundaries); + self } +} +impl HistogramBuilder<'_, Histogram> { /// Creates a new instrument. /// - /// Validate the instrument configuration and crates a new instrument. + /// Validates the instrument configuration and creates a new instrument. In + /// case of invalid configuration, a no-op instrument is returned + /// and an error is logged using internal logging. + pub fn build(self) -> Histogram { + self.instrument_provider.f64_histogram(self) + } +} + +impl HistogramBuilder<'_, Histogram> { + /// Creates a new instrument. /// - /// # Panics + /// Validates the instrument configuration and creates a new instrument. In + /// case of invalid configuration, a no-op instrument is returned + /// and an error is logged using internal logging. + pub fn build(self) -> Histogram { + self.instrument_provider.u64_histogram(self) + } +} + +/// Configuration for building a sync instrument. +#[non_exhaustive] // We expect to add more configuration fields in the future +pub struct InstrumentBuilder<'a, T> { + /// Instrument provider is used to create the instrument. + pub instrument_provider: &'a dyn InstrumentProvider, + + /// Name of the instrument. + pub name: Cow<'static, str>, + + /// Description of the instrument. + pub description: Option>, + + /// Unit of the instrument. + pub unit: Option>, + + _marker: marker::PhantomData, +} + +impl<'a, T> InstrumentBuilder<'a, T> { + /// Create a new instrument builder + pub(crate) fn new(meter: &'a Meter, name: Cow<'static, str>) -> Self { + InstrumentBuilder { + instrument_provider: meter.instrument_provider.as_ref(), + name, + description: None, + unit: None, + _marker: marker::PhantomData, + } + } + + /// Set the description for this instrument + pub fn with_description>>(mut self, description: S) -> Self { + self.description = Some(description.into()); + self + } + + /// Set the unit for this instrument. + /// + /// Unit is case sensitive(`kb` is not the same as `kB`). /// - /// Panics if the instrument cannot be created. Use - /// [`try_init`](InstrumentBuilder::try_init) if you want to handle errors. - pub fn init(self) -> T { - T::try_from(self).unwrap() + /// Unit must be: + /// - ASCII string + /// - No longer than 63 characters + pub fn with_unit>>(mut self, unit: S) -> Self { + self.unit = Some(unit.into()); + self } } +macro_rules! 
build_instrument { + ($name:ident, $inst:ty) => { + impl<'a> InstrumentBuilder<'a, $inst> { + #[doc = concat!("Validates the instrument configuration and creates a new `", stringify!($inst), "`.")] + /// In case of invalid configuration, a no-op instrument is returned + /// and an error is logged using internal logging. + pub fn build(self) -> $inst { + self.instrument_provider.$name(self) + } + } + }; +} + +build_instrument!(u64_counter, Counter); +build_instrument!(f64_counter, Counter); +build_instrument!(u64_gauge, Gauge); +build_instrument!(f64_gauge, Gauge); +build_instrument!(i64_gauge, Gauge); +build_instrument!(i64_up_down_counter, UpDownCounter); +build_instrument!(f64_up_down_counter, UpDownCounter); + impl fmt::Debug for InstrumentBuilder<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("InstrumentBuilder") @@ -95,6 +216,21 @@ impl fmt::Debug for InstrumentBuilder<'_, T> { } } +impl fmt::Debug for HistogramBuilder<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("HistogramBuilder") + .field("name", &self.name) + .field("description", &self.description) + .field("unit", &self.unit) + .field("boundaries", &self.boundaries) + .field( + "kind", + &format!("Histogram<{}>", &std::any::type_name::()), + ) + .finish() + } +} + /// A function registered with a [Meter] that makes observations for the /// instruments it is registered with. /// @@ -105,27 +241,31 @@ impl fmt::Debug for InstrumentBuilder<'_, T> { pub type Callback = Box) + Send + Sync>; /// Configuration for building an async instrument. -pub struct AsyncInstrumentBuilder<'a, I, M> -where - I: AsyncInstrument, -{ - meter: &'a Meter, - name: Cow<'static, str>, - description: Option>, - unit: Option>, +#[non_exhaustive] // We expect to add more configuration fields in the future +pub struct AsyncInstrumentBuilder<'a, I, M> { + /// Instrument provider is used to create the instrument. + pub instrument_provider: &'a dyn InstrumentProvider, + + /// Name of the instrument. + pub name: Cow<'static, str>, + + /// Description of the instrument. + pub description: Option>, + + /// Unit of the instrument. + pub unit: Option>, + + /// Callbacks to be called for this instrument. + pub callbacks: Vec>, + _inst: marker::PhantomData, - callbacks: Vec>, } -impl<'a, I, M> AsyncInstrumentBuilder<'a, I, M> -where - I: TryFrom, - I: AsyncInstrument, -{ +impl<'a, I, M> AsyncInstrumentBuilder<'a, I, M> { /// Create a new instrument builder pub(crate) fn new(meter: &'a Meter, name: Cow<'static, str>) -> Self { AsyncInstrumentBuilder { - meter, + instrument_provider: meter.instrument_provider.as_ref(), name, description: None, unit: None, @@ -160,25 +300,37 @@ where self.callbacks.push(Box::new(callback)); self } +} - /// Validate the instrument configuration and creates a new instrument. - pub fn try_init(self) -> Result { - I::try_from(self) - } - - /// Creates a new instrument. - /// - /// Validate the instrument configuration and creates a new instrument. - /// - /// # Panics - /// - /// Panics if the instrument cannot be created. Use - /// [`try_init`](InstrumentBuilder::try_init) if you want to handle errors. - pub fn init(self) -> I { - I::try_from(self).unwrap() - } +macro_rules! 
build_async_instrument { + ($name:ident, $inst:ty, $measurement:ty) => { + impl<'a> AsyncInstrumentBuilder<'a, $inst, $measurement> { + #[doc = concat!("Validates the instrument configuration and creates a new `", stringify!($inst), "`.")] + /// In case of invalid configuration, a no-op instrument is returned + /// and an error is logged using internal logging. + pub fn build(self) -> $inst { + self.instrument_provider.$name(self) + } + } + }; } +build_async_instrument!(u64_observable_counter, ObservableCounter, u64); +build_async_instrument!(f64_observable_counter, ObservableCounter, f64); +build_async_instrument!(u64_observable_gauge, ObservableGauge, u64); +build_async_instrument!(f64_observable_gauge, ObservableGauge, f64); +build_async_instrument!(i64_observable_gauge, ObservableGauge, i64); +build_async_instrument!( + i64_observable_up_down_counter, + ObservableUpDownCounter, + i64 +); +build_async_instrument!( + f64_observable_up_down_counter, + ObservableUpDownCounter, + f64 +); + impl fmt::Debug for AsyncInstrumentBuilder<'_, I, M> where I: AsyncInstrument, diff --git a/opentelemetry/src/metrics/instruments/up_down_counter.rs b/opentelemetry/src/metrics/instruments/up_down_counter.rs index f7a2b5e8c2..b9fb996329 100644 --- a/opentelemetry/src/metrics/instruments/up_down_counter.rs +++ b/opentelemetry/src/metrics/instruments/up_down_counter.rs @@ -1,22 +1,17 @@ -use crate::{ - metrics::{InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::KeyValue; use core::fmt; -use std::any::Any; use std::sync::Arc; -use super::{AsyncInstrument, AsyncInstrumentBuilder}; - -/// An SDK implemented instrument that records increasing or decreasing values. -pub trait SyncUpDownCounter { - /// Records an increment or decrement to the counter. - fn add(&self, value: T, attributes: &[KeyValue]); -} +use super::SyncInstrument; /// An instrument that records increasing or decreasing values. +/// +/// [`UpDownCounter`] can be cloned to create multiple handles to the same instrument. If a [`UpDownCounter`] needs to be shared, +/// users are recommended to clone the [`UpDownCounter`] instead of creating duplicate [`UpDownCounter`]s for the same metric. Creating +/// duplicate [`UpDownCounter`]s for the same metric could lower SDK performance. #[derive(Clone)] -pub struct UpDownCounter(Arc + Send + Sync>); +#[non_exhaustive] +pub struct UpDownCounter(Arc + Send + Sync>); impl fmt::Debug for UpDownCounter where @@ -32,43 +27,22 @@ where impl UpDownCounter { /// Create a new up down counter. - pub fn new(inner: Arc + Send + Sync>) -> Self { + pub fn new(inner: Arc + Send + Sync>) -> Self { UpDownCounter(inner) } /// Records an increment or decrement to the counter. pub fn add(&self, value: T, attributes: &[KeyValue]) { - self.0.add(value, attributes) - } -} - -impl TryFrom>> for UpDownCounter { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, UpDownCounter>) -> Result { - builder.instrument_provider.i64_up_down_counter( - builder.name, - builder.description, - builder.unit, - ) - } -} - -impl TryFrom>> for UpDownCounter { - type Error = MetricsError; - - fn try_from(builder: InstrumentBuilder<'_, UpDownCounter>) -> Result { - builder.instrument_provider.f64_up_down_counter( - builder.name, - builder.description, - builder.unit, - ) + self.0.measure(value, attributes) } } /// An async instrument that records increasing or decreasing values. 
#[derive(Clone)] -pub struct ObservableUpDownCounter(Arc>); +#[non_exhaustive] +pub struct ObservableUpDownCounter { + _marker: std::marker::PhantomData, +} impl fmt::Debug for ObservableUpDownCounter where @@ -84,71 +58,10 @@ where impl ObservableUpDownCounter { /// Create a new observable up down counter. - pub fn new(inner: Arc>) -> Self { - ObservableUpDownCounter(inner) - } - - /// Records the increment or decrement to the counter. - /// - /// It is only valid to call this within a callback. If called outside of the - /// registered callback it should have no effect on the instrument, and an - /// error will be reported via the error handler. - pub fn observe(&self, value: T, attributes: &[KeyValue]) { - self.0.observe(value, attributes) - } - - /// Used for SDKs to downcast instruments in callbacks. - pub fn as_any(&self) -> Arc { - self.0.as_any() - } -} - -impl AsyncInstrument for ObservableUpDownCounter { - fn observe(&self, measurement: T, attributes: &[KeyValue]) { - self.0.observe(measurement, attributes) - } - - fn as_any(&self) -> Arc { - self.0.as_any() - } -} - -impl TryFrom, i64>> - for ObservableUpDownCounter -{ - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, i64>, - ) -> Result { - builder - .meter - .instrument_provider - .i64_observable_up_down_counter( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) - } -} - -impl TryFrom, f64>> - for ObservableUpDownCounter -{ - type Error = MetricsError; - - fn try_from( - builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, f64>, - ) -> Result { - builder - .meter - .instrument_provider - .f64_observable_up_down_counter( - builder.name, - builder.description, - builder.unit, - builder.callbacks, - ) + #[allow(clippy::new_without_default)] + pub fn new() -> Self { + ObservableUpDownCounter { + _marker: std::marker::PhantomData, + } } } diff --git a/opentelemetry/src/metrics/meter.rs b/opentelemetry/src/metrics/meter.rs index 3422240b48..95fd14d8a4 100644 --- a/opentelemetry/src/metrics/meter.rs +++ b/opentelemetry/src/metrics/meter.rs @@ -3,10 +3,12 @@ use std::borrow::Cow; use std::sync::Arc; use crate::metrics::{ - AsyncInstrumentBuilder, Counter, Gauge, Histogram, InstrumentBuilder, InstrumentProvider, - ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter, + AsyncInstrumentBuilder, Gauge, InstrumentBuilder, InstrumentProvider, ObservableCounter, + ObservableGauge, ObservableUpDownCounter, UpDownCounter, }; -use crate::KeyValue; +use crate::InstrumentationScope; + +use super::{Counter, Histogram, HistogramBuilder}; /// Provides access to named [Meter] instances, for instrumenting an application /// or crate. @@ -17,8 +19,6 @@ pub trait MeterProvider { /// name needs to be unique so it does not collide with other names used by /// an application, nor other applications. /// - /// If the name is empty, then an implementation defined default name will - /// be used instead. 
/// /// # Examples /// @@ -30,37 +30,36 @@ pub trait MeterProvider { /// /// // meter used in applications /// let meter = provider.meter("my_app"); - /// - /// // meter used in libraries/crates that optionally includes version and schema url - /// let meter = provider.versioned_meter( - /// "my_library", - /// Some(env!("CARGO_PKG_VERSION")), - /// Some("https://opentelemetry.io/schema/1.0.0"), - /// Some(vec![KeyValue::new("key", "value")]), - /// ); /// ``` - fn meter(&self, name: impl Into>) -> Meter { - self.versioned_meter( - name, - None::>, - None::>, - None, - ) + fn meter(&self, name: &'static str) -> Meter { + let scope = InstrumentationScope::builder(name).build(); + self.meter_with_scope(scope) } - /// Returns a new versioned meter with a given name. + /// Returns a new [Meter] with the given instrumentation scope. /// - /// The instrumentation name must be the name of the library providing instrumentation. This - /// name may be the same as the instrumented code only if that code provides built-in - /// instrumentation. If the instrumentation name is empty, then a implementation defined - /// default name will be used instead. - fn versioned_meter( - &self, - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, - ) -> Meter; + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use opentelemetry::InstrumentationScope; + /// use opentelemetry::metrics::MeterProvider; + /// use opentelemetry_sdk::metrics::SdkMeterProvider; + /// + /// let provider = SdkMeterProvider::default(); + /// + /// // meter used in applications/binaries + /// let meter = provider.meter("my_app"); + /// + /// // meter used in libraries/crates that optionally includes version and schema url + /// let scope = InstrumentationScope::builder(env!("CARGO_PKG_NAME")) + /// .with_version(env!("CARGO_PKG_VERSION")) + /// .with_schema_url("https://opentelemetry.io/schema/1.0.0") + /// .build(); + /// + /// let meter = provider.meter_with_scope(scope); + /// ``` + fn meter_with_scope(&self, scope: InstrumentationScope) -> Meter; } /// Provides the ability to create instruments for recording measurements or @@ -74,10 +73,10 @@ pub trait MeterProvider { /// your application's processing logic. For example, you might use a Counter /// to record the number of HTTP requests received. /// -/// - **Asynchronous Instruments** (e.g., Gauge): These allow you to register a -/// callback function that is invoked during export. For instance, you could -/// use an asynchronous gauge to monitor temperature from a sensor every time -/// metrics are exported. +/// - **Asynchronous Instruments** (e.g., ObservableGauge): These allow you to +/// register a callback function that is invoked during export. For instance, +/// you could use an asynchronous gauge to monitor temperature from a sensor +/// every time metrics are exported. 
/// /// # Example Usage /// @@ -89,7 +88,7 @@ pub trait MeterProvider { /// // Synchronous Instruments /// /// // u64 Counter -/// let u64_counter = meter.u64_counter("my_u64_counter").init(); +/// let u64_counter = meter.u64_counter("my_u64_counter").build(); /// u64_counter.add( /// 10, /// &[ @@ -99,7 +98,7 @@ pub trait MeterProvider { /// ); /// /// // f64 Counter -/// let f64_counter = meter.f64_counter("my_f64_counter").init(); +/// let f64_counter = meter.f64_counter("my_f64_counter").build(); /// f64_counter.add( /// 3.15, /// &[ @@ -108,7 +107,6 @@ pub trait MeterProvider { /// ], /// ); /// -/// // Asynchronous Instruments /// /// // u64 Observable Counter /// let _observable_u64_counter = meter @@ -124,7 +122,7 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); /// /// // f64 Observable Counter /// let _observable_f64_counter = meter @@ -140,10 +138,10 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); /// /// // i64 UpDownCounter -/// let updown_i64_counter = meter.i64_up_down_counter("my_updown_i64_counter").init(); +/// let updown_i64_counter = meter.i64_up_down_counter("my_updown_i64_counter").build(); /// updown_i64_counter.add( /// -10, /// &[ @@ -153,7 +151,7 @@ pub trait MeterProvider { /// ); /// /// // f64 UpDownCounter -/// let updown_f64_counter = meter.f64_up_down_counter("my_updown_f64_counter").init(); +/// let updown_f64_counter = meter.f64_up_down_counter("my_updown_f64_counter").build(); /// updown_f64_counter.add( /// -10.67, /// &[ @@ -176,7 +174,7 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); /// /// // f64 Observable UpDownCounter /// let _observable_updown_f64_counter = meter @@ -192,7 +190,37 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); +/// +/// // i64 Gauge +/// let gauge = meter.i64_gauge("my_gauge").build(); +/// gauge.record( +/// -10, +/// &[ +/// KeyValue::new("mykey1", "myvalue1"), +/// KeyValue::new("mykey2", "myvalue2"), +/// ], +/// ); +/// +/// // u64 Gauge +/// let gauge = meter.u64_gauge("my_gauge").build(); +/// gauge.record( +/// 101, +/// &[ +/// KeyValue::new("mykey1", "myvalue1"), +/// KeyValue::new("mykey2", "myvalue2"), +/// ], +/// ); +/// +/// // f64 Gauge +/// let gauge = meter.f64_gauge("my_gauge").build(); +/// gauge.record( +/// 12.5, +/// &[ +/// KeyValue::new("mykey1", "myvalue1"), +/// KeyValue::new("mykey2", "myvalue2"), +/// ], +/// ); /// /// // u64 Observable Gauge /// let _observable_u64_gauge = meter @@ -208,7 +236,7 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); /// /// // f64 Observable Gauge /// let _observable_f64_gauge = meter @@ -224,7 +252,7 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); /// /// // i64 Observable Gauge /// let _observable_i64_gauge = meter @@ -240,10 +268,10 @@ pub trait MeterProvider { /// ], /// ) /// }) -/// .init(); +/// .build(); /// /// // f64 Histogram -/// let f64_histogram = meter.f64_histogram("my_f64_histogram").init(); +/// let f64_histogram = meter.f64_histogram("my_f64_histogram").build(); /// f64_histogram.record( /// 10.5, /// &[ @@ -253,7 +281,7 @@ pub trait MeterProvider { /// ); /// /// // u64 Histogram -/// let u64_histogram = meter.u64_histogram("my_u64_histogram").init(); +/// let u64_histogram = meter.u64_histogram("my_u64_histogram").build(); /// u64_histogram.record( /// 12, /// &[ @@ -264,6 +292,7 @@ pub trait MeterProvider { /// ``` /// #[derive(Clone)] +#[non_exhaustive] pub struct Meter { 
pub(crate) instrument_provider: Arc, } @@ -278,6 +307,10 @@ impl Meter { } /// creates an instrument builder for recording increasing values. + /// + /// [`Counter`] can be cloned to create multiple handles to the same instrument. If a [`Counter`] needs to be shared, + /// users are recommended to clone the [`Counter`] instead of creating duplicate [`Counter`]s for the same metric. Creating + /// duplicate [`Counter`]s for the same metric could lower SDK performance. pub fn u64_counter( &self, name: impl Into>, @@ -286,6 +319,10 @@ impl Meter { } /// creates an instrument builder for recording increasing values. + /// + /// [`Counter`] can be cloned to create multiple handles to the same instrument. If a [`Counter`] needs to be shared, + /// users are recommended to clone the [`Counter`] instead of creating duplicate [`Counter`]s for the same metric. Creating + /// duplicate [`Counter`]s for the same metric could lower SDK performance. pub fn f64_counter( &self, name: impl Into>, @@ -310,6 +347,10 @@ impl Meter { } /// creates an instrument builder for recording changes of a value. + /// + /// [`UpDownCounter`] can be cloned to create multiple handles to the same instrument. If a [`UpDownCounter`] needs to be shared, + /// users are recommended to clone the [`UpDownCounter`] instead of creating duplicate [`UpDownCounter`]s for the same metric. Creating + /// duplicate [`UpDownCounter`]s for the same metric could lower SDK performance. pub fn i64_up_down_counter( &self, name: impl Into>, @@ -318,6 +359,10 @@ impl Meter { } /// creates an instrument builder for recording changes of a value. + /// + /// [`UpDownCounter`] can be cloned to create multiple handles to the same instrument. If a [`UpDownCounter`] needs to be shared, + /// users are recommended to clone the [`UpDownCounter`] instead of creating duplicate [`UpDownCounter`]s for the same metric. Creating + /// duplicate [`UpDownCounter`]s for the same metric could lower SDK performance. pub fn f64_up_down_counter( &self, name: impl Into>, @@ -326,6 +371,10 @@ impl Meter { } /// creates an instrument builder for recording changes of a value via callback. + /// + /// [`UpDownCounter`] can be cloned to create multiple handles to the same instrument. If a [`UpDownCounter`] needs to be shared, + /// users are recommended to clone the [`UpDownCounter`] instead of creating duplicate [`UpDownCounter`]s for the same metric. Creating + /// duplicate [`UpDownCounter`]s for the same metric could lower SDK performance. pub fn i64_observable_up_down_counter( &self, name: impl Into>, @@ -342,6 +391,10 @@ impl Meter { } /// creates an instrument builder for recording independent values. + /// + /// [`Gauge`] can be cloned to create multiple handles to the same instrument. If a [`Gauge`] needs to be shared, + /// users are recommended to clone the [`Gauge`] instead of creating duplicate [`Gauge`]s for the same metric. Creating + /// duplicate [`Gauge`]s for the same metric could lower SDK performance. pub fn u64_gauge( &self, name: impl Into>, @@ -350,6 +403,10 @@ impl Meter { } /// creates an instrument builder for recording independent values. + /// + /// [`Gauge`] can be cloned to create multiple handles to the same instrument. If a [`Gauge`] needs to be shared, + /// users are recommended to clone the [`Gauge`] instead of creating duplicate [`Gauge`]s for the same metric. Creating + /// duplicate [`Gauge`]s for the same metric could lower SDK performance. 
pub fn f64_gauge( &self, name: impl Into>, @@ -358,6 +415,9 @@ impl Meter { } /// creates an instrument builder for recording independent values. + /// [`Gauge`] can be cloned to create multiple handles to the same instrument. If a [`Gauge`] needs to be shared, + /// users are recommended to clone the [`Gauge`] instead of creating duplicate [`Gauge`]s for the same metric. Creating + /// duplicate [`Gauge`]s for the same metric could lower SDK performance. pub fn i64_gauge( &self, name: impl Into>, @@ -390,19 +450,27 @@ impl Meter { } /// creates an instrument builder for recording a distribution of values. + /// + /// [`Histogram`] can be cloned to create multiple handles to the same instrument. If a [`Histogram`] needs to be shared, + /// users are recommended to clone the [`Histogram`] instead of creating duplicate [`Histogram`]s for the same metric. Creating + /// duplicate [`Histogram`]s for the same metric could lower SDK performance. pub fn f64_histogram( &self, name: impl Into>, - ) -> InstrumentBuilder<'_, Histogram> { - InstrumentBuilder::new(self, name.into()) + ) -> HistogramBuilder<'_, Histogram> { + HistogramBuilder::new(self, name.into()) } /// creates an instrument builder for recording a distribution of values. + /// + /// [`Histogram`] can be cloned to create multiple handles to the same instrument. If a [`Histogram`] needs to be shared, + /// users are recommended to clone the [`Histogram`] instead of creating duplicate [`Histogram`]s for the same metric. Creating + /// duplicate [`Histogram`]s for the same metric could lower SDK performance. pub fn u64_histogram( &self, name: impl Into>, - ) -> InstrumentBuilder<'_, Histogram> { - InstrumentBuilder::new(self, name.into()) + ) -> HistogramBuilder<'_, Histogram> { + HistogramBuilder::new(self, name.into()) } } diff --git a/opentelemetry/src/metrics/mod.rs b/opentelemetry/src/metrics/mod.rs index f929c16d27..075d5cccbe 100644 --- a/opentelemetry/src/metrics/mod.rs +++ b/opentelemetry/src/metrics/mod.rs @@ -1,61 +1,23 @@ //! # OpenTelemetry Metrics API -use std::cmp::Ordering; use std::hash::{Hash, Hasher}; -use std::result; -use std::sync::PoisonError; -use std::{borrow::Cow, sync::Arc}; -use thiserror::Error; +use std::sync::Arc; mod instruments; mod meter; -pub mod noop; +pub(crate) mod noop; -use crate::{Array, ExportError, KeyValue, Value}; +use crate::{Array, KeyValue, Value}; pub use instruments::{ - counter::{Counter, ObservableCounter, SyncCounter}, - gauge::{Gauge, ObservableGauge, SyncGauge}, - histogram::{Histogram, SyncHistogram}, - up_down_counter::{ObservableUpDownCounter, SyncUpDownCounter, UpDownCounter}, - AsyncInstrument, AsyncInstrumentBuilder, Callback, InstrumentBuilder, + counter::{Counter, ObservableCounter}, + gauge::{Gauge, ObservableGauge}, + histogram::Histogram, + up_down_counter::{ObservableUpDownCounter, UpDownCounter}, + AsyncInstrument, AsyncInstrumentBuilder, Callback, HistogramBuilder, InstrumentBuilder, + SyncInstrument, }; pub use meter::{Meter, MeterProvider}; -/// A specialized `Result` type for metric operations. -pub type Result = result::Result; - -/// Errors returned by the metrics API. -#[derive(Error, Debug)] -#[non_exhaustive] -pub enum MetricsError { - /// Other errors not covered by specific cases. 
- #[error("Metrics error: {0}")] - Other(String), - /// Invalid configuration - #[error("Config error {0}")] - Config(String), - /// Fail to export metrics - #[error("Metrics exporter {} failed with {0}", .0.exporter_name())] - ExportErr(Box), - /// Invalid instrument configuration such invalid instrument name, invalid instrument description, invalid instrument unit, etc. - /// See [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/api.md#general-characteristics) - /// for full list of requirements. - #[error("Invalid instrument configuration: {0}")] - InvalidInstrumentConfiguration(&'static str), -} - -impl From for MetricsError { - fn from(err: T) -> Self { - MetricsError::ExportErr(Box::new(err)) - } -} - -impl From> for MetricsError { - fn from(err: PoisonError) -> Self { - MetricsError::Other(err.to_string()) - } -} - struct F64Hashable(f64); impl PartialEq for F64Hashable { @@ -90,206 +52,115 @@ impl Hash for KeyValue { } } -impl PartialOrd for KeyValue { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Ordering is based on the key only. -impl Ord for KeyValue { - fn cmp(&self, other: &Self) -> Ordering { - self.key.cmp(&other.key) - } -} - impl Eq for KeyValue {} /// SDK implemented trait for creating instruments pub trait InstrumentProvider { /// creates an instrument for recording increasing values. - fn u64_counter( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Counter::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn u64_counter(&self, _builder: InstrumentBuilder<'_, Counter>) -> Counter { + Counter::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording increasing values. - fn f64_counter( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Counter::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn f64_counter(&self, _builder: InstrumentBuilder<'_, Counter>) -> Counter { + Counter::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording increasing values via callback. fn u64_observable_counter( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableCounter::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableCounter, u64>, + ) -> ObservableCounter { + ObservableCounter::new() } /// creates an instrument for recording increasing values via callback. fn f64_observable_counter( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableCounter::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableCounter, f64>, + ) -> ObservableCounter { + ObservableCounter::new() } /// creates an instrument for recording changes of a value. fn i64_up_down_counter( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(UpDownCounter::new( - Arc::new(noop::NoopSyncInstrument::new()), - )) + _builder: InstrumentBuilder<'_, UpDownCounter>, + ) -> UpDownCounter { + UpDownCounter::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording changes of a value. 
fn f64_up_down_counter( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(UpDownCounter::new( - Arc::new(noop::NoopSyncInstrument::new()), - )) + _builder: InstrumentBuilder<'_, UpDownCounter>, + ) -> UpDownCounter { + UpDownCounter::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording changes of a value. fn i64_observable_up_down_counter( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableUpDownCounter::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, i64>, + ) -> ObservableUpDownCounter { + ObservableUpDownCounter::new() } /// creates an instrument for recording changes of a value via callback. fn f64_observable_up_down_counter( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableUpDownCounter::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableUpDownCounter, f64>, + ) -> ObservableUpDownCounter { + ObservableUpDownCounter::new() } /// creates an instrument for recording independent values. - fn u64_gauge( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn u64_gauge(&self, _builder: InstrumentBuilder<'_, Gauge>) -> Gauge { + Gauge::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording independent values. - fn f64_gauge( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn f64_gauge(&self, _builder: InstrumentBuilder<'_, Gauge>) -> Gauge { + Gauge::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording independent values. - fn i64_gauge( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Gauge::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn i64_gauge(&self, _builder: InstrumentBuilder<'_, Gauge>) -> Gauge { + Gauge::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording the current value via callback. fn u64_observable_gauge( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableGauge::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableGauge, u64>, + ) -> ObservableGauge { + ObservableGauge::new() } /// creates an instrument for recording the current value via callback. fn i64_observable_gauge( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableGauge::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableGauge, i64>, + ) -> ObservableGauge { + ObservableGauge::new() } /// creates an instrument for recording the current value via callback. 
fn f64_observable_gauge( &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - _callback: Vec>, - ) -> Result> { - Ok(ObservableGauge::new(Arc::new( - noop::NoopAsyncInstrument::new(), - ))) + _builder: AsyncInstrumentBuilder<'_, ObservableGauge, f64>, + ) -> ObservableGauge { + ObservableGauge::new() } /// creates an instrument for recording a distribution of values. - fn f64_histogram( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn f64_histogram(&self, _builder: HistogramBuilder<'_, Histogram>) -> Histogram { + Histogram::new(Arc::new(noop::NoopSyncInstrument::new())) } /// creates an instrument for recording a distribution of values. - fn u64_histogram( - &self, - _name: Cow<'static, str>, - _description: Option>, - _unit: Option>, - ) -> Result> { - Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new()))) + fn u64_histogram(&self, _builder: HistogramBuilder<'_, Histogram>) -> Histogram { + Histogram::new(Arc::new(noop::NoopSyncInstrument::new())) } } @@ -367,27 +238,6 @@ mod tests { } } - #[test] - fn kv_float_order() { - // TODO: Extend this test to all value types, not just F64 - let float_vals = [ - 0.0, - 1.0, - -1.0, - f64::INFINITY, - f64::NEG_INFINITY, - f64::NAN, - f64::MIN, - f64::MAX, - ]; - - for v in float_vals { - let kv1 = KeyValue::new("a", v); - let kv2 = KeyValue::new("b", v); - assert!(kv1 < kv2, "Order is solely based on key!"); - } - } - fn hash_helper(item: &T) -> u64 { let mut hasher = DefaultHasher::new(); item.hash(&mut hasher); diff --git a/opentelemetry/src/metrics/noop.rs b/opentelemetry/src/metrics/noop.rs index 716e4ca3c8..2d74be2805 100644 --- a/opentelemetry/src/metrics/noop.rs +++ b/opentelemetry/src/metrics/noop.rs @@ -4,110 +4,63 @@ //! has been set. It is expected to have minimal resource utilization and //! runtime impact. use crate::{ - metrics::{ - AsyncInstrument, InstrumentProvider, Meter, MeterProvider, SyncCounter, SyncGauge, - SyncHistogram, SyncUpDownCounter, - }, - KeyValue, + metrics::{InstrumentProvider, Meter, MeterProvider}, + otel_debug, KeyValue, }; -use std::{any::Any, borrow::Cow, sync::Arc}; +use std::sync::Arc; + +use super::instruments::SyncInstrument; /// A no-op instance of a `MetricProvider` #[derive(Debug, Default)] -pub struct NoopMeterProvider { +pub(crate) struct NoopMeterProvider { _private: (), } impl NoopMeterProvider { /// Create a new no-op meter provider. - pub fn new() -> Self { + pub(crate) fn new() -> Self { NoopMeterProvider { _private: () } } } impl MeterProvider for NoopMeterProvider { - fn versioned_meter( - &self, - _name: impl Into>, - _version: Option>>, - _schema_url: Option>>, - _attributes: Option>, - ) -> Meter { - Meter::new(Arc::new(NoopMeterCore::new())) + fn meter_with_scope(&self, scope: crate::InstrumentationScope) -> Meter { + otel_debug!(name: "NoopMeterProvider.MeterCreation", meter_name = scope.name(), message = "Meter was obtained from a NoopMeterProvider. No metrics will be recorded. If global::meter_with_scope()/meter() was used, ensure that a valid MeterProvider is set globally before creating Meter."); + Meter::new(Arc::new(NoopMeter::new())) } } /// A no-op instance of a `Meter` #[derive(Debug, Default)] -pub struct NoopMeterCore { +pub(crate) struct NoopMeter { _private: (), } -impl NoopMeterCore { +impl NoopMeter { /// Create a new no-op meter core. 
- pub fn new() -> Self { - NoopMeterCore { _private: () } + pub(crate) fn new() -> Self { + NoopMeter { _private: () } } } -impl InstrumentProvider for NoopMeterCore {} +impl InstrumentProvider for NoopMeter {} /// A no-op sync instrument #[derive(Debug, Default)] -pub struct NoopSyncInstrument { +pub(crate) struct NoopSyncInstrument { _private: (), } impl NoopSyncInstrument { /// Create a new no-op sync instrument - pub fn new() -> Self { + pub(crate) fn new() -> Self { NoopSyncInstrument { _private: () } } } -impl SyncCounter for NoopSyncInstrument { - fn add(&self, _value: T, _attributes: &[KeyValue]) { - // Ignored - } -} - -impl SyncUpDownCounter for NoopSyncInstrument { - fn add(&self, _value: T, _attributes: &[KeyValue]) { - // Ignored - } -} - -impl SyncHistogram for NoopSyncInstrument { - fn record(&self, _value: T, _attributes: &[KeyValue]) { - // Ignored - } -} - -impl SyncGauge for NoopSyncInstrument { - fn record(&self, _value: T, _attributes: &[KeyValue]) { +impl SyncInstrument for NoopSyncInstrument { + fn measure(&self, _value: T, _attributes: &[KeyValue]) { // Ignored } } - -/// A no-op async instrument. -#[derive(Debug, Default)] -pub struct NoopAsyncInstrument { - _private: (), -} - -impl NoopAsyncInstrument { - /// Create a new no-op async instrument - pub fn new() -> Self { - NoopAsyncInstrument { _private: () } - } -} - -impl AsyncInstrument for NoopAsyncInstrument { - fn observe(&self, _value: T, _attributes: &[KeyValue]) { - // Ignored - } - - fn as_any(&self) -> Arc { - Arc::new(()) - } -} diff --git a/opentelemetry/src/trace/context.rs b/opentelemetry/src/trace/context.rs index 681c7b2e0c..125c96f5f5 100644 --- a/opentelemetry/src/trace/context.rs +++ b/opentelemetry/src/trace/context.rs @@ -1,6 +1,6 @@ //! Context extensions for tracing use crate::{ - global, + global, otel_debug, trace::{Span, SpanContext, Status}, Context, ContextGuard, KeyValue, }; @@ -32,6 +32,12 @@ pub(crate) struct SynchronizedSpan { inner: Option>, } +impl SynchronizedSpan { + pub(crate) fn span_context(&self) -> &SpanContext { + &self.span_context + } +} + impl From for SynchronizedSpan { fn from(value: SpanContext) -> Self { Self { @@ -55,7 +61,13 @@ impl SpanRef<'_> { if let Some(ref inner) = self.0.inner { match inner.lock() { Ok(mut locked) => f(&mut locked), - Err(err) => global::handle_error(err), + Err(err) => { + otel_debug!( + name: "SpanRef.LockFailed", + message = "Failed to acquire lock for SpanRef: {:?}", + reason = format!("{:?}", err), + span_context = format!("{:?}", self.0.span_context)); + } } } } diff --git a/opentelemetry/src/trace/mod.rs b/opentelemetry/src/trace/mod.rs index 5e3edc3519..c93e258893 100644 --- a/opentelemetry/src/trace/mod.rs +++ b/opentelemetry/src/trace/mod.rs @@ -44,6 +44,8 @@ //! //! ``` //! use opentelemetry::{global, trace::{Span, Tracer, TracerProvider}}; +//! use opentelemetry::InstrumentationScope; +//! use std::sync::Arc; //! //! fn my_library_function() { //! // Use the global tracer provider to get access to the user-specified @@ -51,10 +53,12 @@ //! let tracer_provider = global::tracer_provider(); //! //! // Get a tracer for this library -//! let tracer = tracer_provider.tracer_builder("my_name"). -//! with_version(env!("CARGO_PKG_VERSION")). -//! with_schema_url("https://opentelemetry.io/schemas/1.17.0"). -//! build(); +//! let scope = InstrumentationScope::builder("my_name") +//! .with_version(env!("CARGO_PKG_VERSION")) +//! .with_schema_url("https://opentelemetry.io/schemas/1.17.0") +//! .build(); +//! +//! 
let tracer = tracer_provider.tracer_with_scope(scope);
 //!
 //!     // Create spans
 //!     let mut span = tracer.start("doing_work");
@@ -182,9 +186,17 @@ pub use self::{
     tracer::{SamplingDecision, SamplingResult, SpanBuilder, Tracer},
     tracer_provider::TracerProvider,
 };
-use crate::{ExportError, KeyValue};
+use crate::KeyValue;
 use std::sync::PoisonError;
 
+// TODO - Move ExportError and TraceError to opentelemetry-sdk
+
+/// Trait for errors returned by exporters
+pub trait ExportError: std::error::Error + Send + Sync + 'static {
+    /// The name of exporter that returned this error
+    fn exporter_name(&self) -> &'static str;
+}
+
 /// Describe the result of operations in tracing API.
 pub type TraceResult<T> = Result<T, TraceError>;
 
@@ -193,13 +205,17 @@ pub type TraceResult<T> = Result<T, TraceError>;
 #[non_exhaustive]
 pub enum TraceError {
     /// Export failed with the error returned by the exporter
-    #[error("Exporter {} encountered the following error(s): {0}", .0.exporter_name())]
+    #[error("Exporter {name} encountered the following error(s): {0}", name = .0.exporter_name())]
     ExportFailed(Box<dyn ExportError>),
 
     /// Export failed to finish after certain period and processor stopped the export.
     #[error("Exporting timed out after {} seconds", .0.as_secs())]
     ExportTimedOut(time::Duration),
 
+    /// already shutdown error
+    #[error("TracerProvider already shutdown")]
+    TracerProviderAlreadyShutdown,
+
     /// Other errors propagated from trace SDK that weren't covered above
     #[error(transparent)]
     Other(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
diff --git a/opentelemetry/src/trace/noop.rs b/opentelemetry/src/trace/noop.rs
index d46a657a2e..4cb8ac4513 100644
--- a/opentelemetry/src/trace/noop.rs
+++ b/opentelemetry/src/trace/noop.rs
@@ -6,9 +6,9 @@
 use crate::{
     propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
     trace::{self, TraceContextExt as _},
-    Context, InstrumentationLibrary, KeyValue,
+    Context, InstrumentationScope, KeyValue,
 };
-use std::{borrow::Cow, sync::Arc, time::SystemTime};
+use std::{borrow::Cow, time::SystemTime};
 
 /// A no-op instance of a `TracerProvider`.
 #[derive(Clone, Debug, Default)]
@@ -27,7 +27,7 @@ impl trace::TracerProvider for NoopTracerProvider {
     type Tracer = NoopTracer;
 
     /// Returns a new `NoopTracer` instance.
- fn library_tracer(&self, _library: Arc) -> Self::Tracer { + fn tracer_with_scope(&self, _scope: InstrumentationScope) -> Self::Tracer { NoopTracer::new() } } diff --git a/opentelemetry/src/trace/span_context.rs b/opentelemetry/src/trace/span_context.rs index 2e1502299b..151e2416fd 100644 --- a/opentelemetry/src/trace/span_context.rs +++ b/opentelemetry/src/trace/span_context.rs @@ -544,6 +544,7 @@ impl SpanContext { #[cfg(test)] mod tests { use super::*; + use crate::{trace::TraceContextExt, Context}; #[rustfmt::skip] fn trace_id_test_data() -> Vec<(TraceId, &'static str, [u8; 16])> { @@ -647,4 +648,27 @@ mod tests { assert!(trace_state.get("testkey").is_none()); // The original state doesn't change assert_eq!(inserted_trace_state.get("testkey").unwrap(), "testvalue"); // } + + #[test] + fn test_context_span_debug() { + let cx = Context::current(); + assert_eq!( + format!("{:?}", cx), + "Context { span: \"None\", entries: 0 }" + ); + let cx = Context::current().with_remote_span_context(SpanContext::NONE); + assert_eq!( + format!("{:?}", cx), + "Context { \ + span: SpanContext { \ + trace_id: 00000000000000000000000000000000, \ + span_id: 0000000000000000, \ + trace_flags: TraceFlags(0), \ + is_remote: false, \ + trace_state: TraceState(None) \ + }, \ + entries: 1 \ + }" + ); + } } diff --git a/opentelemetry/src/trace/tracer_provider.rs b/opentelemetry/src/trace/tracer_provider.rs index c17c3fdd58..8e48da8796 100644 --- a/opentelemetry/src/trace/tracer_provider.rs +++ b/opentelemetry/src/trace/tracer_provider.rs @@ -1,5 +1,5 @@ -use crate::{trace::Tracer, InstrumentationLibrary, InstrumentationLibraryBuilder, KeyValue}; -use std::{borrow::Cow, sync::Arc}; +use crate::{trace::Tracer, InstrumentationScope}; +use std::borrow::Cow; /// Types that can create instances of [`Tracer`]. /// @@ -27,101 +27,18 @@ pub trait TracerProvider { /// /// // tracer used in applications/binaries /// let tracer = provider.tracer("my_app"); - /// - /// // tracer used in libraries/crates that optionally includes version and schema url - /// let tracer = provider.tracer_builder("my_library"). - /// with_version(env!("CARGO_PKG_VERSION")). - /// with_schema_url("https://opentelemetry.io/schema/1.0.0"). - /// with_attributes([KeyValue::new("key", "value")]). - /// build(); /// ``` fn tracer(&self, name: impl Into>) -> Self::Tracer { - self.tracer_builder(name).build() - } - - /// Deprecated, use [`TracerProvider::tracer_builder()`] - /// - /// Returns a new versioned tracer with a given name. - /// - /// The `name` should be the application name or the name of the library - /// providing instrumentation. If the name is empty, then an - /// implementation-defined default name may be used instead. 
- /// - /// # Examples - /// - /// ``` - /// use opentelemetry::{global, trace::TracerProvider}; - /// - /// let provider = global::tracer_provider(); - /// - /// // tracer used in applications/binaries - /// let tracer = provider.tracer("my_app"); - /// - /// // tracer used in libraries/crates that optionally includes version and schema url - /// let tracer = provider.versioned_tracer( - /// "my_library", - /// Some(env!("CARGO_PKG_VERSION")), - /// Some("https://opentelemetry.io/schema/1.0.0"), - /// None, - /// ); - /// ``` - #[deprecated(since = "0.23.0", note = "Please use tracer_builder() instead")] - fn versioned_tracer( - &self, - name: impl Into>, - version: Option>>, - schema_url: Option>>, - attributes: Option>, - ) -> Self::Tracer { - let mut builder = self.tracer_builder(name); - if let Some(v) = version { - builder = builder.with_version(v); - } - if let Some(s) = schema_url { - builder = builder.with_version(s); - } - if let Some(a) = attributes { - builder = builder.with_attributes(a); - } - - builder.build() - } - - /// Returns a new builder for creating a [`Tracer`] instance - /// - /// The `name` should be the application name or the name of the library - /// providing instrumentation. If the name is empty, then an - /// implementation-defined default name may be used instead. - /// - /// # Examples - /// - /// ``` - /// use opentelemetry::{global, trace::TracerProvider}; - /// - /// let provider = global::tracer_provider(); - /// - /// // tracer used in applications/binaries - /// let tracer = provider.tracer_builder("my_app").build(); - /// - /// // tracer used in libraries/crates that optionally includes version and schema url - /// let tracer = provider.tracer_builder("my_library") - /// .with_version(env!("CARGO_PKG_VERSION")) - /// .with_schema_url("https://opentelemetry.io/schema/1.0.0") - /// .build(); - /// ``` - fn tracer_builder(&self, name: impl Into>) -> TracerBuilder<'_, Self> { - TracerBuilder { - provider: self, - library_builder: InstrumentationLibrary::builder(name), - } + let scope = InstrumentationScope::builder(name).build(); + self.tracer_with_scope(scope) } - /// Returns a new versioned tracer with the given instrumentation library. + /// Returns a new versioned tracer with the given instrumentation scope. 
/// /// # Examples /// /// ``` - /// use opentelemetry::{global, InstrumentationLibrary, trace::TracerProvider}; + /// use opentelemetry::{global, InstrumentationScope, trace::TracerProvider}; /// /// let provider = global::tracer_provider(); /// @@ -129,45 +46,13 @@ pub trait TracerProvider { /// let tracer = provider.tracer("my_app"); /// /// // tracer used in libraries/crates that optionally includes version and schema url - /// let library = std::sync::Arc::new( - /// InstrumentationLibrary::builder(env!("CARGO_PKG_NAME")) + /// let scope = + /// InstrumentationScope::builder(env!("CARGO_PKG_NAME")) /// .with_version(env!("CARGO_PKG_VERSION")) /// .with_schema_url("https://opentelemetry.io/schema/1.0.0") - /// .build(), - /// ); + /// .build(); /// - /// let tracer = provider.library_tracer(library); + /// let tracer = provider.tracer_with_scope(scope); /// ``` - fn library_tracer(&self, library: Arc) -> Self::Tracer; -} - -#[derive(Debug)] -pub struct TracerBuilder<'a, T: TracerProvider + ?Sized> { - provider: &'a T, - library_builder: InstrumentationLibraryBuilder, -} - -impl<'a, T: TracerProvider + ?Sized> TracerBuilder<'a, T> { - pub fn with_version(mut self, version: impl Into>) -> Self { - self.library_builder = self.library_builder.with_version(version); - self - } - - pub fn with_schema_url(mut self, schema_url: impl Into>) -> Self { - self.library_builder = self.library_builder.with_schema_url(schema_url); - self - } - - pub fn with_attributes(mut self, attributes: I) -> Self - where - I: IntoIterator, - { - self.library_builder = self.library_builder.with_attributes(attributes); - self - } - - pub fn build(self) -> T::Tracer { - self.provider - .library_tracer(Arc::new(self.library_builder.build())) - } + fn tracer_with_scope(&self, scope: InstrumentationScope) -> Self::Tracer; } diff --git a/scripts/integration_tests.sh b/scripts/integration_tests.sh index 361098a5fc..b984cc023f 100755 --- a/scripts/integration_tests.sh +++ b/scripts/integration_tests.sh @@ -1 +1,37 @@ -cargo test ./opentelemetry-otlp/tests/integration_test/tests -- --ignored +set -e + +TEST_DIR="./opentelemetry-otlp/tests/integration_test/tests" + +if [ -d "$TEST_DIR" ]; then + cd "$TEST_DIR" + + # Run tests with the grpc-tonic feature + echo + echo #### + echo Integration Tests: gRPC Tonic Client + echo #### + echo + cargo test --no-default-features --features "tonic-client","internal-logs" + + # Run tests with the reqwest-client feature + echo + echo #### + echo Integration Tests: Reqwest Client + echo #### + echo + cargo test --no-default-features --features "reqwest-client","internal-logs" + + # TODO - Uncomment the following lines once the reqwest-blocking-client feature is working. + # cargo test --no-default-features --features "reqwest-blocking-client" + + # Run tests with the hyper-client feature + echo + echo #### + echo Integration Tests: Hyper Client + echo #### + echo + cargo test --no-default-features --features "hyper-client","internal-logs" +else + echo "Directory $TEST_DIR does not exist. Skipping tests." 
+ exit 1 +fi diff --git a/scripts/lint.sh b/scripts/lint.sh index 86bd383cc0..cea26cd7e1 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -28,7 +28,7 @@ if rustup component add clippy; then -Dwarnings done - cargo_feature opentelemetry "trace,metrics,logs,logs_level_enabled,testing" + cargo_feature opentelemetry "trace,metrics,logs,spec_unstable_logs_enabled,testing" cargo_feature opentelemetry-otlp "default" cargo_feature opentelemetry-otlp "default,tls" diff --git a/scripts/msrv_config.json b/scripts/msrv_config.json index 321c2afda3..05f9f5615c 100644 --- a/scripts/msrv_config.json +++ b/scripts/msrv_config.json @@ -1,17 +1,14 @@ { - "1.65.0": [ - "opentelemetry/Cargo.toml", - "opentelemetry-sdk/Cargo.toml", - "opentelemetry-stdout/Cargo.toml", - "opentelemetry-http/Cargo.toml", - "opentelemetry-jaeger-propagator/Cargo.toml", - "opentelemetry-zipkin/Cargo.toml", - "opentelemetry-appender-log/Cargo.toml", - "opentelemetry-appender-tracing/Cargo.toml" - ], - "1.70.0": [ - "opentelemetry-otlp/Cargo.toml", - "opentelemetry-proto/Cargo.toml" - ] - } - \ No newline at end of file + "1.75.0": [ + "opentelemetry/Cargo.toml", + "opentelemetry-sdk/Cargo.toml", + "opentelemetry-stdout/Cargo.toml", + "opentelemetry-http/Cargo.toml", + "opentelemetry-jaeger-propagator/Cargo.toml", + "opentelemetry-zipkin/Cargo.toml", + "opentelemetry-appender-log/Cargo.toml", + "opentelemetry-appender-tracing/Cargo.toml", + "opentelemetry-otlp/Cargo.toml", + "opentelemetry-proto/Cargo.toml" + ] +} diff --git a/scripts/patch_dependencies.sh b/scripts/patch_dependencies.sh index ead57c189f..1bd1f05ceb 100755 --- a/scripts/patch_dependencies.sh +++ b/scripts/patch_dependencies.sh @@ -1,14 +1,9 @@ #!/bin/bash function patch_version() { - local latest_version=$(cargo search --limit 1 $1 | head -1 | cut -d'"' -f2) - echo "patching $1 from $latest_version to $2" - cargo update -p $1:$latest_version --precise $2 + local latest_version=$(cargo search --limit 1 $1 | head -1 | cut -d'"' -f2) + echo "patching $1 from $latest_version to $2" + cargo update -p $1:$latest_version --precise $2 } -patch_version cc 1.0.105 -patch_version url 2.5.0 -patch_version hyper-rustls 0.27.2 # 0.27.3 needs rustc v1.70.0 -patch_version tokio-util 0.7.11 # 0.7.12 needs rustc v1.70.0 -patch_version tokio-stream 0.1.15 # 0.1.16 needs rustc v1.70.0 -patch_version tokio 1.38.0 # 1.39 needs msrv bump to rustc 1.70 +patch_version url 2.5.2 #https://github.com/servo/rust-url/issues/992 diff --git a/scripts/test.sh b/scripts/test.sh index dfcb925659..467d5f7c4a 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -2,15 +2,19 @@ set -eu +# +# Using '--lib' skips integration tests +# + echo "Running tests for all packages in workspace with --all-features" -cargo test --workspace --all-features +cargo test --workspace --all-features --lib # See https://github.com/rust-lang/cargo/issues/5364 echo "Running tests for opentelemetry package with --no-default-features" -cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features +cargo test --manifest-path=opentelemetry/Cargo.toml --no-default-features --lib # Run global tracer provider test in single thread # //TODO: This tests were not running for a while. Need to find out how to run # run them. Using --ignored will run other tests as well, so that cannot be used. # echo "Running global tracer provider for opentelemetry-sdk package with single thread." 
-# cargo test --manifest-path=opentelemetry-sdk/Cargo.toml --all-features -- --test-threads=1 +# cargo test --manifest-path=opentelemetry-sdk/Cargo.toml --all-features -- --test-threads=1 --lib diff --git a/stress/Cargo.toml b/stress/Cargo.toml index 0591cde7eb..b4b86ba330 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -43,14 +43,16 @@ doc = false ctrlc = "3.2.5" lazy_static = "1.4.0" num_cpus = "1.15.0" -opentelemetry = { path = "../opentelemetry", features = ["metrics", "logs", "trace", "logs_level_enabled"] } -opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["metrics", "logs", "trace", "logs_level_enabled"] } +opentelemetry = { path = "../opentelemetry", features = ["metrics", "logs", "trace", "spec_unstable_logs_enabled"] } +opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["metrics", "logs", "trace", "spec_unstable_logs_enabled"] } opentelemetry-appender-tracing = { path = "../opentelemetry-appender-tracing"} rand = { version = "0.8.4", features = ["small_rng"] } tracing = { workspace = true, features = ["std"]} tracing-subscriber = { workspace = true, features = ["registry", "std"] } num-format = "0.4.4" -sysinfo = { version = "0.30.12", optional = true } +sysinfo = { version = "0.32", optional = true } +async-trait = "0.1.51" +futures-executor = { workspace = true } [features] -stats = ["sysinfo"] \ No newline at end of file +stats = ["sysinfo"] diff --git a/stress/src/logs.rs b/stress/src/logs.rs index 7744708db9..87d5e2c6ed 100644 --- a/stress/src/logs.rs +++ b/stress/src/logs.rs @@ -6,33 +6,45 @@ ~31 M/sec Hardware: AMD EPYC 7763 64-Core Processor - 2.44 GHz, 16vCPUs, - ~44 M /sec + ~40 M /sec */ -use opentelemetry::InstrumentationLibrary; +use opentelemetry::InstrumentationScope; use opentelemetry_appender_tracing::layer; -use opentelemetry_sdk::logs::{LogProcessor, LoggerProvider}; +use opentelemetry_sdk::export::logs::{LogBatch, LogExporter}; +use opentelemetry_sdk::logs::{LogProcessor, LogRecord, LogResult, LoggerProvider}; use tracing::error; use tracing_subscriber::prelude::*; mod throughput; +use async_trait::async_trait; + +#[derive(Debug, Clone)] +struct MockLogExporter; + +#[async_trait] +impl LogExporter for MockLogExporter { + async fn export(&self, _: LogBatch<'_>) -> LogResult<()> { + LogResult::Ok(()) + } +} #[derive(Debug)] -pub struct NoOpLogProcessor; - -impl LogProcessor for NoOpLogProcessor { - fn emit( - &self, - _record: &mut opentelemetry_sdk::logs::LogRecord, - _library: &InstrumentationLibrary, - ) { +pub struct MockLogProcessor { + exporter: MockLogExporter, +} + +impl LogProcessor for MockLogProcessor { + fn emit(&self, record: &mut opentelemetry_sdk::logs::LogRecord, scope: &InstrumentationScope) { + let log_tuple = &[(record as &LogRecord, scope)]; + let _ = futures_executor::block_on(self.exporter.export(LogBatch::new(log_tuple))); } - fn force_flush(&self) -> opentelemetry::logs::LogResult<()> { + fn force_flush(&self) -> opentelemetry_sdk::logs::LogResult<()> { Ok(()) } - fn shutdown(&self) -> opentelemetry::logs::LogResult<()> { + fn shutdown(&self) -> opentelemetry_sdk::logs::LogResult<()> { Ok(()) } } @@ -40,7 +52,9 @@ impl LogProcessor for NoOpLogProcessor { fn main() { // LoggerProvider with a no-op processor. let provider: LoggerProvider = LoggerProvider::builder() - .with_log_processor(NoOpLogProcessor {}) + .with_log_processor(MockLogProcessor { + exporter: MockLogExporter {}, + }) .build(); // Use the OpenTelemetryTracingBridge to test the throughput of the appender-tracing. 
diff --git a/stress/src/metrics_counter.rs b/stress/src/metrics_counter.rs
index 452907f2bf..d64f2d11f8 100644
--- a/stress/src/metrics_counter.rs
+++ b/stress/src/metrics_counter.rs
@@ -31,7 +31,7 @@ lazy_static! {
         "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9",
         "value10"
     ];
-    static ref COUNTER: Counter<u64> = PROVIDER.meter("test").u64_counter("hello").init();
+    static ref COUNTER: Counter<u64> = PROVIDER.meter("test").u64_counter("hello").build();
 }
 
 thread_local! {
diff --git a/stress/src/metrics_gauge.rs b/stress/src/metrics_gauge.rs
index 9f01dabb16..d69efb3c4f 100644
--- a/stress/src/metrics_gauge.rs
+++ b/stress/src/metrics_gauge.rs
@@ -28,7 +28,7 @@ lazy_static! {
         "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9",
         "value10"
     ];
-    static ref GAUGE: Gauge<u64> = PROVIDER.meter("test").u64_gauge("test_gauge").init();
+    static ref GAUGE: Gauge<u64> = PROVIDER.meter("test").u64_gauge("test_gauge").build();
 }
 
 thread_local! {
diff --git a/stress/src/metrics_histogram.rs b/stress/src/metrics_histogram.rs
index e0f469fc33..860d2bdd20 100644
--- a/stress/src/metrics_histogram.rs
+++ b/stress/src/metrics_histogram.rs
@@ -31,7 +31,7 @@ lazy_static! {
         "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9",
         "value10"
     ];
-    static ref HISTOGRAM: Histogram<u64> = PROVIDER.meter("test").u64_histogram("hello").init();
+    static ref HISTOGRAM: Histogram<u64> = PROVIDER.meter("test").u64_histogram("hello").build();
 }
 
 thread_local! {
diff --git a/stress/src/metrics_overflow.rs b/stress/src/metrics_overflow.rs
index d2e552ed67..bbd79db780 100644
--- a/stress/src/metrics_overflow.rs
+++ b/stress/src/metrics_overflow.rs
@@ -24,7 +24,7 @@ lazy_static! {
     static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder()
         .with_reader(ManualReader::builder().build())
         .build();
-    static ref COUNTER: Counter<u64> = PROVIDER.meter("test").u64_counter("hello").init();
+    static ref COUNTER: Counter<u64> = PROVIDER.meter("test").u64_counter("hello").build();
 }
 
 thread_local! {
diff --git a/stress/src/throughput.rs b/stress/src/throughput.rs
index c77832a33c..8116f904ee 100644
--- a/stress/src/throughput.rs
+++ b/stress/src/throughput.rs
@@ -1,6 +1,7 @@
 use num_format::{Locale, ToFormattedString};
+use std::cell::UnsafeCell;
 use std::env;
-use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use std::thread;
 use std::time::{Duration, Instant};
@@ -8,14 +9,13 @@ use std::time::{Duration, Instant};
 use sysinfo::{Pid, System};
 
 const SLIDING_WINDOW_SIZE: u64 = 2; // In seconds
-const BATCH_SIZE: u64 = 1000;
 
 static STOP: AtomicBool = AtomicBool::new(false);
 
 #[repr(C)]
 #[derive(Default)]
 struct WorkerStats {
-    count: AtomicU64,
+    count: u64,
     /// We use a padding for the struct to allow each thread to have exclusive access to each WorkerStat
     /// Otherwise, there would be some cpu contention with threads needing to take ownership of the cache lines
    padding: [u64; 15],
@@ -58,91 +58,111 @@ where
     }
     println!("Number of threads: {}\n", num_threads);
 
-    let mut handles = Vec::with_capacity(num_threads);
     let func_arc = Arc::new(func);
     let mut worker_stats_vec: Vec<WorkerStats> = Vec::new();
 
     for _ in 0..num_threads {
         worker_stats_vec.push(WorkerStats::default());
     }
-    let worker_stats_shared = Arc::new(worker_stats_vec);
-    let worker_stats_shared_monitor = Arc::clone(&worker_stats_shared);
-
-    let handle_main_thread = thread::spawn(move || {
-        let mut last_collect_time = Instant::now();
-        let mut total_count_old: u64 = 0;
-
-        #[cfg(feature = "stats")]
-        let pid = Pid::from(std::process::id() as usize);
-        #[cfg(feature = "stats")]
-        let mut system = System::new_all();
-
-        loop {
-            let current_time = Instant::now();
-            let elapsed = current_time.duration_since(last_collect_time).as_secs();
-            if elapsed >= SLIDING_WINDOW_SIZE {
-                let total_count_u64: u64 = worker_stats_shared_monitor
-                    .iter()
-                    .map(|worker_stat| worker_stat.count.load(Ordering::Relaxed))
-                    .sum();
-                last_collect_time = Instant::now();
-                let current_count = total_count_u64 - total_count_old;
-                total_count_old = total_count_u64;
-                let throughput = current_count / elapsed;
-                println!(
-                    "Throughput: {} iterations/sec",
-                    throughput.to_formatted_string(&Locale::en)
-                );
-                #[cfg(feature = "stats")]
-                {
-                    system.refresh_all();
-                    if let Some(process) = system.process(pid) {
-                        println!(
-                            "Memory usage: {:.2} MB",
-                            process.memory() as f64 / (1024.0 * 1024.0)
-                        );
-                        println!("CPU usage: {}%", process.cpu_usage() / num_threads as f32);
-                        println!(
-                            "Virtual memory usage: {:.2} MB",
-                            process.virtual_memory() as f64 / (1024.0 * 1024.0)
-                        );
-                    } else {
-                        println!("Process not found");
+    let shared_mutable_stats_slice = UnsafeSlice::new(&mut worker_stats_vec);
+
+    thread::scope(|s| {
+        s.spawn(|| {
+            let mut last_collect_time = Instant::now();
+            let mut total_count_old: u64 = 0;
+
+            #[cfg(feature = "stats")]
+            let pid = Pid::from(std::process::id() as usize);
+            #[cfg(feature = "stats")]
+            let mut system = System::new_all();
+
+            loop {
+                let current_time = Instant::now();
+                let elapsed = current_time.duration_since(last_collect_time).as_secs();
+                if elapsed >= SLIDING_WINDOW_SIZE {
+                    let total_count_u64 = shared_mutable_stats_slice.sum();
+                    last_collect_time = Instant::now();
+                    let current_count = total_count_u64 - total_count_old;
+                    total_count_old = total_count_u64;
+                    let throughput = current_count / elapsed;
+                    println!(
+                        "Throughput: {} iterations/sec",
+                        throughput.to_formatted_string(&Locale::en)
+                    );
+
+                    #[cfg(feature = "stats")]
+                    {
+                        system.refresh_all();
+                        if let Some(process) = system.process(pid) {
+                            println!(
+                                "Memory usage: {:.2} MB",
+                                process.memory() as f64 / (1024.0 * 1024.0)
+                            );
+                            println!("CPU usage: {}%", process.cpu_usage() / num_threads as f32);
+                            println!(
+                                "Virtual memory usage: {:.2} MB",
+                                process.virtual_memory() as f64 / (1024.0 * 1024.0)
+                            );
+                        } else {
+                            println!("Process not found");
+                        }
                     }
+
+                    println!("\n");
                 }
-                println!("\n");
-            }
+                if STOP.load(Ordering::SeqCst) {
+                    break;
+                }
 
-            if STOP.load(Ordering::SeqCst) {
-                break;
+                thread::sleep(Duration::from_millis(5000));
             }
+        });
 
-            thread::sleep(Duration::from_millis(5000));
+        for thread_index in 0..num_threads {
+            let func_arc_clone = Arc::clone(&func_arc);
+            s.spawn(move || loop {
+                func_arc_clone();
+                unsafe {
+                    shared_mutable_stats_slice.increment(thread_index);
+                }
+                if STOP.load(Ordering::SeqCst) {
+                    break;
+                }
+            });
         }
     });
+}
 
-    handles.push(handle_main_thread);
+#[derive(Copy, Clone)]
+struct UnsafeSlice<'a> {
+    slice: &'a [UnsafeCell<WorkerStats>],
+}
 
-    for thread_index in 0..num_threads {
-        let worker_stats_shared = Arc::clone(&worker_stats_shared);
-        let func_arc_clone = Arc::clone(&func_arc);
-        let handle = thread::spawn(move || loop {
-            for _ in 0..BATCH_SIZE {
-                func_arc_clone();
-            }
-            worker_stats_shared[thread_index]
-                .count
-                .fetch_add(BATCH_SIZE, Ordering::Relaxed);
-            if STOP.load(Ordering::SeqCst) {
-                break;
-            }
-        });
-        handles.push(handle)
+unsafe impl<'a> Send for UnsafeSlice<'a> {}
+unsafe impl<'a> Sync for UnsafeSlice<'a> {}
+
+impl<'a> UnsafeSlice<'a> {
+    fn new(slice: &'a mut [WorkerStats]) -> Self {
+        let ptr = slice as *mut [WorkerStats] as *const [UnsafeCell<WorkerStats>];
+        Self {
+            slice: unsafe { &*ptr },
+        }
+    }
+
+    // SAFETY: It's assumed that no two threads will write to the same index at the same time
+    #[inline(always)]
+    unsafe fn increment(&self, i: usize) {
+        let value = self.slice[i].get();
+        (*value).count = (*value).count + 1;
     }
 
-    for handle in handles {
-        handle.join().unwrap();
+    #[inline(always)]
+    fn sum(&self) -> u64 {
+        self.slice
            .iter()
+            .map(|cell| unsafe { (*cell.get()).count })
+            .sum()
     }
 }
diff --git a/stress/src/traces.rs b/stress/src/traces.rs
index 62598b10ad..73b5563c36 100644
--- a/stress/src/traces.rs
+++ b/stress/src/traces.rs
@@ -23,7 +23,7 @@ mod throughput;
 lazy_static! {
     static ref PROVIDER: sdktrace::TracerProvider = sdktrace::TracerProvider::builder()
-        .with_config(sdktrace::Config::default().with_sampler(sdktrace::Sampler::AlwaysOn))
+        .with_sampler(sdktrace::Sampler::AlwaysOn)
         .with_span_processor(NoOpSpanProcessor {})
         .build();
     static ref TRACER: sdktrace::Tracer = PROVIDER.tracer("stress");
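The equivalent migration applies on the tracing side, where `tracer_builder()` and the deprecated `versioned_tracer()` are replaced by `tracer_with_scope`. A sketch under the same assumptions: the library name is illustrative, while the schema URL matches the one used in the doc examples in this diff:

```rust
use opentelemetry::trace::{Tracer, TracerProvider as _};
use opentelemetry::{global, InstrumentationScope};

fn library_tracer() -> impl Tracer {
    // Replaces the removed tracer_builder()/versioned_tracer() chain:
    // version and schema metadata now live on an InstrumentationScope.
    let scope = InstrumentationScope::builder("my_library") // illustrative name
        .with_version(env!("CARGO_PKG_VERSION"))
        .with_schema_url("https://opentelemetry.io/schema/1.0.0")
        .build();
    global::tracer_provider().tracer_with_scope(scope)
}
```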