Commit 041ab6a

Merge branch 'main' into dependabot/cargo/prost-types-0.12

tobz authored Dec 22, 2023
2 parents c8ba18e + 851c930 commit 041ab6a
Showing 24 changed files with 1,324 additions and 570 deletions.
20 changes: 18 additions & 2 deletions .github/workflows/ci.yml
@@ -24,9 +24,11 @@ jobs:
- name: Install Rust Stable
run: rustup default stable
- name: Install cargo-hack
run: cargo install cargo-hack
uses: taiki-e/install-action@v2
with:
tool: cargo-hack
- name: Check Feature Matrix
run: cargo hack build --all --all-targets --feature-powerset
run: cargo hack check --all --all-targets --feature-powerset --release
test:
name: Test ${{ matrix.rust_version }}
runs-on: ubuntu-latest
@@ -64,3 +66,17 @@ jobs:
run: rustup default stable
- name: Run Benchmarks
run: cargo bench --all-features --workspace --exclude=metrics-observer
clippy:
name: Clippy ${{ matrix.rust_version }}
runs-on: ubuntu-latest
strategy:
matrix:
rust_version: ['1.61.0', 'stable', 'nightly']
steps:
- uses: actions/checkout@v3
- name: Install Protobuf Compiler
run: sudo apt-get install protobuf-compiler
- name: Install Rust ${{ matrix.rust_version }}
run: rustup default ${{ matrix.rust_version }}
- name: Run Clippy
run: cargo clippy --all-features --workspace --exclude=metrics-observer
10 changes: 10 additions & 0 deletions metrics-exporter-prometheus/CHANGELOG.md
@@ -8,6 +8,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased] - ReleaseDate

### Added

- Support for using HTTPS in Push Gateway mode. ([#392](https://github.com/metrics-rs/metrics/pull/392))

## [0.12.2] - 2023-12-13

### Fixed

- Fixed overflow/underflow panic with time moving backwards ([#423](https://github.com/metrics-rs/metrics/pull/423))

## [0.12.1] - 2023-05-09

### Added
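The HTTPS push gateway support noted in the Unreleased section above is driven through the exporter's builder. A minimal sketch of how that looks from the application side, assuming the four-argument `with_push_gateway(endpoint, interval, username, password)` form of the 0.12 builder (older releases take only the first two arguments); the URL, interval, and credentials here are placeholders, not values from this commit:

```rust
use std::time::Duration;

use metrics_exporter_prometheus::PrometheusBuilder;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // An https:// endpoint now works in push gateway mode; host and
    // credentials below are illustrative only.
    PrometheusBuilder::new()
        .with_push_gateway(
            "https://pushgateway.example.com/metrics/job/example",
            Duration::from_secs(10),
            None, // username
            None, // password
        )?
        .install()?;

    Ok(())
}
```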
2 changes: 1 addition & 1 deletion metrics-exporter-prometheus/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "metrics-exporter-prometheus"
version = "0.12.1"
version = "0.12.2"
authors = ["Toby Lawrence <[email protected]>"]
edition = "2018"
rust-version = "1.61.0"
4 changes: 3 additions & 1 deletion metrics-exporter-prometheus/src/builder.rs
@@ -29,6 +29,7 @@ use hyper::{
http::HeaderValue,
Method, Request, Uri,
};
#[cfg(feature = "push-gateway")]
use hyper_tls::HttpsConnector;

use indexmap::IndexMap;
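The newly gated import above only pulls in `hyper_tls` when the `push-gateway` feature is enabled. A sketch of how such a feature-gated HTTPS client is typically assembled with hyper 0.14 and hyper-tls 0.5 — an illustration of the pattern, not the exporter's actual internals:

```rust
#[cfg(feature = "push-gateway")]
fn https_client() -> hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>> {
    // HttpsConnector wraps the plain HTTP connector and upgrades to TLS for
    // https:// URIs, so the same client serves both schemes.
    let https = hyper_tls::HttpsConnector::new();
    hyper::Client::builder().build::<_, hyper::Body>(https)
}
```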
@@ -403,6 +404,7 @@ impl PrometheusBuilder {
///
/// If there is an error while building the recorder and exporter, an error variant will be
/// returned describing the error.
#[warn(clippy::too_many_lines)]
#[cfg(any(feature = "http-listener", feature = "push-gateway"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http-listener", feature = "push-gateway"))))]
#[cfg_attr(not(feature = "http-listener"), allow(unused_mut))]
@@ -500,7 +502,7 @@ impl PrometheusBuilder {
.map(|mut b| b.copy_to_bytes(b.remaining()))
.map(|b| b[..].to_vec())
.and_then(|s| String::from_utf8(s).map_err(|_| ()))
.unwrap_or_else(|_| {
.unwrap_or_else(|()| {
String::from("<failed to read response body>")
});
error!(
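The `|_|` to `|()|` change above is a small lint-driven cleanup: when the error type is literally `()`, matching it explicitly documents that nothing is being discarded (presumably clippy's `ignored_unit_patterns` lint, though the trigger is an assumption here). A tiny standalone illustration:

```rust
fn parse(input: &str) -> Result<i64, ()> {
    input.trim().parse().map_err(|_| ())
}

fn main() {
    // `|()|` instead of `|_|`: the closure's argument can only ever be the
    // unit error, so spelling it out makes "no information lost" explicit.
    let value = parse("not a number").unwrap_or_else(|()| -1);
    assert_eq!(value, -1);
}
```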
77 changes: 22 additions & 55 deletions metrics-exporter-prometheus/src/distribution.rs
@@ -27,6 +27,7 @@ pub enum Distribution {

impl Distribution {
/// Creates a histogram distribution.
#[warn(clippy::missing_panics_doc)]
pub fn new_histogram(buckets: &[f64]) -> Distribution {
let hist = Histogram::new(buckets).expect("buckets should never be empty");
Distribution::Histogram(hist)
@@ -83,7 +84,7 @@ impl DistributionBuilder {
/// Returns a distribution for the given metric key.
pub fn get_distribution(&self, name: &str) -> Distribution {
if let Some(ref overrides) = self.bucket_overrides {
for (matcher, buckets) in overrides.iter() {
for (matcher, buckets) in overrides {
if matcher.matches(name) {
return Distribution::new_histogram(buckets);
}
@@ -104,7 +105,7 @@
}

if let Some(ref overrides) = self.bucket_overrides {
for (matcher, _) in overrides.iter() {
for (matcher, _) in overrides {
if matcher.matches(name) {
return "histogram";
}
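Both loop changes above drop a redundant `.iter()`: iterating a borrowed collection directly yields the same references, which is what clippy's `explicit_iter_loop` lint nudges toward (assuming that lint motivated the change). A minimal before/after on an illustrative map:

```rust
use std::collections::BTreeMap;

fn main() {
    let overrides: BTreeMap<String, Vec<f64>> =
        BTreeMap::from([("http".to_string(), vec![0.1, 0.5, 1.0])]);

    // Equivalent loops: iterating `&overrides` already yields references,
    // so the explicit `.iter()` adds nothing.
    for (name, buckets) in overrides.iter() {
        println!("{name}: {buckets:?}");
    }
    for (name, buckets) in &overrides {
        println!("{name}: {buckets:?}");
    }
}
```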
@@ -218,15 +219,6 @@ impl RollingSummary {

self.buckets.truncate(self.max_buckets - 1);
self.buckets.insert(0, Bucket { begin, summary });
} else {
begin = reftime - self.bucket_duration;
while now < begin {
begin -= self.bucket_duration;
}

self.buckets.truncate(self.max_buckets - 1);
self.buckets.push(Bucket { begin, summary });
self.buckets.sort_unstable_by(|a, b| b.begin.cmp(&a.begin));
}
}

@@ -358,58 +350,33 @@ mod tests {
}

#[test]
fn add_to_tail() {
fn add_value_ts_before_first_bucket() {
let (clock, mock) = Clock::mock();
mock.increment(Duration::from_secs(3600));

let mut summary = RollingSummary::default();
summary.add(42.0, clock.now());
let mut expected = Vec::new();
expected.push(clock.now());
mock.decrement(Duration::from_secs(20));
summary.add(42.0, clock.now());
expected.push(clock.now());
mock.increment(Duration::from_secs(4));

let actual: Vec<Instant> = summary.buckets().iter().map(|b| b.begin).collect();
assert_eq!(expected, actual);
}
let bucket_count = NonZeroU32::new(2).unwrap();
let bucket_width = Duration::from_secs(5);

#[test]
fn add_to_tail_with_gap() {
let (clock, mock) = Clock::mock();
mock.increment(Duration::from_secs(3600));
let mut summary = RollingSummary::new(bucket_count, bucket_width);
assert_eq!(0, summary.buckets().len());
assert_eq!(0, summary.count());

let mut summary = RollingSummary::default();
summary.add(42.0, clock.now());
let mut expected = Vec::new();
expected.push(clock.now());
mock.decrement(Duration::from_secs(40));
// Add a single value to create our first bucket.
summary.add(42.0, clock.now());
expected.push(clock.now());

let actual: Vec<Instant> = summary.buckets().iter().map(|b| b.begin).collect();
assert_eq!(expected, actual);
}
// Make sure the value got added.
assert_eq!(1, summary.buckets().len());
assert_eq!(1, summary.count());
assert!(!summary.is_empty());

#[test]
fn add_to_middle_gap() {
let (clock, mock) = Clock::mock();
mock.increment(Duration::from_secs(3600));
// Our first bucket is now marked as begin=4/width=5, so make sure that if we add a version
// with now=3, the count goes up but it's not actually added.
mock.decrement(Duration::from_secs(1));

let mut expected = Vec::new();
expected.resize(3, Instant::now());
summary.add(43.0, clock.now());

let mut summary = RollingSummary::default();
summary.add(42.0, clock.now());
expected[0] = clock.now();
mock.decrement(Duration::from_secs(40));
summary.add(42.0, clock.now());
expected[2] = clock.now();
mock.increment(Duration::from_secs(20));
summary.add(42.0, clock.now());
expected[1] = clock.now();

let actual: Vec<Instant> = summary.buckets().iter().map(|b| b.begin).collect();
assert_eq!(expected, actual);
assert_eq!(1, summary.buckets().len());
assert_eq!(2, summary.count());
assert!(!summary.is_empty());
}
}
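The removed `else` branch and the rewritten test above change what happens when a value arrives with a timestamp older than the newest bucket: it still bumps the total count, but no back-dated bucket is created for it. A simplified, standalone sketch of that policy — not the crate's actual `RollingSummary` implementation, reduced to a single bucket for clarity:

```rust
use std::time::{Duration, Instant};

// Single-bucket stand-in for the rolling window: just enough to show the
// behavior exercised by the new test.
struct OneBucketWindow {
    begin: Option<Instant>, // begin of the newest bucket, if any
    samples: Vec<f64>,      // samples recorded in the newest bucket
    count: usize,           // every observation, bucketed or not
}

impl OneBucketWindow {
    fn new() -> Self {
        Self { begin: None, samples: Vec::new(), count: 0 }
    }

    fn add(&mut self, value: f64, now: Instant) {
        self.count += 1;
        match self.begin {
            // First value creates the first bucket.
            None => {
                self.begin = Some(now);
                self.samples.push(value);
            }
            // At or after the newest bucket's begin: record the sample.
            Some(begin) if now >= begin => self.samples.push(value),
            // Older than the newest bucket: counted above, but no
            // back-dated bucket is created for it.
            Some(_) => {}
        }
    }
}

fn main() {
    let start = Instant::now();
    let mut window = OneBucketWindow::new();
    window.add(42.0, start);
    window.add(43.0, start - Duration::from_secs(1));
    assert_eq!(window.count, 2);         // both observations counted
    assert_eq!(window.samples.len(), 1); // only the in-window one bucketed
}
```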
2 changes: 1 addition & 1 deletion metrics-exporter-prometheus/src/recorder.rs
@@ -86,7 +86,7 @@ impl Inner {
let mut wg = self.distributions.write().unwrap_or_else(PoisonError::into_inner);
let entry = wg
.entry(name.clone())
.or_insert_with(IndexMap::new)
.or_default()
.entry(labels)
.or_insert_with(|| self.distribution_builder.get_distribution(name.as_str()));

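The `.or_insert_with(IndexMap::new)` to `.or_default()` swap above is behavior-preserving: when a value type's `Default` is the empty container, `or_default()` inserts the same thing with less noise. A small standalone example of the idiom, with illustrative types rather than the recorder's real ones:

```rust
use std::collections::HashMap;

use indexmap::IndexMap;

fn main() {
    // Outer map: metric name -> (label set -> observation count).
    let mut by_name: HashMap<String, IndexMap<String, u64>> = HashMap::new();

    // `or_default()` inserts IndexMap::default() (an empty map) when the
    // name is missing -- exactly what `or_insert_with(IndexMap::new)` did.
    let by_labels = by_name.entry("requests".to_string()).or_default();
    *by_labels.entry("status=200".to_string()).or_insert(0) += 1;

    assert_eq!(by_name["requests"]["status=200"], 1);
}
```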
8 changes: 7 additions & 1 deletion metrics-observer/CHANGELOG.md
@@ -8,11 +8,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased] - ReleaseDate

### Fixed

- All addresses returned when trying to connect to the specified exporter endpoint will be tried, in
order, instead of only trying the first and then giving up.
([#429](https://github.com/metrics-rs/metrics/pull/429))

## [0.2.0] - 2023-04-16

### Added

- Update hdrhistogram dependency to 7.2
- Update `hdrhistogram` dependency to 7.2

### Changed

38 changes: 15 additions & 23 deletions metrics-observer/src/metrics.rs
@@ -114,30 +114,22 @@ impl Runner {
let mut state = self.client_state.lock().unwrap();
*state = ClientState::Disconnected(None);
}

// Try to connect to our target and transition into Connected.
let addr = match self.addr.to_socket_addrs() {
Ok(mut addrs) => match addrs.next() {
Some(addr) => addr,
None => {
let mut state = self.client_state.lock().unwrap();
*state = ClientState::Disconnected(Some(
"failed to resolve specified host".to_string(),
));
break;
}
},
Err(_) => {
let mut state = self.client_state.lock().unwrap();
*state = ClientState::Disconnected(Some(
"failed to resolve specified host".to_string(),
));
break;
}
// Resolve the target address.
let Ok(mut addrs) = self.addr.to_socket_addrs() else {
let mut state = self.client_state.lock().unwrap();
*state = ClientState::Disconnected(Some(
"failed to resolve specified host".to_string(),
));
break;
};
match TcpStream::connect_timeout(&addr, Duration::from_secs(3)) {
Ok(stream) => RunnerState::Connected(stream),
Err(_) => RunnerState::ErrorBackoff(
// Some of the resolved addresses may be unreachable (e.g. IPv6).
// Pick the first one that works.
let maybe_stream = addrs.find_map(|addr| {
TcpStream::connect_timeout(&addr, Duration::from_secs(3)).ok()
});
match maybe_stream {
Some(stream) => RunnerState::Connected(stream),
None => RunnerState::ErrorBackoff(
"error while connecting",
Duration::from_secs(3),
),
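The change above is self-contained enough to lift out: resolve every address for the target, then take the first TCP connection that succeeds rather than failing on the first (possibly unreachable, e.g. IPv6) result. A standalone version of the same pattern; the host and timeout are placeholders:

```rust
use std::net::{TcpStream, ToSocketAddrs};
use std::time::Duration;

// Resolve `target` to all of its socket addresses and connect to the first
// one that answers within `timeout`.
fn connect_any(target: &str, timeout: Duration) -> Option<TcpStream> {
    let mut addrs = target.to_socket_addrs().ok()?;
    addrs.find_map(|addr| TcpStream::connect_timeout(&addr, timeout).ok())
}

fn main() {
    match connect_any("localhost:5000", Duration::from_secs(3)) {
        Some(stream) => println!("connected to {:?}", stream.peer_addr()),
        None => println!("no reachable address"),
    }
}
```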
4 changes: 4 additions & 0 deletions metrics-tracing-context/CHANGELOG.md
@@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased] - ReleaseDate

### Added

- Support for dynamism using `tracing::Span::record` to add label values. ([#408](https://github.com/metrics-rs/metrics/pull/408))

## [0.14.0] - 2023-04-16

### Changed
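The changelog entry above refers to filling in label values after a span is created. A sketch of how that looks from the application side, assuming the crate's `TracingContextLayer` is installed on both the tracing subscriber and the metrics recorder; the handler, field name, and metric name are made up for the example, and the macro uses metrics 0.21 syntax:

```rust
use metrics::counter;
use tracing::{field::Empty, info_span};

fn handle_request(tenant: &str) {
    // Declare the field up front as Empty so it exists on the span...
    let span = info_span!("handle_request", tenant = Empty);
    let _guard = span.enter();

    // ...and record it once the value is known. With metrics-tracing-context
    // layered in, the recorded value becomes a `tenant` label on the metric.
    span.record("tenant", tenant);

    counter!("requests_total", 1);
}

fn main() {
    handle_request("acme");
}
```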
2 changes: 2 additions & 0 deletions metrics-tracing-context/Cargo.toml
@@ -32,6 +32,7 @@ itoa = { version = "1", default-features = false }
metrics = { version = "^0.21", path = "../metrics" }
metrics-util = { version = "^0.15", path = "../metrics-util" }
lockfree-object-pool = { version = "0.1.3", default-features = false }
indexmap = { version = "2.1", default-features = false, features = ["std"] }
once_cell = { version = "1", default-features = false, features = ["std"] }
tracing = { version = "0.1.29", default-features = false }
tracing-core = { version = "0.1.21", default-features = false }
@@ -42,3 +43,4 @@ criterion = { version = "=0.3.3", default-features = false }
parking_lot = { version = "0.12.1", default-features = false }
tracing = { version = "0.1.29", default-features = false, features = ["std"] }
tracing-subscriber = { version = "0.3.1", default-features = false, features = ["registry"] }
itertools = { version = "0.12.0", default-features = false, features = ["use_std"] }
11 changes: 7 additions & 4 deletions metrics-tracing-context/benches/visit.rs
@@ -1,8 +1,9 @@
use std::sync::Arc;

use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use indexmap::IndexMap;
use lockfree_object_pool::LinearObjectPool;
use metrics::Label;
use metrics::SharedString;
use metrics_tracing_context::Labels;
use once_cell::sync::OnceCell;
use tracing::Metadata;
@@ -13,9 +14,11 @@ use tracing_core::{
Callsite, Interest,
};

fn get_pool() -> &'static Arc<LinearObjectPool<Vec<Label>>> {
static POOL: OnceCell<Arc<LinearObjectPool<Vec<Label>>>> = OnceCell::new();
POOL.get_or_init(|| Arc::new(LinearObjectPool::new(|| Vec::new(), |vec| vec.clear())))
type Map = IndexMap<SharedString, SharedString>;

fn get_pool() -> &'static Arc<LinearObjectPool<Map>> {
static POOL: OnceCell<Arc<LinearObjectPool<Map>>> = OnceCell::new();
POOL.get_or_init(|| Arc::new(LinearObjectPool::new(Map::new, Map::clear)))
}

const BATCH_SIZE: usize = 1000;
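The benchmark now pools `IndexMap`s instead of `Vec<Label>`s. A small standalone sketch of the `LinearObjectPool` pattern it relies on, using plain `String` keys and values for brevity (the benchmark itself uses `SharedString`):

```rust
use std::sync::Arc;

use indexmap::IndexMap;
use lockfree_object_pool::LinearObjectPool;

type Map = IndexMap<String, String>;

fn main() {
    // `Map::new` builds a fresh map for the pool; `Map::clear` is the reset
    // hook, so a map handed back to the pool comes out empty when reused.
    let pool = Arc::new(LinearObjectPool::new(Map::new, Map::clear));

    {
        let mut map = pool.pull();
        map.insert("service".to_string(), "api".to_string());
        assert_eq!(1, map.len());
    } // dropping the guard returns the map to the pool

    // The reused map has been reset rather than reallocated.
    assert!(pool.pull().is_empty());
}
```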
