Fix failing CI and disable integration test CI
Alex Coats committed May 6, 2024
1 parent 57ba866 commit 8bfbe5d
Showing 7 changed files with 192 additions and 21 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/ci.yml
@@ -29,10 +29,10 @@ jobs:
    uses: ./.github/workflows/_check.yml
    with: { os: ubuntu-latest, rust: stable }

  test-int:
    name: "integration tests"
    uses: ./.github/workflows/_test_int.yml
    with: { os: ubuntu-latest, rust: stable, mongodb: "6.0" }
  # test-int:
    # name: "integration tests"
    # uses: ./.github/workflows/_test_int.yml
    # with: { os: ubuntu-latest, rust: stable, mongodb: "6.0" }

  format:
    uses: ./.github/workflows/_fmt.yml
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -9,7 +9,7 @@ repository = "https://github.com/iotaledger/inx-chronicle"
license = "Apache-2.0"
keywords = ["iota", "storage", "permanode", "chronicle", "inx"]
homepage = "https://www.iota.org"
rust-version = "1.60"
rust-version = "1.67"

[lib]
name = "chronicle"
159 changes: 159 additions & 0 deletions docker/docker-compose-test.yml
@@ -0,0 +1,159 @@
version: "3"
services:
mongo:
image: mongo:latest
container_name: mongo
# Warning: We don't keep logs to make development simpler
command: ["--quiet", "--logpath", "/dev/null"]
volumes:
- ./data/chronicle/mongodb:/data/db
# environment:
# - MONGO_INITDB_ROOT_USERNAME=${MONGODB_USERNAME}
# - MONGO_INITDB_ROOT_PASSWORD=${MONGODB_PASSWORD}
ports:
- 27017:27017

# inx-chronicle:
# container_name: inx-chronicle
# depends_on:
# influx:
# condition: service_started
# build:
# context: ..
# dockerfile: docker/Dockerfile.debug
# image: inx-chronicle:dev
# ports:
# - "8042:8042/tcp" # REST API
# - "9100:9100/tcp" # Metrics
# tty: true
# deploy:
# restart_policy:
# condition: on-failure
# delay: 5s
# max_attempts: 3
# command:
# - "--mongodb-conn-str=${MONGODB_CONN_STR}"
# - "--influxdb-url=http://influx:8086"
# - "--influxdb-username=${INFLUXDB_USERNAME}"
# - "--influxdb-password=${INFLUXDB_PASSWORD}"
# - "--inx-url=http://hornet:9029"
# - "--jwt-password=${JWT_PASSWORD}"
# - "--jwt-salt=${JWT_SALT}"

influx:
image: influxdb:1.8
container_name: influx
volumes:
- ./data/chronicle/influxdb:/var/lib/influxdb
- ./assets/influxdb/init.iql:/docker-entrypoint-initdb.d/influx_init.iql
environment:
- INFLUXDB_ADMIN_USER=${INFLUXDB_USERNAME}
- INFLUXDB_ADMIN_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_HTTP_AUTH_ENABLED=true
ports:
- 8086:8086

# hornet:
# image: iotaledger/hornet:2.0-rc
# container_name: hornet
# ulimits:
# nofile:
# soft: 8192
# hard: 8192
# stop_grace_period: 5m
# ports:
# - "15600:15600/tcp" # Gossip
# - "14626:14626/udp" # Autopeering
# - "14265:14265/tcp" # REST API
# - "8081:8081/tcp" # Dashboard
# - "8091:8091/tcp" # Faucet
# - "9311:9311/tcp" # Prometheus
# - "9029:9029/tcp" # INX
# cap_drop:
# - ALL
# volumes:
# - ./data/hornet/alphanet/:/app/alphanet
# - ./data/hornet/testnet/:/app/testnet
# - ./data/hornet/shimmer/:/app/shimmer
# - ./config.testnet.hornet.json:/app/config_testnet.json:ro
# - ./config.alphanet.hornet.json:/app/config_alphanet.json:ro
# command:
# # We can connect to the non-default networks by choosing a different Hornet configuration file.
# # - "-c"
# # - "config_testnet.json"
# # - "config_alphanet.json"
# - "--config=${HORNET_CONFIG_PATH}"
# - "--inx.enabled=true"
# - "--inx.bindAddress=hornet:9029"
# - "--prometheus.enabled=true"
# - "--prometheus.bindAddress=0.0.0.0:9311"

################################################################################
# The following services can be enabled by setting the `debug` profile.

mongo-express:
image: mongo-express
depends_on:
- mongo
profiles:
- debug
restart: unless-stopped
ports:
- 8084:8084
environment:
- ME_CONFIG_MONGODB_SERVER=mongo
- ME_CONFIG_MONGODB_PORT=27017
- ME_CONFIG_OPTIONS_READONLY=true
- VCAP_APP_PORT=8084

################################################################################
# The following services can be enabled by setting the `metrics` profile.

prometheus:
image: prom/prometheus:latest
profiles:
- metrics
container_name: prometheus
restart: unless-stopped
user: "65532"
ports:
- 9090:9090
volumes:
- ./data/prometheus/:/prometheus
- ./assets/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- /etc/localtime:/etc/localtime:ro
command:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--web.external-url=http://localhost:9090"

mongodb-exporter:
image: percona/mongodb_exporter:0.34
profiles:
- metrics
depends_on:
- mongo
container_name: mongodb-exporter
restart: unless-stopped
user: "65532"
ports:
- 9216:9261
command:
- "--mongodb.uri=mongodb://mongo:27017"
- "--mongodb.direct-connect=true"
- "--web.listen-address=:9216"
- "--log.level=info"
- "--discovering-mode"
- "--collect-all"

grafana:
image: grafana/grafana-oss:latest
profiles:
- metrics
container_name: grafana
restart: unless-stopped
user: "65532"
ports:
- 3000:3000
volumes:
- ./data/grafana:/var/lib/grafana
- ./assets/grafana/:/etc/grafana/provisioning/
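
Editor's note: the compose file above only has to provide MongoDB and InfluxDB for the (currently disabled) integration tests. As a rough, hedged sketch of how a test might reach the MongoDB container it starts — not code from this commit — the following assumes the 2.x `mongodb` driver, `tokio`, and the default `mongodb://localhost:27017` address implied by the port mapping above.

use mongodb::{bson::doc, Client};

#[tokio::main]
async fn main() -> mongodb::error::Result<()> {
    // Connect to the `mongo` service exposed on the host by docker-compose-test.yml.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    // Ping the server so a missing or unhealthy container fails fast.
    client.database("admin").run_command(doc! { "ping": 1 }, None).await?;
    println!("test MongoDB is reachable");
    Ok(())
}
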
12 changes: 8 additions & 4 deletions src/bin/inx-chronicle/api/explorer/responses.rs
@@ -1,9 +1,9 @@
// Copyright 2023 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0

use std::ops::Range;

use chronicle::db::mongodb::collections::{DistributionStat, LedgerUpdateByAddressRecord};
#[cfg(feature = "analytics")]
use chronicle::db::mongodb::collections::DistributionStat;
use chronicle::db::mongodb::collections::LedgerUpdateByAddressRecord;
use iota_sdk::{
    types::block::{
        address::Bech32Address,
@@ -153,24 +153,28 @@ pub struct AddressStatDto {
    pub balance: u64,
}

#[cfg(feature = "analytics")]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TokenDistributionResponse {
    pub distribution: Vec<DistributionStatDto>,
    pub ledger_index: SlotIndex,
}

#[cfg(feature = "analytics")]
impl_success_response!(TokenDistributionResponse);

#[cfg(feature = "analytics")]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DistributionStatDto {
    pub range: Range<u64>,
    pub range: core::ops::Range<u64>,
    pub address_count: String,
    #[serde(with = "string")]
    pub total_balance: u64,
}

#[cfg(feature = "analytics")]
impl From<DistributionStat> for DistributionStatDto {
    fn from(s: DistributionStat) -> Self {
        Self {
20 changes: 13 additions & 7 deletions src/bin/inx-chronicle/api/explorer/routes.rs
@@ -5,10 +5,12 @@ use axum::{
    extract::{Path, State},
    routing::get,
};
#[cfg(feature = "analytics")]
use chronicle::db::mongodb::collections::AddressBalanceCollection;
use chronicle::db::{
    mongodb::collections::{
        AddressBalanceCollection, ApplicationStateCollection, BlockCollection, CommittedSlotCollection,
        LedgerUpdateCollection, OutputCollection, ParentsCollection,
        ApplicationStateCollection, BlockCollection, CommittedSlotCollection, LedgerUpdateCollection, OutputCollection,
        ParentsCollection,
    },
    MongoDb,
};
@@ -19,16 +19,20 @@ use iota_sdk::types::block::{
    BlockId,
};

#[cfg(feature = "analytics")]
use super::{
    extractors::RichestAddressesQuery,
    responses::{AddressStatDto, RichestAddressesResponse, TokenDistributionResponse},
};
use super::{
    extractors::{
        BlocksBySlotCursor, BlocksBySlotIndexPagination, LedgerUpdatesByAddressCursor,
        LedgerUpdatesByAddressPagination, LedgerUpdatesBySlotCursor, LedgerUpdatesBySlotPagination,
        RichestAddressesQuery, SlotsCursor, SlotsPagination,
        LedgerUpdatesByAddressPagination, LedgerUpdatesBySlotCursor, LedgerUpdatesBySlotPagination, SlotsCursor,
        SlotsPagination,
    },
    responses::{
        AddressStatDto, Balance, BalanceResponse, BlockChildrenResponse, BlockPayloadTypeDto, BlocksBySlotResponse,
        DecayedMana, LedgerUpdateBySlotDto, LedgerUpdatesByAddressResponse, LedgerUpdatesBySlotResponse,
        RichestAddressesResponse, SlotDto, SlotsResponse, TokenDistributionResponse,
        Balance, BalanceResponse, BlockChildrenResponse, BlockPayloadTypeDto, BlocksBySlotResponse, DecayedMana,
        LedgerUpdateBySlotDto, LedgerUpdatesByAddressResponse, LedgerUpdatesBySlotResponse, SlotDto, SlotsResponse,
    },
};
use crate::api::{
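
Editor's note: both hunks above follow the same pattern — every item that exists only for the `analytics` feature (the response DTOs, their imports, and the `impl_success_response!` invocation) sits behind `#[cfg(feature = "analytics")]` so the API crate still compiles when the feature is off. A minimal sketch of that pattern, with hypothetical names rather than the repository's types, assuming an `analytics = []` entry under `[features]` in Cargo.toml:

#[cfg(feature = "analytics")]
use core::ops::Range;

#[cfg(feature = "analytics")]
#[derive(Clone, Debug)]
pub struct DistributionBucketDto {
    pub range: Range<u64>,
    pub address_count: u64,
}

#[cfg(feature = "analytics")]
impl DistributionBucketDto {
    /// Width of the balance range covered by this bucket.
    pub fn width(&self) -> u64 {
        self.range.end.saturating_sub(self.range.start)
    }
}

// Code compiled unconditionally must not name the gated items, otherwise a
// build without `--features analytics` fails to resolve them.
pub fn api_is_available() -> bool {
    true
}
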
9 changes: 5 additions & 4 deletions src/bin/inx-chronicle/api/router.rs
@@ -3,10 +3,10 @@

//! This `Router` wraps the functionality we use from [`axum::Router`] and tracks the string routes
//! as they are added in a tree node structure. The reason for this ugliness is to provide a routes
//! endpoint which can output a list of unique routes at any depth level. The most critical part of
//! this is the [`Router::into_make_service()`] function, which adds an [`Extension`] containing the
//! root [`RouteNode`]. These routes can also be filtered using a [`RegexSet`] to allow the exclusion
//! of unauthorized routes.
//! endpoint which can output a list of unique routes at any depth level. This router cannot be used
//! directly, instead the underlying axum router must be retrieved using the [`Router::finish()`]
//! method, which returns the root [`RouteNode`]. These routes can also be filtered using a
//! [`RegexSet`] to allow the exclusion of unauthorized routes.
use std::{
    collections::{btree_map::Entry, BTreeMap, BTreeSet},
@@ -132,6 +132,7 @@ where
        }
    }

    #[allow(unused)]
    pub fn merge(mut self, other: Router<S>) -> Self {
        for (path, node) in other.root.children {
            match self.root.children.entry(path) {
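
Editor's note: the module docs above mention filtering routes with a [`RegexSet`]. As a rough illustration of that idea — not code from the repository; the patterns and paths are invented — the `regex` crate lets a whole set of allow-patterns be checked in one pass:

use regex::RegexSet;

fn main() {
    // Routes matching any of these patterns are treated as public.
    let public = RegexSet::new([r"^/health$", r"^/api/core/v3/.*$"]).expect("valid patterns");

    for route in ["/health", "/api/core/v3/info", "/api/explorer/v3/ledger/updates"] {
        if public.is_match(route) {
            println!("{route}: public");
        } else {
            println!("{route}: requires authorization");
        }
    }
}
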
3 changes: 2 additions & 1 deletion src/db/mongodb/collections/parents.rs
@@ -66,7 +66,8 @@ impl MongoDbCollection for ParentsCollection {
}

impl ParentsCollection {
    /// Inserts [`Block`]s together with their associated [`BlockMetadata`].
    /// Inserts [`Block`](iota_sdk::types::block::Block)s together with their associated
    /// [`BlockMetadata`](crate::model::block_metadata::BlockMetadata).
    #[instrument(skip_all, err, level = "trace")]
    pub async fn insert_blocks<'a, I>(&self, blocks_with_metadata: I) -> Result<(), DbError>
    where
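
Editor's note: the doc-comment change above switches to fully qualified intra-doc links, which resolve even when the linked types are never imported by the module. A tiny standalone sketch of the same style, using a hypothetical type and a standard-library link target:

/// Keeps the recorded child ids in a [`BTreeSet`](std::collections::BTreeSet),
/// without this module importing the type by name.
pub struct ChildIndex {
    children: std::collections::BTreeSet<u64>,
}

impl ChildIndex {
    /// Number of children recorded so far.
    pub fn count(&self) -> usize {
        self.children.len()
    }
}
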
