Commit

fix: resolve conflicts with develop branch

niebayes committed Dec 22, 2023
2 parents dafd3f5 + 43f01cc commit 5ee37f3
Showing 71 changed files with 2,715 additions and 324 deletions.
19 changes: 19 additions & 0 deletions .github/workflows/user-doc-label-checker.yml
@@ -0,0 +1,19 @@
name: Check user doc labels
on:
pull_request:
types:
- opened
- reopened
- labeled
- unlabeled

jobs:

check_labels:
name: Check doc labels
runs-on: ubuntu-latest
steps:
- uses: docker://agilepathway/pull-request-label-checker:latest
with:
one_of: Doc update required,Doc not needed
repo_token: ${{ secrets.GITHUB_TOKEN }}
3 changes: 3 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

20 changes: 20 additions & 0 deletions config/datanode.example.toml
@@ -12,6 +12,10 @@ rpc_runtime_size = 8
# It will block datanode startup if it can't receive leases in the heartbeat from the metasrv.
require_lease_before_startup = false

# Initialize all regions in the background during startup.
# By default, the datanode starts serving only after all regions have been initialized.
initialize_region_in_background = false

[heartbeat]
# Interval for sending heartbeat messages to the Metasrv, 3 seconds by default.
interval = "3s"
@@ -123,3 +127,19 @@ parallel_scan_channel_size = 32
# [logging]
# dir = "/tmp/greptimedb/logs"
# level = "info"

# The datanode exports the metrics it generates, encoded in the Prometheus
# remote-write format, to a Prometheus remote-write compatible receiver
# (e.g. `greptimedb` itself).
# This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape.
# [export_metrics]
# Whether to enable exporting metrics, false by default.
# enable = false
# The URL of the metrics export endpoint; defaults to the frontend's default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database that stores the exported metrics; the user must specify a valid database.
# db = ""
# The interval at which metrics are exported.
# write_interval = "30s"
# HTTP headers carried by the Prometheus remote-write requests.
# headers = {}
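For reference, a minimal sketch of what an enabled configuration might look like (assuming self-export to the local frontend endpoint; the database name is illustrative, not a default, and must already exist):

[export_metrics]
enable = true
# Send to the frontend's HTTP endpoint, i.e. `greptimedb` ingests its own metrics.
endpoint = "127.0.0.1:4000"
# Hypothetical database name; create it before enabling the exporter.
db = "system_metrics"
write_interval = "30s"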
16 changes: 16 additions & 0 deletions config/frontend.example.toml
@@ -77,3 +77,19 @@ tcp_nodelay = true
timeout = "10s"
connect_timeout = "10s"
tcp_nodelay = true

# The frontend exports the metrics it generates, encoded in the Prometheus
# remote-write format, to a Prometheus remote-write compatible receiver
# (e.g. `greptimedb` itself).
# This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape.
# [export_metrics]
# Whether to enable exporting metrics, false by default.
# enable = false
# The URL of the metrics export endpoint; defaults to the frontend's default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database that stores the exported metrics; the user must specify a valid database.
# db = ""
# The interval at which metrics are exported.
# write_interval = "30s"
# HTTP headers carried by the Prometheus remote-write requests.
# headers = {}
16 changes: 16 additions & 0 deletions config/metasrv.example.toml
@@ -76,3 +76,19 @@ provider = "raft_engine"
# backoff_base = 2.0
# Stop reconnecting if the total wait time reaches the deadline. If this config is missing, the reconnecting won't terminate.
# backoff_deadline = "5mins"

# The metasrv exports the metrics it generates, encoded in the Prometheus
# remote-write format, to a Prometheus remote-write compatible receiver
# (e.g. `greptimedb` itself).
# This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape.
# [export_metrics]
# Whether to enable exporting metrics, false by default.
# enable = false
# The URL of the metrics export endpoint; defaults to the frontend's default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database that stores the exported metrics; the user must specify a valid database.
# db = ""
# The interval at which metrics are exported.
# write_interval = "30s"
# HTTP headers carried by the Prometheus remote-write requests.
# headers = {}
16 changes: 16 additions & 0 deletions config/standalone.example.toml
@@ -184,3 +184,19 @@ parallel_scan_channel_size = 32
# otlp_endpoint = "localhost:4317"
# The fraction of traces that are sampled and exported. Valid range `[0, 1]`: 1 means all traces are sampled, 0 means none; the default is 1. Ratios > 1 are treated as 1 and ratios < 0 as 0.
# tracing_sample_ratio = 1.0

# The standalone instance exports the metrics it generates, encoded in the Prometheus
# remote-write format, to a Prometheus remote-write compatible receiver
# (e.g. `greptimedb` itself).
# This is only used for `greptimedb` to export its own metrics internally; it is different from a Prometheus scrape.
# [export_metrics]
# Whether to enable exporting metrics, false by default.
# enable = false
# The URL of the metrics export endpoint; defaults to the frontend's default HTTP endpoint.
# endpoint = "127.0.0.1:4000"
# The database that stores the exported metrics; the user must specify a valid database.
# db = ""
# The interval at which metrics are exported.
# write_interval = "30s"
# HTTP headers carried by the Prometheus remote-write requests.
# headers = {}
1 change: 1 addition & 0 deletions src/catalog/Cargo.toml
@@ -13,6 +13,7 @@ arc-swap = "1.0"
arrow-schema.workspace = true
async-stream.workspace = true
async-trait = "0.1"
build-data = "0.1"
common-catalog.workspace = true
common-error.workspace = true
common-grpc.workspace = true
4 changes: 3 additions & 1 deletion src/catalog/src/information_schema.rs
@@ -49,7 +49,8 @@ lazy_static! {
static ref MEMORY_TABLES: &'static [&'static str] = &[
ENGINES,
COLUMN_PRIVILEGES,
COLUMN_STATISTICS
COLUMN_STATISTICS,
BUILD_INFO,
];
}

@@ -154,6 +155,7 @@ impl InformationSchemaProvider {
ENGINES => setup_memory_table!(ENGINES),
COLUMN_PRIVILEGES => setup_memory_table!(COLUMN_PRIVILEGES),
COLUMN_STATISTICS => setup_memory_table!(COLUMN_STATISTICS),
BUILD_INFO => setup_memory_table!(BUILD_INFO),
_ => None,
}
}
27 changes: 27 additions & 0 deletions src/catalog/src/information_schema/memory_table/tables.rs
@@ -21,6 +21,8 @@ use datatypes::vectors::StringVector;

use crate::information_schema::table_names::*;

const UNKNOWN: &str = "unknown";

/// Finds the schema and columns by `table_name`; only valid for memory tables.
/// Safety: the caller MUST ensure the table schema exists; panics otherwise.
pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
@@ -70,6 +72,31 @@ pub fn get_schema_columns(table_name: &str) -> (SchemaRef, Vec<VectorRef>) {
],
),

BUILD_INFO => (
string_columns(&[
"GIT_BRANCH",
"GIT_COMMIT",
"GIT_COMMIT_SHORT",
"GIT_DIRTY",
"PKG_VERSION",
]),
vec![
Arc::new(StringVector::from(vec![
build_data::get_git_branch().unwrap_or_else(|_| UNKNOWN.to_string())
])),
Arc::new(StringVector::from(vec![
build_data::get_git_commit().unwrap_or_else(|_| UNKNOWN.to_string())
])),
Arc::new(StringVector::from(vec![
build_data::get_git_commit_short().unwrap_or_else(|_| UNKNOWN.to_string())
])),
Arc::new(StringVector::from(vec![
build_data::get_git_dirty().map_or(UNKNOWN.to_string(), |v| v.to_string())
])),
Arc::new(StringVector::from(vec![option_env!("CARGO_PKG_VERSION")])),
],
),

_ => unreachable!("Unknown table in information_schema: {}", table_name),
};

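The `build-data` crate resolves these values by invoking `git` in the build environment, so every getter returns a `Result`, and the table falls back to "unknown" when the metadata is unavailable. A minimal standalone sketch of the same fallback pattern (the `main` harness is illustrative; the crate calls mirror the diff above):

fn main() {
    const UNKNOWN: &str = "unknown";
    // Each build_data getter returns a Result; fall back to "unknown" on
    // error, exactly as the BUILD_INFO columns are populated above.
    let branch = build_data::get_git_branch().unwrap_or_else(|_| UNKNOWN.to_string());
    let commit = build_data::get_git_commit_short().unwrap_or_else(|_| UNKNOWN.to_string());
    // get_git_dirty yields a bool on success, hence map_or rather than unwrap_or_else.
    let dirty = build_data::get_git_dirty().map_or(UNKNOWN.to_string(), |v| v.to_string());
    println!("branch={branch} commit={commit} dirty={dirty} pkg={}", env!("CARGO_PKG_VERSION"));
}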
1 change: 1 addition & 0 deletions src/catalog/src/information_schema/table_names.rs
@@ -19,3 +19,4 @@ pub const COLUMNS: &str = "columns";
pub const ENGINES: &str = "engines";
pub const COLUMN_PRIVILEGES: &str = "column_privileges";
pub const COLUMN_STATISTICS: &str = "column_statistics";
pub const BUILD_INFO: &str = "build_info";
10 changes: 9 additions & 1 deletion src/catalog/src/memory/manager.rs
@@ -18,7 +18,9 @@ use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};

use common_catalog::build_db_string;
use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME};
use common_catalog::consts::{
DEFAULT_CATALOG_NAME, DEFAULT_PRIVATE_SCHEMA_NAME, DEFAULT_SCHEMA_NAME, INFORMATION_SCHEMA_NAME,
};
use snafu::OptionExt;
use table::TableRef;

@@ -135,6 +137,12 @@ impl MemoryCatalogManager {
schema: DEFAULT_SCHEMA_NAME.to_string(),
})
.unwrap();
manager
.register_schema_sync(RegisterSchemaRequest {
catalog: DEFAULT_CATALOG_NAME.to_string(),
schema: DEFAULT_PRIVATE_SCHEMA_NAME.to_string(),
})
.unwrap();

manager
}
4 changes: 4 additions & 0 deletions src/cmd/src/frontend.rs
@@ -249,6 +249,10 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;

instance
.build_export_metrics_task(&opts.export_metrics)
.context(StartFrontendSnafu)?;

instance
.build_servers(opts)
.await
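`build_export_metrics_task` wires the `[export_metrics]` options into a background task before the servers are built. A hedged sketch of the general shape of such a task (the loop and `encode_and_send` below are illustrative assumptions, not the actual `servers::export_metrics` implementation):

use std::time::Duration;

// Illustrative stub: a real implementation would gather the process's
// metrics, encode them as Prometheus remote-write payloads, and POST
// them to the configured endpoint/database.
async fn encode_and_send(endpoint: &str, db: &str) -> Result<(), String> {
    let _ = (endpoint, db);
    Ok(())
}

// Export metrics periodically at the configured `write_interval`.
async fn export_metrics_loop(endpoint: String, db: String, interval: Duration) {
    let mut ticker = tokio::time::interval(interval);
    loop {
        ticker.tick().await;
        if let Err(e) = encode_and_send(&endpoint, &db).await {
            eprintln!("failed to export metrics: {e}");
        }
    }
}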
19 changes: 18 additions & 1 deletion src/cmd/src/standalone.rs
@@ -43,6 +43,7 @@ use frontend::service_config::{
};
use mito2::config::MitoConfig;
use serde::{Deserialize, Serialize};
use servers::export_metrics::ExportMetricsOption;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
use servers::Mode;
@@ -111,6 +112,7 @@ pub struct StandaloneOptions {
pub user_provider: Option<String>,
/// Options for different store engines.
pub region_engine: Vec<RegionEngineConfig>,
pub export_metrics: ExportMetricsOption,
}

impl Default for StandaloneOptions {
@@ -130,6 +132,7 @@ impl Default for StandaloneOptions {
metadata_store: KvBackendConfig::default(),
procedure: ProcedureConfig::default(),
logging: LoggingOptions::default(),
export_metrics: ExportMetricsOption::default(),
user_provider: None,
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig::default()),
@@ -153,6 +156,8 @@ impl StandaloneOptions {
meta_client: None,
logging: self.logging,
user_provider: self.user_provider,
// Delegate the export-metrics task configured on the standalone instance to the frontend for execution.
export_metrics: self.export_metrics,
..Default::default()
}
}
@@ -254,12 +259,20 @@ pub struct StartCommand {

impl StartCommand {
fn load_options(&self, cli_options: &CliOptions) -> Result<Options> {
let mut opts: StandaloneOptions = Options::load_layered_options(
let opts: StandaloneOptions = Options::load_layered_options(
self.config_file.as_deref(),
self.env_prefix.as_ref(),
None,
)?;

self.convert_options(cli_options, opts)
}

pub fn convert_options(
&self,
cli_options: &CliOptions,
mut opts: StandaloneOptions,
) -> Result<Options> {
opts.mode = Mode::Standalone;

if let Some(dir) = &cli_options.log_dir {
@@ -403,6 +416,10 @@ impl StartCommand {
.await
.context(StartFrontendSnafu)?;

frontend
.build_export_metrics_task(&opts.frontend.export_metrics)
.context(StartFrontendSnafu)?;

frontend
.build_servers(opts)
.await
3 changes: 3 additions & 0 deletions src/common/catalog/src/consts.rs
@@ -17,6 +17,7 @@ pub const INFORMATION_SCHEMA_NAME: &str = "information_schema";
pub const SYSTEM_CATALOG_TABLE_NAME: &str = "system_catalog";
pub const DEFAULT_CATALOG_NAME: &str = "greptime";
pub const DEFAULT_SCHEMA_NAME: &str = "public";
pub const DEFAULT_PRIVATE_SCHEMA_NAME: &str = "greptime_private";

/// Reserves [0,MIN_USER_TABLE_ID) for internal usage.
/// User defined table id starts from this value.
@@ -41,6 +42,8 @@ pub const INFORMATION_SCHEMA_ENGINES_TABLE_ID: u32 = 5;
pub const INFORMATION_SCHEMA_COLUMN_PRIVILEGES_TABLE_ID: u32 = 6;
/// id for information_schema.column_statistics
pub const INFORMATION_SCHEMA_COLUMN_STATISTICS_TABLE_ID: u32 = 7;
/// id for information_schema.build_info
pub const INFORMATION_SCHEMA_BUILD_INFO_TABLE_ID: u32 = 8;
/// ----- End of information_schema tables -----
pub const MITO_ENGINE: &str = "mito";
12 changes: 11 additions & 1 deletion src/common/error/src/status_code.rs
@@ -59,6 +59,10 @@ pub enum StatusCode {
RegionNotFound = 4005,
RegionAlreadyExists = 4006,
RegionReadonly = 4007,
RegionNotReady = 4008,
// If mutually exclusive operations arrive at the same time, only one
// can be executed; the others get RegionBusy.
RegionBusy = 4009,
// ====== End of catalog related status code =======

// ====== Begin of storage related status code =====
@@ -103,7 +107,9 @@ impl StatusCode {
match self {
StatusCode::StorageUnavailable
| StatusCode::RuntimeResourcesExhausted
| StatusCode::Internal => true,
| StatusCode::Internal
| StatusCode::RegionNotReady
| StatusCode::RegionBusy => true,

StatusCode::Success
| StatusCode::Unknown
@@ -152,6 +158,8 @@
| StatusCode::TableAlreadyExists
| StatusCode::TableNotFound
| StatusCode::RegionNotFound
| StatusCode::RegionNotReady
| StatusCode::RegionBusy
| StatusCode::RegionAlreadyExists
| StatusCode::RegionReadonly
| StatusCode::TableColumnNotFound
@@ -183,6 +191,8 @@ impl StatusCode {
v if v == StatusCode::TableAlreadyExists as u32 => Some(StatusCode::TableAlreadyExists),
v if v == StatusCode::TableNotFound as u32 => Some(StatusCode::TableNotFound),
v if v == StatusCode::RegionNotFound as u32 => Some(StatusCode::RegionNotFound),
v if v == StatusCode::RegionNotReady as u32 => Some(StatusCode::RegionNotReady),
v if v == StatusCode::RegionBusy as u32 => Some(StatusCode::RegionBusy),
v if v == StatusCode::RegionAlreadyExists as u32 => {
Some(StatusCode::RegionAlreadyExists)
}
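The new codes are wired through three places: the enum itself, the retryability check, and the numeric lookup. A small sketch of the intended round-trip behavior (the `from_u32` and `is_retryable` names are assumptions inferred from the match arms shown above):

// RegionNotReady (4008) and RegionBusy (4009) are transient: the region
// will finish opening, or the conflicting operation will complete, so
// both are classified as retryable.
assert_eq!(StatusCode::from_u32(4008), Some(StatusCode::RegionNotReady));
assert_eq!(StatusCode::from_u32(4009), Some(StatusCode::RegionBusy));
assert!(StatusCode::RegionNotReady.is_retryable());
assert!(StatusCode::RegionBusy.is_retryable());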
7 changes: 2 additions & 5 deletions src/common/meta/src/ddl.rs
@@ -15,17 +15,15 @@
use std::collections::HashMap;
use std::sync::Arc;

use api::v1::meta::Partition;
use common_telemetry::tracing_context::W3cTrace;
use store_api::storage::{RegionNumber, TableId};
use table::metadata::RawTableInfo;

use crate::cache_invalidator::CacheInvalidatorRef;
use crate::datanode_manager::DatanodeManagerRef;
use crate::error::Result;
use crate::key::TableMetadataManagerRef;
use crate::region_keeper::MemoryRegionKeeperRef;
use crate::rpc::ddl::{SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::ddl::{CreateTableTask, SubmitDdlTaskRequest, SubmitDdlTaskResponse};
use crate::rpc::router::RegionRoute;

pub mod alter_table;
@@ -71,8 +69,7 @@ pub trait TableMetadataAllocator: Send + Sync {
async fn create(
&self,
ctx: &TableMetadataAllocatorContext,
table_info: &mut RawTableInfo,
partitions: &[Partition],
task: &CreateTableTask,
) -> Result<TableMetadata>;
}
