diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index a7a7ea0aedf6..77f4b4c3cdb5 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -218,6 +218,8 @@ parallel_scan_channel_size = 32
 # otlp_endpoint = "localhost:4317"
 # The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
 # tracing_sample_ratio = 1.0
+# Whether to append logs to stdout. Defaults to true.
+# append_stdout = true

 # Standalone export the metrics generated by itself
 # encoded to Prometheus remote-write format
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index f825d8f3835b..0b7c3ba1b818 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -43,6 +43,7 @@ pub struct LoggingOptions {
     pub enable_otlp_tracing: bool,
     pub otlp_endpoint: Option<String>,
     pub tracing_sample_ratio: Option<f64>,
+    pub append_stdout: bool,
 }

 impl PartialEq for LoggingOptions {
@@ -52,6 +53,7 @@ impl PartialEq for LoggingOptions {
             && self.enable_otlp_tracing == other.enable_otlp_tracing
             && self.otlp_endpoint == other.otlp_endpoint
             && self.tracing_sample_ratio == other.tracing_sample_ratio
+            && self.append_stdout == other.append_stdout
     }
 }

@@ -65,6 +67,7 @@ impl Default for LoggingOptions {
             enable_otlp_tracing: false,
             otlp_endpoint: None,
             tracing_sample_ratio: None,
+            append_stdout: true,
         }
     }
 }
@@ -129,10 +132,14 @@ pub fn init_global_logging(
     // Enable log compatible layer to convert log record to tracing span.
     LogTracer::init().expect("log tracer must be valid");

-    // Stdout layer.
-    let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
-    let stdout_logging_layer = Layer::new().with_writer(stdout_writer);
-    guards.push(stdout_guard);
+    let stdout_logging_layer = if opts.append_stdout {
+        let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
+        guards.push(stdout_guard);
+
+        Some(Layer::new().with_writer(stdout_writer))
+    } else {
+        None
+    };

     // JSON log layer.
     let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
@@ -184,7 +191,7 @@ pub fn init_global_logging(
         None
     };

-    let stdout_logging_layer = stdout_logging_layer.with_filter(filter.clone());
+    let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));

     let file_logging_layer = file_logging_layer.with_filter(filter);

diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index e96f1aaa21fe..52956e8055f9 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -483,6 +483,12 @@ pub enum Error {
         location: Location,
         source: query::error::Error,
     },
+
+    #[snafu(display("Invalid table name: {}", table_name))]
+    InvalidTableName {
+        table_name: String,
+        location: Location,
+    },
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -507,7 +513,8 @@ impl ErrorExt for Error {
             | Error::InvalidPartitionColumns { .. }
             | Error::PrepareFileTable { .. }
             | Error::InferFileTableSchema { .. }
-            | Error::SchemaIncompatible { .. } => StatusCode::InvalidArguments,
+            | Error::SchemaIncompatible { .. }
+            | Error::InvalidTableName { .. } => StatusCode::InvalidArguments,

             Error::TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 620e3de6445d..43fdf23a4f5b 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -50,8 +50,8 @@ use table::TableRef;
 use super::StatementExecutor;
 use crate::error::{
     self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
-    DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, ParseSqlSnafu, Result,
-    SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
+    DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, InvalidTableNameSnafu, ParseSqlSnafu,
+    Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
     UnrecognizedTableOptionSnafu,
 };
 use crate::expr_factory;
@@ -131,8 +131,8 @@ impl StatementExecutor {

         ensure!(
             NAME_PATTERN_REG.is_match(&create_table.table_name),
-            error::UnexpectedSnafu {
-                violated: format!("Invalid table name: {}", create_table.table_name)
+            InvalidTableNameSnafu {
+                table_name: create_table.table_name.clone(),
             }
         );

diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index d73fcbe91397..70b4401c9a73 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -24,7 +24,7 @@ use catalog;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
-use common_telemetry::logging;
+use common_telemetry::{debug, error};
 use datatypes::prelude::ConcreteDataType;
 use query::parser::PromQuery;
 use serde_json::json;
@@ -620,7 +620,11 @@ impl IntoResponse for Error {
             | Error::InvalidQuery { .. }
             | Error::TimePrecision { .. } => HttpStatusCode::BAD_REQUEST,
             _ => {
-                logging::error!(self; "Failed to handle HTTP request");
+                if self.status_code().should_log_error() {
+                    error!(self; "Failed to handle HTTP request: ");
+                } else {
+                    debug!("Failed to handle HTTP request: {self}");
+                }

                 HttpStatusCode::INTERNAL_SERVER_ERROR
             }
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 0decb3821951..9341ba5f09ce 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -747,6 +747,7 @@ enable = true

 [frontend.logging]
 enable_otlp_tracing = false
+append_stdout = true

 [frontend.datanode.client]
 timeout = "10s"
@@ -815,6 +816,7 @@ parallel_scan_channel_size = 32

 [datanode.logging]
 enable_otlp_tracing = false
+append_stdout = true

 [datanode.export_metrics]
 enable = false
@@ -825,6 +827,7 @@ write_interval = "30s"

 [logging]
 enable_otlp_tracing = false
+append_stdout = true

 [wal_meta]
 provider = "raft_engine""#,
diff --git a/tests/cases/standalone/common/create/create.result b/tests/cases/standalone/common/create/create.result
index 08e4b658de2b..436cbfb393db 100644
--- a/tests/cases/standalone/common/create/create.result
+++ b/tests/cases/standalone/common/create/create.result
@@ -52,7 +52,7 @@ Error: 4000(TableAlreadyExists), Table already exists: `greptime.public.test2`

 CREATE TABLE 'N.~' (i TIMESTAMP TIME INDEX);

-Error: 1002(Unexpected), Unexpected, violated: Invalid table name: N.~
+Error: 1004(InvalidArguments), Invalid table name: N.~

 DESC TABLE integers;

diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index b5218979821a..1bd7ad36496a 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -190,7 +190,7 @@ impl Env {
                 "start".to_string(),
                 "-c".to_string(),
                 self.generate_config_file(subcommand, db_ctx),
-                "--http-addr=127.0.0.1:5001".to_string(),
+                "--http-addr=127.0.0.1:5002".to_string(),
             ];
             (args, SERVER_ADDR.to_string())
         }
@@ -213,7 +213,7 @@ impl Env {
                 "true".to_string(),
                 "--enable-region-failover".to_string(),
                 "false".to_string(),
-                "--http-addr=127.0.0.1:5001".to_string(),
+                "--http-addr=127.0.0.1:5002".to_string(),
             ];
             (args, METASRV_ADDR.to_string())
         }