diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index a7a7ea0aedf6..77f4b4c3cdb5 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -218,6 +218,8 @@ parallel_scan_channel_size = 32
 # otlp_endpoint = "localhost:4317"
 # The percentage of tracing will be sampled and exported. Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1. ratio > 1 are treated as 1. Fractions < 0 are treated as 0
 # tracing_sample_ratio = 1.0
+# Whether to append logs to stdout. Defaults to true.
+# append_stdout = true
 
 # Standalone export the metrics generated by itself
 # encoded to Prometheus remote-write format
diff --git a/src/common/telemetry/src/logging.rs b/src/common/telemetry/src/logging.rs
index f825d8f3835b..0b7c3ba1b818 100644
--- a/src/common/telemetry/src/logging.rs
+++ b/src/common/telemetry/src/logging.rs
@@ -43,6 +43,7 @@ pub struct LoggingOptions {
     pub enable_otlp_tracing: bool,
     pub otlp_endpoint: Option<String>,
     pub tracing_sample_ratio: Option<f64>,
+    pub append_stdout: bool,
 }
 
 impl PartialEq for LoggingOptions {
@@ -52,6 +53,7 @@ impl PartialEq for LoggingOptions {
             && self.enable_otlp_tracing == other.enable_otlp_tracing
             && self.otlp_endpoint == other.otlp_endpoint
             && self.tracing_sample_ratio == other.tracing_sample_ratio
+            && self.append_stdout == other.append_stdout
     }
 }
 
@@ -65,6 +67,7 @@ impl Default for LoggingOptions {
             enable_otlp_tracing: false,
             otlp_endpoint: None,
             tracing_sample_ratio: None,
+            append_stdout: true,
         }
     }
 }
@@ -129,10 +132,14 @@ pub fn init_global_logging(
     // Enable log compatible layer to convert log record to tracing span.
     LogTracer::init().expect("log tracer must be valid");
 
-    // Stdout layer.
-    let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
-    let stdout_logging_layer = Layer::new().with_writer(stdout_writer);
-    guards.push(stdout_guard);
+    let stdout_logging_layer = if opts.append_stdout {
+        let (stdout_writer, stdout_guard) = tracing_appender::non_blocking(std::io::stdout());
+        guards.push(stdout_guard);
+
+        Some(Layer::new().with_writer(stdout_writer))
+    } else {
+        None
+    };
 
     // JSON log layer.
     let rolling_appender = RollingFileAppender::new(Rotation::HOURLY, dir, app_name);
@@ -184,7 +191,7 @@ pub fn init_global_logging(
         None
     };
 
-    let stdout_logging_layer = stdout_logging_layer.with_filter(filter.clone());
+    let stdout_logging_layer = stdout_logging_layer.map(|x| x.with_filter(filter.clone()));
 
     let file_logging_layer = file_logging_layer.with_filter(filter);
 
diff --git a/src/operator/src/error.rs b/src/operator/src/error.rs
index e96f1aaa21fe..52956e8055f9 100644
--- a/src/operator/src/error.rs
+++ b/src/operator/src/error.rs
@@ -483,6 +483,12 @@ pub enum Error {
         location: Location,
         source: query::error::Error,
     },
+
+    #[snafu(display("Invalid table name: {}", table_name))]
+    InvalidTableName {
+        table_name: String,
+        location: Location,
+    },
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -507,7 +513,8 @@ impl ErrorExt for Error {
             | Error::InvalidPartitionColumns { .. }
             | Error::PrepareFileTable { .. }
             | Error::InferFileTableSchema { .. }
-            | Error::SchemaIncompatible { .. } => StatusCode::InvalidArguments,
+            | Error::SchemaIncompatible { .. }
+            | Error::InvalidTableName { .. } => StatusCode::InvalidArguments,
 
             Error::TableAlreadyExists { .. } => StatusCode::TableAlreadyExists,
 
diff --git a/src/operator/src/insert.rs b/src/operator/src/insert.rs
index a17839ed64ae..6ed2528a242b 100644
--- a/src/operator/src/insert.rs
+++ b/src/operator/src/insert.rs
@@ -27,7 +27,7 @@ use common_meta::datanode_manager::{AffectedRows, DatanodeManagerRef};
 use common_meta::peer::Peer;
 use common_query::Output;
 use common_telemetry::tracing_context::TracingContext;
-use common_telemetry::{error, info};
+use common_telemetry::{debug, error, info};
 use datatypes::schema::Schema;
 use futures_util::future;
 use meter_macros::write_meter;
@@ -301,7 +301,7 @@ impl Inserter {
         let request_schema = req.rows.as_ref().unwrap().schema.as_slice();
         let create_table_expr = &mut build_create_table_expr(&table_ref, request_schema)?;
 
-        info!(
+        debug!(
             "Table {}.{}.{} does not exist, try create table",
             table_ref.catalog, table_ref.schema, table_ref.table,
         );
@@ -320,7 +320,7 @@ impl Inserter {
                 Ok(())
             }
             Err(err) => {
-                error!(
+                debug!(
                     "Failed to create table {}.{}.{}: {}",
                     table_ref.catalog, table_ref.schema, table_ref.table, err
                 );
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 620e3de6445d..43fdf23a4f5b 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -50,8 +50,8 @@ use table::TableRef;
 use super::StatementExecutor;
 use crate::error::{
     self, AlterExprToRequestSnafu, CatalogSnafu, ColumnDataTypeSnafu, ColumnNotFoundSnafu,
-    DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, ParseSqlSnafu, Result,
-    SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
+    DeserializePartitionSnafu, InvalidPartitionColumnsSnafu, InvalidTableNameSnafu, ParseSqlSnafu,
+    Result, SchemaNotFoundSnafu, TableMetadataManagerSnafu, TableNotFoundSnafu,
     UnrecognizedTableOptionSnafu,
 };
 use crate::expr_factory;
@@ -131,8 +131,8 @@ impl StatementExecutor {
 
         ensure!(
             NAME_PATTERN_REG.is_match(&create_table.table_name),
-            error::UnexpectedSnafu {
-                violated: format!("Invalid table name: {}", create_table.table_name)
+            InvalidTableNameSnafu {
+                table_name: create_table.table_name.clone(),
             }
         );
 
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index d73fcbe91397..70b4401c9a73 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -24,7 +24,7 @@ use catalog;
 use common_error::ext::{BoxedError, ErrorExt};
 use common_error::status_code::StatusCode;
 use common_macro::stack_trace_debug;
-use common_telemetry::logging;
+use common_telemetry::{debug, error};
 use datatypes::prelude::ConcreteDataType;
 use query::parser::PromQuery;
 use serde_json::json;
@@ -620,7 +620,10 @@ impl IntoResponse for Error {
             | Error::InvalidQuery { .. }
             | Error::TimePrecision { .. } => HttpStatusCode::BAD_REQUEST,
             _ => {
-                logging::error!(self; "Failed to handle HTTP request");
+                if self.status_code().should_log_error() {
+                    error!(self; "Failed to handle HTTP request: ");
+                } else {
+                    debug!("Failed to handle HTTP request: {self}");
+                }
 
                 HttpStatusCode::INTERNAL_SERVER_ERROR
             }
diff --git a/src/servers/src/http.rs b/src/servers/src/http.rs
index a390f406d778..c4f9cfad57e2 100644
--- a/src/servers/src/http.rs
+++ b/src/servers/src/http.rs
@@ -49,6 +49,7 @@ use common_error::status_code::StatusCode;
 use common_query::Output;
 use common_recordbatch::{util, RecordBatch};
 use common_telemetry::logging::{debug, error, info};
+use common_telemetry::tracing::Level;
 use common_time::timestamp::TimeUnit;
 use common_time::Timestamp;
 use datatypes::data_type::DataType;
@@ -61,7 +62,7 @@ use tokio::sync::oneshot::{self, Sender};
 use tokio::sync::Mutex;
 use tower::timeout::TimeoutLayer;
 use tower::ServiceBuilder;
-use tower_http::trace::TraceLayer;
+use tower_http::trace::{DefaultOnFailure, TraceLayer};
 
 use self::authorize::AuthState;
 use crate::configurator::ConfiguratorRef;
@@ -710,7 +711,10 @@ impl HttpServer {
             .layer(
                 ServiceBuilder::new()
                     .layer(HandleErrorLayer::new(handle_error))
-                    .layer(TraceLayer::new_for_http())
+                    .layer(
+                        TraceLayer::new_for_http()
+                            .on_failure(DefaultOnFailure::new().level(Level::DEBUG)),
+                    )
                     .layer(TimeoutLayer::new(self.options.timeout))
                     .layer(DefaultBodyLimit::max(
                         self.options
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 0decb3821951..9341ba5f09ce 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -747,6 +747,7 @@ enable = true
 
 [frontend.logging]
 enable_otlp_tracing = false
+append_stdout = true
 
 [frontend.datanode.client]
 timeout = "10s"
@@ -815,6 +816,7 @@ parallel_scan_channel_size = 32
 
 [datanode.logging]
 enable_otlp_tracing = false
+append_stdout = true
 
 [datanode.export_metrics]
 enable = false
@@ -825,6 +827,7 @@ write_interval = "30s"
 
 [logging]
 enable_otlp_tracing = false
+append_stdout = true
 
 [wal_meta]
 provider = "raft_engine""#,