chore: bump typos version and fix typos (#10342)
xxchan authored Jun 15, 2023
1 parent 5cf94c9 commit a164ab7
Showing 18 changed files with 34 additions and 31 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/typo.yml
@@ -10,4 +10,4 @@ jobs:
uses: actions/checkout@v3

- name: Check spelling of the entire repository
-uses: crate-ci/typos@v1.14.12
+uses: crate-ci/typos@v1.15.0
19 changes: 11 additions & 8 deletions .typos.toml
@@ -1,13 +1,16 @@
[default.extend-words]
indexs = "indices"
Stichting = "Stichting"
fo = "fo"
FPR = "FPR"
indexs = "indices" # Both are valid, just pick one.
Stichting = "Stichting" # This is Dutch for "Foundation". From DuckDB.
FPR = "FPR" # False Positive Rate
inout = "inout" # This is a SQL keyword!
numer = "numer" # numerator
nd = "nd" # N-dimentional / 2nd
steam = "stream" # You played with Steam games too much.
# Some weird short variable names
ot = "ot"
inout = "inout"
numer = "numer"
nd = "nd"
steam = "stream"
bui = "bui"

[default.extend-identifiers]

[files]
extend-exclude = [
2 changes: 1 addition & 1 deletion Makefile.toml
@@ -922,7 +922,7 @@ fi
private = true
category = "RiseDev - Check"
description = "Run cargo typos-cli check"
install_crate = { min_version = "1.14.8", crate_name = "typos-cli", binary = "typos", test_arg = [
install_crate = { min_version = "1.15.0", crate_name = "typos-cli", binary = "typos", test_arg = [
"--help",
], install_command = "binstall" }
script = """
8 changes: 4 additions & 4 deletions integration_tests/datagen/sink/sink.go
@@ -8,15 +8,15 @@ type SinkRecord interface {
// Convert the event to an INSERT INTO command.
ToPostgresSql() string

-// Convert the event to a Kakfa message in JSON format.
+// Convert the event to a Kafka message in JSON format.
// This interface will also be used for Pulsar and Kinesis.
ToJson() (topic string, key string, data []byte)

-// Convert the event to a Kakfa message in Protobuf format.
+// Convert the event to a Kafka message in Protobuf format.
// This interface will also be used for Pulsar and Kinesis.
ToProtobuf() (topic string, key string, data []byte)

-// Convert the event to a Kakfa message in Avro format.
+// Convert the event to a Kafka message in Avro format.
// This interface will also be used for Pulsar and Kinesis.
ToAvro() (topic string, key string, data []byte)
}
@@ -40,7 +40,7 @@ func (r BaseSinkRecord) ToAvro() (topic string, key string, data []byte) {
panic("not implemented")
}

-// Convert the event to a Kakfa message in the given format.
+// Convert the event to a Kafka message in the given format.
// This interface will also be used for Pulsar and Kinesis.
func RecordToKafka(r SinkRecord, format string) (topic string, key string, data []byte) {
if format == "json" {
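
Aside: the `SinkRecord` interface above is a record that can be encoded in several formats, with `RecordToKafka` dispatching on a format string. A minimal analogue of that pattern, written in Rust purely for illustration (hypothetical names, not part of this repository):

// Sketch of the SinkRecord pattern: one trait per record, one encoder per format,
// and a helper that dispatches on a format string.
trait SinkRecord {
    fn to_json(&self) -> (String, String, Vec<u8>);
    // Default bodies mirror the Go code's "not implemented" panics.
    fn to_protobuf(&self) -> (String, String, Vec<u8>) {
        panic!("not implemented")
    }
    fn to_avro(&self) -> (String, String, Vec<u8>) {
        panic!("not implemented")
    }
}

struct OrderEvent {
    id: u64,
}

impl SinkRecord for OrderEvent {
    fn to_json(&self) -> (String, String, Vec<u8>) {
        let payload = format!("{{\"id\":{}}}", self.id);
        ("orders".to_string(), self.id.to_string(), payload.into_bytes())
    }
}

// Counterpart of RecordToKafka: pick the encoder by format name.
fn record_to_kafka(r: &dyn SinkRecord, format: &str) -> (String, String, Vec<u8>) {
    match format {
        "json" => r.to_json(),
        "protobuf" => r.to_protobuf(),
        "avro" => r.to_avro(),
        other => panic!("unsupported format: {other}"),
    }
}

fn main() {
    let (topic, key, data) = record_to_kafka(&OrderEvent { id: 42 }, "json");
    println!("{topic} {key} {}", String::from_utf8(data).unwrap());
}
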
2 changes: 1 addition & 1 deletion integration_tests/tidb-cdc-sink/docker-compose.yml
@@ -156,7 +156,7 @@ services:
- "tikv2"
restart: on-failure

-#=================== Kakfa ==================
+#=================== Kafka ==================

# Adapted from https://github.com/confluentinc/demo-scene/blob/master/connect-jdbc/docker-compose.yml
zookeeper:
2 changes: 1 addition & 1 deletion risedev.yml
@@ -146,7 +146,7 @@ profile:
- use: meta-node
- use: compute-node
- use: frontend
-# If you want to use google cloud stoage as storage backend, configure bucket name and root path:
+# If you want to use google cloud storage as storage backend, configure bucket name and root path:
- use: opendal
engine: gcs
bucket: bucket-name
2 changes: 1 addition & 1 deletion src/expr/src/table_function/mod.rs
@@ -54,7 +54,7 @@ pub trait TableFunction: std::fmt::Debug + Sync + Send {
///
/// i.e., for the `i`-th input row, the output rows are `(i, output_1)`, `(i, output_2)`, ...
///
-/// How the output is splited into the `Stream` is arbitrary. It's usually done by a
+/// How the output is split into the `Stream` is arbitrary. It's usually done by a
/// `DataChunkBuilder`.
///
/// ## Example
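
The doc comment above describes the contract of a table function's output: every produced row carries the index of the input row it came from, and how those rows are grouped into chunks is up to the implementation. A tiny, self-contained sketch of that idea in plain Rust (ordinary vectors standing in for RisingWave's actual `DataChunkBuilder` and `Stream` types):

// Each output value is paired with the index `i` of the input row that produced it.
fn unnest(input: &[Vec<i32>]) -> Vec<(usize, i32)> {
    input
        .iter()
        .enumerate()
        .flat_map(|(i, row)| row.iter().map(move |&v| (i, v)))
        .collect()
}

fn main() {
    let input = vec![vec![10, 20], vec![], vec![30]];
    let pairs = unnest(&input);
    assert_eq!(pairs, vec![(0, 10), (0, 20), (2, 30)]);

    // "How the output is split into the `Stream` is arbitrary": any chunking of
    // `pairs` represents the same logical output, e.g. chunks of two rows each.
    for chunk in pairs.chunks(2) {
        println!("{:?}", chunk);
    }
}
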
4 changes: 2 additions & 2 deletions src/expr/src/vector_op/array_length.rs
@@ -179,10 +179,10 @@ fn array_length_of_dim(array: ListRef<'_>, d: i32) -> Result<Option<i32>, ExprEr
/// [1:0]
///
/// statement error
-/// select array_dims(array[]::int[][]); -- would be `[1:0][1:0]` after multidimension support
+/// select array_dims(array[]::int[][]); -- would be `[1:0][1:0]` after multidimensional support
///
/// statement error
-/// select array_dims(array[array[]::int[]]); -- would be `[1:1][1:0]` after multidimension support
+/// select array_dims(array[array[]::int[]]); -- would be `[1:1][1:0]` after multidimensional support
/// ```
#[function("array_dims(list) -> varchar")]
fn array_dims(array: ListRef<'_>, writer: &mut dyn std::fmt::Write) -> Result<(), ExprError> {
2 changes: 1 addition & 1 deletion src/frontend/src/binder/expr/mod.rs
@@ -156,7 +156,7 @@ impl Binder {
timestamp,
time_zone,
} => self.bind_at_time_zone(*timestamp, time_zone),
-// special syntaxt for string
+// special syntax for string
Expr::Trim {
expr,
trim_where,
4 changes: 2 additions & 2 deletions src/frontend/src/binder/relation/watermark.rs
@@ -22,8 +22,8 @@ use crate::binder::statement::RewriteExprsRecursive;
use crate::expr::{ExprImpl, InputRef};

const ERROR_1ST_ARG: &str = "The 1st arg of watermark function should be a table name (incl. source, CTE, view) but not complex structure (subquery, join, another table function). Consider using an intermediate CTE or view as workaround.";
-const ERROR_2ND_ARG_EXPR: &str = "The 2st arg of watermark function should be a column name but not complex expression. Consider using an intermediate CTE or view as workaround.";
-const ERROR_2ND_ARG_TYPE: &str = "The 2st arg of watermark function should be a column of type timestamp with time zone, timestamp or date.";
+const ERROR_2ND_ARG_EXPR: &str = "The 2nd arg of watermark function should be a column name but not complex expression. Consider using an intermediate CTE or view as workaround.";
+const ERROR_2ND_ARG_TYPE: &str = "The 2nd arg of watermark function should be a column of type timestamp with time zone, timestamp or date.";

#[derive(Debug, Clone)]
#[expect(dead_code)]
2 changes: 1 addition & 1 deletion src/frontend/src/optimizer/plan_node/logical_agg.rs
@@ -358,7 +358,7 @@ impl LogicalAggBuilder {
}
});

-// order by is disallowed occur with distinct because we can not diectly rewrite agg with
+// order by is disallowed occur with distinct because we can not directly rewrite agg with
// order by into 2-phase agg.
if has_distinct && has_order_by {
return Err(ErrorCode::InvalidInputSyntax(
2 changes: 1 addition & 1 deletion src/frontend/src/optimizer/plan_node/logical_multi_join.rs
@@ -504,7 +504,7 @@ impl LogicalMultiJoin {
/// 2. Second, for every isolated node will create connection to every other nodes.
/// 3. Third, select and merge one node for a iteration, and use a bfs policy for which node the
/// selected node merged with.
-/// i. The select node mentioned above is the node with least numer of relations and the
+/// i. The select node mentioned above is the node with least number of relations and the
/// lowerst join tree.
/// ii. nodes with a join tree higher than the temporal optimal join tree will be pruned.
pub fn as_bushy_tree_join(&self) -> Result<PlanRef> {
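
The rule in step 3.i of the doc comment above can be read as taking a lexicographic minimum over (number of relations, join-tree height). A hedged sketch of that selection rule in standalone Rust (hypothetical struct and field names, not the planner's real types):

#[derive(Debug)]
struct JoinNode {
    name: &'static str,
    // Number of relations already merged into this node.
    num_relations: usize,
    // Height of the join tree rooted at this node.
    tree_height: usize,
}

// Pick the node with the fewest relations, breaking ties by the lowest join tree.
fn select_next(nodes: &[JoinNode]) -> Option<&JoinNode> {
    nodes.iter().min_by_key(|n| (n.num_relations, n.tree_height))
}

fn main() {
    let nodes = vec![
        JoinNode { name: "a", num_relations: 2, tree_height: 3 },
        JoinNode { name: "b", num_relations: 1, tree_height: 2 },
        JoinNode { name: "c", num_relations: 1, tree_height: 1 },
    ];
    // "c" is selected: tied with "b" on relation count, but with the lower tree.
    println!("{:?}", select_next(&nodes));
}
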
2 changes: 1 addition & 1 deletion src/frontend/src/scheduler/distributed/stats.rs
@@ -44,7 +44,7 @@ impl DistributedQueryMetrics {

let completed_query_counter = register_int_counter_with_registry!(
"distributed_completed_query_counter",
"The number of query ended sccessfully in distributed execution mode",
"The number of query ended successfully in distributed execution mode",
&registry
)
.unwrap();
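
For context, the `register_int_counter_with_registry!` call in the hunk above is the usual Prometheus-style registration of a named counter with a help string. A minimal standalone sketch, assuming the `prometheus` crate and a throwaway registry rather than RisingWave's own metrics wrappers:

use prometheus::{register_int_counter_with_registry, Registry};

fn main() {
    let registry = Registry::new();
    // Register a counter with a name, a help string, and the target registry.
    let completed_query_counter = register_int_counter_with_registry!(
        "distributed_completed_query_counter",
        "The number of query ended successfully in distributed execution mode",
        registry
    )
    .unwrap();

    completed_query_counter.inc();
    assert_eq!(completed_query_counter.get(), 1);
}
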
4 changes: 2 additions & 2 deletions src/meta/src/rpc/metrics.rs
@@ -627,7 +627,7 @@ pub async fn start_fragment_info_monitor<S: MetaStore>(
.collect();
for table_fragments in fragments {
for (fragment_id, fragment) in table_fragments.fragments {
-let frament_id_str = fragment_id.to_string();
+let fragment_id_str = fragment_id.to_string();
for actor in fragment.actors {
let actor_id_str = actor.actor_id.to_string();
// Report a dummay gauge metrics with (fragment id, actor id, node
@@ -641,7 +641,7 @@ pub async fn start_fragment_info_monitor<S: MetaStore>(
.actor_info
.with_label_values(&[
&actor_id_str,
&frament_id_str,
&fragment_id_str,
address,
])
.set(1);
2 changes: 1 addition & 1 deletion src/storage/src/filter_key_extractor.rs
@@ -200,7 +200,7 @@ impl MultiFilterKeyExtractor {
self.id_to_filter_key_extractor.len()
}

-pub fn get_exsting_table_ids(&self) -> HashSet<u32> {
+pub fn get_existing_table_ids(&self) -> HashSet<u32> {
self.id_to_filter_key_extractor.keys().cloned().collect()
}
}
2 changes: 1 addition & 1 deletion src/storage/src/hummock/compactor/mod.rs
@@ -201,7 +201,7 @@ impl Compactor {
};

if let FilterKeyExtractorImpl::Multi(multi) = &multi_filter_key_extractor {
-let found_tables = multi.get_exsting_table_ids();
+let found_tables = multi.get_existing_table_ids();
let removed_tables = compact_table_ids
.iter()
.filter(|table_id| !found_tables.contains(table_id))
2 changes: 1 addition & 1 deletion src/storage/src/hummock/compactor/shared_buffer_compact.rs
@@ -120,7 +120,7 @@ async fn compact_shared_buffer(
.acquire(existing_table_ids.clone())
.await?;
if let FilterKeyExtractorImpl::Multi(multi) = &multi_filter_key_extractor {
-existing_table_ids = multi.get_exsting_table_ids();
+existing_table_ids = multi.get_existing_table_ids();
}
let multi_filter_key_extractor = Arc::new(multi_filter_key_extractor);

2 changes: 1 addition & 1 deletion src/tests/sqlsmith/src/validation.rs
@@ -45,7 +45,7 @@ fn not_unique_error(db_error: &str) -> bool {

fn is_window_error(db_error: &str) -> bool {
db_error.contains("Bind error: The size arg of window table function should be an interval literal")
|| db_error.contains("Bind error: The 2st arg of window table function should be a column name but not complex expression. Consider using an intermediate CTE or view as workaround")
|| db_error.contains("Bind error: The 2nd arg of window table function should be a column name but not complex expression. Consider using an intermediate CTE or view as workaround")
}

// Streaming nested-loop join is not supported, as it is expensive.
Expand Down
