Zero copy tests
* Set zero copy settings in tests.
* Make sure the object storage test table has data on both the local and the remote disk.
joelynch committed Oct 25, 2024
1 parent 2171f69 commit bcbaf05
Showing 2 changed files with 17 additions and 10 deletions.
tests/integration/coordinator/plugins/clickhouse/conftest.py (10 additions, 7 deletions)
@@ -26,11 +26,7 @@
from tests.conftest import CLICKHOUSE_PATH_OPTION, CLICKHOUSE_RESTORE_PATH_OPTION
from tests.integration.conftest import get_command_path, Ports, run_process_and_wait_for_pattern, Service, ServiceCluster
from tests.system.conftest import background_process, wait_url_up
-from tests.utils import (
-    CONSTANT_TEST_RSA_PRIVATE_KEY,
-    CONSTANT_TEST_RSA_PUBLIC_KEY,
-    format_astacus_command,
-)
+from tests.utils import CONSTANT_TEST_RSA_PRIVATE_KEY, CONSTANT_TEST_RSA_PUBLIC_KEY, format_astacus_command

import argparse
import asyncio
@@ -369,11 +365,12 @@ def setting(name: str, value: int | float | str):
                <default><disk>default</disk></default>
            </volumes>
        </default>
-       <remote>
+       <combination>
            <volumes>
                <remote><disk>remote</disk></remote>
                <default><disk>default</disk></default>
            </volumes>
-       </remote>
+       </combination>
    </policies>
</storage_configuration>
"""
@@ -404,6 +401,12 @@ def setting(name: str, value: int | float | str):
{setting("number_of_free_entries_in_pool_to_execute_mutation", 2)}
{setting("number_of_free_entries_in_pool_to_execute_optimize_entire_partition", 2)}
</merge_tree>
<replicated_merge_tree>
<allow_remote_fs_zero_copy_replication>true</allow_remote_fs_zero_copy_replication>
<disable_freeze_partition_for_zero_copy_replication>false</disable_freeze_partition_for_zero_copy_replication>
<disable_detach_partition_for_zero_copy_replication>false</disable_detach_partition_for_zero_copy_replication>
<disable_fetch_partition_for_zero_copy_replication>false</disable_fetch_partition_for_zero_copy_replication>
</replicated_merge_tree>
{setting("background_pool_size", 4)}
{setting("background_move_pool_size", 2)}
{setting("background_fetches_pool_size", 2)}
tests/integration/coordinator/plugins/clickhouse/test_plugin.py (7 additions, 3 deletions)
@@ -257,7 +257,7 @@ async def setup_cluster_content(clients: Sequence[HttpClickHouseClient], clickho
    await clients[0].execute(
        b"CREATE TABLE default.in_object_storage (thekey UInt32, thedata String) "
        b"ENGINE = ReplicatedMergeTree ORDER BY (thekey) "
-       b"SETTINGS storage_policy='remote'"
+       b"SETTINGS storage_policy='combination'"
    )
    await clients[0].execute(SAMPLE_URL_ENGINE_DDL.encode())
    await clients[0].execute(
@@ -313,6 +313,10 @@ async def setup_cluster_content(clients: Sequence[HttpClickHouseClient], clickho
await clients[0].execute(b"INSERT INTO default.in_object_storage VALUES (123, 'foo')")
await clients[1].execute(b"INSERT INTO default.in_object_storage VALUES (456, 'bar')")
await clients[2].execute(b"INSERT INTO default.in_object_storage VALUES (789, 'baz')")
# these should be routed to local disk
await clients[0].execute(b"INSERT INTO default.in_object_storage VALUES (1123, 'foo')")
await clients[1].execute(b"INSERT INTO default.in_object_storage VALUES (1456, 'bar')")
await clients[2].execute(b"INSERT INTO default.in_object_storage VALUES (1789, 'baz')")
# This won't be backed up
await clients[0].execute(b"INSERT INTO default.memory VALUES (123, 'foo')")
await clients[0].execute(b"CREATE FUNCTION `linear_equation_\x80` AS (x, k, b) -> k*x + b")
@@ -434,8 +438,8 @@ async def test_restores_function_table(restored_cluster: Sequence[ClickHouseClie


async def check_object_storage_data(cluster: Sequence[ClickHouseClient]) -> None:
-   s1_data = [[123, "foo"], [456, "bar"]]
-   s2_data = [[789, "baz"]]
+   s1_data = [[123, "foo"], [456, "bar"], [1123, "foo"], [1456, "bar"]]
+   s2_data = [[789, "baz"], [1789, "baz"]]
    cluster_data = [s1_data, s1_data, s2_data]
    for client, expected_data in zip(cluster, cluster_data):
        response = await client.execute(b"SELECT thekey, thedata FROM default.in_object_storage ORDER BY thekey")
