From 5b8bed2ea8915130cac0c165fa497ce5746be1a6 Mon Sep 17 00:00:00 2001 From: linghengqian Date: Fri, 22 Nov 2024 19:43:13 +0800 Subject: [PATCH] Support connecting to HiveServer2 with ZooKeeper Service Discovery enabled in GraalVM Native Image --- RELEASE-NOTES.md | 1 + .../optional-plugins/hiveserver2/_index.cn.md | 155 ++++++++++++++- .../optional-plugins/hiveserver2/_index.en.md | 158 +++++++++++++++- .../testcontainers/_index.cn.md | 5 + .../testcontainers/_index.en.md | 5 + .../HiveSimpleTest.java} | 10 +- .../HiveZookeeperServiceDiscoveryTest.java | 176 ++++++++++++++++++ .../reflect-config.json | 8 - .../databases/{hive.yaml => hive/simple.yaml} | 6 +- .../jdbc/databases/hive/zookeeper-sde.yaml | 72 +++++++ 10 files changed, 566 insertions(+), 30 deletions(-) rename test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/{HiveTest.java => hive/HiveSimpleTest.java} (96%) create mode 100644 test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveZookeeperServiceDiscoveryTest.java rename test/native/src/test/resources/test-native/yaml/jdbc/databases/{hive.yaml => hive/simple.yaml} (90%) create mode 100644 test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/zookeeper-sde.yaml diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index fe6bd7bc32bac5..8d72e00ed165f5 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -23,6 +23,7 @@ 1. DistSQL: Check inline expression when create sharding table rule with inline sharding algorithm - [#33735](https://github.com/apache/shardingsphere/pull/33735) 1. Infra: Support setting `hive_conf_list`, `hive_var_list` and `sess_var_list` for jdbcURL when connecting to HiveServer2 - [#33749](https://github.com/apache/shardingsphere/pull/33749) 1. Infra: Support connecting to HiveServer2 through database connection pools other than HikariCP - [#33762](https://github.com/apache/shardingsphere/pull/33762) +1. 
Proxy Native: Support connecting to HiveServer2 with ZooKeeper Service Discovery enabled in GraalVM Native Image - [#33768](https://github.com/apache/shardingsphere/pull/33768) ### Bug Fixes diff --git a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.cn.md b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.cn.md index f0d5e43ebc4df3..02602e14756f66 100644 --- a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.cn.md +++ b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.cn.md @@ -94,12 +94,6 @@ services: SERVICE_NAME: hiveserver2 ports: - "10000:10000" - expose: - - 10002 - volumes: - - warehouse:/opt/hive/data/warehouse -volumes: - warehouse: ``` ### 创建业务表 @@ -113,7 +107,8 @@ sudo snap install dbeaver-ce snap run dbeaver-ce ``` -在 DBeaver Community 内使用 `jdbc:hive2://localhost:10000/` 的 `jdbcUrl` 连接至 HiveServer2,`username` 和 `password` 留空。 +在 DBeaver Community 内,使用 `jdbc:hive2://localhost:10000/` 的 `jdbcUrl` 连接至 HiveServer2,`username` 和 `password` 留空。 +执行如下 SQL, ```sql -- noinspection SqlNoDataSourceInspectionForFile @@ -297,3 +292,149 @@ HiveServer2 不支持 ShardingSphere 集成级别的本地事务,XA 事务或 当用户使用 DBeaver Community 连接至 HiveServer2 时,需确保 DBeaver Community 版本大于或等于 `24.2.5`。 参考 https://github.com/dbeaver/dbeaver/pull/35059 。 + +### 连接至开启 ZooKeeper Service Discovery 的 HiveServer2 的限制 + +当前的确支持在 ShardingSphere 配置文件中的 `jdbcUrl` 配置连接至开启 ZooKeeper Service Discovery 的 HiveServer2,但存在限制。 + +引入讨论,假设存在如下 Docker Compose 文件来启动开启 ZooKeeper Service Discovery 的 HiveServer2。 + +```yaml +services: + zookeeper: + image: zookeeper:3.9.3-jre-17 + ports: + - "2181:2181" + apache-hive-1: + image: apache/hive:4.0.1 + depends_on: + - zookeeper + environment: + SERVICE_NAME: hiveserver2 + SERVICE_OPTS: >- + -Dhive.server2.support.dynamic.service.discovery=true + -Dhive.zookeeper.quorum=zookeeper:2181 + -Dhive.server2.thrift.bind.host=0.0.0.0 + -Dhive.server2.thrift.port=10000 + ports: + - "10000:10000" + apache-hive-2: + image: apache/hive:4.0.1 + depends_on: + - zookeeper + environment: + SERVICE_NAME: hiveserver2 + SERVICE_OPTS: >- + -Dhive.server2.support.dynamic.service.discovery=true + -Dhive.zookeeper.quorum=zookeeper:2181 + -Dhive.server2.thrift.bind.host=0.0.0.0 + -Dhive.server2.thrift.port=20000 + ports: + - "20000:20000" +``` + +此时,对于 ZooKeeper Server 中的`/hiveserver2/serverUri=0.0.0.0:10000;version=4.0.1;sequence=0000000000` 节点, +存在值为`hive.server2.instance.uri=0.0.0.0:10000;hive.server2.authentication=NONE;hive.server2.transport.mode=binary;hive.server2.thrift.sasl.qop=auth;hive.server2.thrift.bind.host=0.0.0.0;hive.server2.thrift.port=10000;hive.server2.use.SSL=false`。 +对于 ZooKeeper Server 中的`/hiveserver2/serverUri=0.0.0.0:20000;version=4.0.1;sequence=0000000001` 节点, +存在值为`hive.server2.instance.uri=0.0.0.0:20000;hive.server2.authentication=NONE;hive.server2.transport.mode=binary;hive.server2.thrift.sasl.qop=auth;hive.server2.thrift.bind.host=0.0.0.0;hive.server2.thrift.port=20000;hive.server2.use.SSL=false`。 + +在 DBeaver Community 内, +使用 `jdbc:hive2://127.0.0.1:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2` 的 `jdbcUrl` 连接至 HiveServer2, +`username` 和 `password` 留空。 +执行如下 SQL, + +```sql +-- noinspection SqlNoDataSourceInspectionForFile +CREATE DATABASE demo_ds_0; +CREATE DATABASE demo_ds_1; +CREATE DATABASE demo_ds_2; +``` + +分别使用 
`jdbc:hive2://127.0.0.1:2181/demo_ds_0;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2` , +`jdbc:hive2://127.0.0.1:2181/demo_ds_1;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2` 和 +`jdbc:hive2://127.0.0.1:2181/demo_ds_2;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2` +的 `jdbcUrl` 连接至 HiveServer2 来执行如下 SQL, + +```sql +-- noinspection SqlNoDataSourceInspectionForFile +set iceberg.mr.schema.auto.conversion=true; + +CREATE TABLE IF NOT EXISTS t_order +( + order_id BIGINT, + order_type INT, + user_id INT NOT NULL, + address_id BIGINT NOT NULL, + status VARCHAR(50), + PRIMARY KEY (order_id) disable novalidate +) STORED BY ICEBERG STORED AS ORC TBLPROPERTIES ('format-version' = '2'); + +TRUNCATE TABLE t_order; +``` + +在业务项目引入`前提条件`涉及的依赖后,在业务项目的 classpath 上编写 ShardingSphere 数据源的配置文件`demo.yaml`, + +```yaml +dataSources: + ds_0: + dataSourceClassName: com.zaxxer.hikari.HikariDataSource + driverClassName: org.apache.hive.jdbc.HiveDriver + jdbcUrl: jdbc:hive2://127.0.0.1:2181/demo_ds_0;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 + ds_1: + dataSourceClassName: com.zaxxer.hikari.HikariDataSource + driverClassName: org.apache.hive.jdbc.HiveDriver + jdbcUrl: jdbc:hive2://127.0.0.1:2181/demo_ds_1;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 + ds_2: + dataSourceClassName: com.zaxxer.hikari.HikariDataSource + driverClassName: org.apache.hive.jdbc.HiveDriver + jdbcUrl: jdbc:hive2://127.0.0.1:2181/demo_ds_2;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 +rules: +- !SHARDING + tables: + t_order: + actualDataNodes: + keyGenerateStrategy: + column: order_id + keyGeneratorName: snowflake + defaultDatabaseStrategy: + standard: + shardingColumn: user_id + shardingAlgorithmName: inline + shardingAlgorithms: + inline: + type: INLINE + props: + algorithm-expression: ds_${user_id % 2} + keyGenerators: + snowflake: + type: SNOWFLAKE +``` + +此时可正常创建 ShardingSphere 的数据源并在虚拟数据源上执行逻辑 SQL, + +```java +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +public class ExampleUtils { + void test() throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:shardingsphere:classpath:demo.yaml"); + config.setDriverClassName("org.apache.shardingsphere.driver.ShardingSphereDriver"); + try (HikariDataSource dataSource = new HikariDataSource(config); + Connection connection = dataSource.getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("INSERT INTO t_order (user_id, order_type, address_id, status) VALUES (1, 1, 1, 'INSERT_TEST')"); + statement.executeQuery("SELECT * FROM t_order"); + statement.execute("DELETE FROM t_order WHERE order_id=1"); + } + } +} +``` + +但一旦 ZooKeeper Server 中的`/hiveserver2`节点被更新, +由于 ShardingSphere 的内部类会缓存包含旧 HiveServer 实例的信息的 `java.sql.Connection`, +用户需要重新创建 ShardingSphere JDBC DataSource, +或重新创建通过 `org.apache.shardingsphere.driver.ShardingSphereDriver` 创建的 JDBC DataSource。 diff --git a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.en.md b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.en.md index 6b0296ef5d829b..654dca2901e36f 100644 --- a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.en.md +++ b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/hiveserver2/_index.en.md @@ 
-96,12 +96,6 @@ services:
       SERVICE_NAME: hiveserver2
     ports:
       - "10000:10000"
-    expose:
-      - 10002
-    volumes:
-      - warehouse:/opt/hive/data/warehouse
-volumes:
-  warehouse:
 ```
 
 ### Create business tables
@@ -115,7 +109,9 @@ sudo snap install dbeaver-ce
 snap run dbeaver-ce
 ```
 
-In DBeaver Community, connect to HiveServer2 using the `jdbcUrl` of `jdbc:hive2://localhost:10000/`, leaving `username` and `password` blank.
+In DBeaver Community, use the `jdbcUrl` of `jdbc:hive2://localhost:10000/` to connect to HiveServer2,
+and leave `username` and `password` blank.
+Execute the following SQL,
 
 ```sql
 -- noinspection SqlNoDataSourceInspectionForFile
@@ -306,3 +302,151 @@ For more discussion, please visit https://cwiki.apache.org/confluence/display/Hi
 
 When users use DBeaver Community to connect to HiveServer2, they need to ensure that the DBeaver Community version is greater than or equal to `24.2.5`.
 See https://github.com/dbeaver/dbeaver/pull/35059.
+
+### Limitations of connecting to HiveServer2 with ZooKeeper Service Discovery
+
+Currently, the `jdbcUrl` in the ShardingSphere configuration file does support connecting to HiveServer2 with ZooKeeper Service Discovery enabled,
+but there are limitations.
+
+For the purpose of discussion, assume that the following Docker Compose file is used to start HiveServer2 with ZooKeeper Service Discovery enabled.
+
+```yaml
+services:
+  zookeeper:
+    image: zookeeper:3.9.3-jre-17
+    ports:
+      - "2181:2181"
+  apache-hive-1:
+    image: apache/hive:4.0.1
+    depends_on:
+      - zookeeper
+    environment:
+      SERVICE_NAME: hiveserver2
+      SERVICE_OPTS: >-
+        -Dhive.server2.support.dynamic.service.discovery=true
+        -Dhive.zookeeper.quorum=zookeeper:2181
+        -Dhive.server2.thrift.bind.host=0.0.0.0
+        -Dhive.server2.thrift.port=10000
+    ports:
+      - "10000:10000"
+  apache-hive-2:
+    image: apache/hive:4.0.1
+    depends_on:
+      - zookeeper
+    environment:
+      SERVICE_NAME: hiveserver2
+      SERVICE_OPTS: >-
+        -Dhive.server2.support.dynamic.service.discovery=true
+        -Dhive.zookeeper.quorum=zookeeper:2181
+        -Dhive.server2.thrift.bind.host=0.0.0.0
+        -Dhive.server2.thrift.port=20000
+    ports:
+      - "20000:20000"
+```
+
+At this point, the node `/hiveserver2/serverUri=0.0.0.0:10000;version=4.0.1;sequence=0000000000` in the ZooKeeper Server
+holds the value `hive.server2.instance.uri=0.0.0.0:10000;hive.server2.authentication=NONE;hive.server2.transport.mode=binary;hive.server2.thrift.sasl.qop=auth;hive.server2.thrift.bind.host=0.0.0.0;hive.server2.thrift.port=10000;hive.server2.use.SSL=false`,
+and the node `/hiveserver2/serverUri=0.0.0.0:20000;version=4.0.1;sequence=0000000001` in the ZooKeeper Server
+holds the value `hive.server2.instance.uri=0.0.0.0:20000;hive.server2.authentication=NONE;hive.server2.transport.mode=binary;hive.server2.thrift.sasl.qop=auth;hive.server2.thrift.bind.host=0.0.0.0;hive.server2.thrift.port=20000;hive.server2.use.SSL=false`.
+
+In DBeaver Community,
+use the `jdbcUrl` of `jdbc:hive2://127.0.0.1:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2` to connect to HiveServer2,
+and leave `username` and `password` blank.
+Execute the following SQL,
+
+```sql
+-- noinspection SqlNoDataSourceInspectionForFile
+CREATE DATABASE demo_ds_0;
+CREATE DATABASE demo_ds_1;
+CREATE DATABASE demo_ds_2;
+```
+
+Use the `jdbcUrl` of `jdbc:hive2://127.0.0.1:2181/demo_ds_0;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2`,
+`jdbc:hive2://127.0.0.1:2181/demo_ds_1;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2`
+and `jdbc:hive2://127.0.0.1:2181/demo_ds_2;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2` respectively
+to connect to HiveServer2 and execute the following SQL,
+
+```sql
+-- noinspection SqlNoDataSourceInspectionForFile
+set iceberg.mr.schema.auto.conversion=true;
+
+CREATE TABLE IF NOT EXISTS t_order
+(
+    order_id   BIGINT,
+    order_type INT,
+    user_id    INT NOT NULL,
+    address_id BIGINT NOT NULL,
+    status     VARCHAR(50),
+    PRIMARY KEY (order_id) disable novalidate
+) STORED BY ICEBERG STORED AS ORC TBLPROPERTIES ('format-version' = '2');
+
+TRUNCATE TABLE t_order;
+```
+
+After the business project has introduced the dependencies listed in the `prerequisites`,
+write the ShardingSphere data source configuration file `demo.yaml` on the classpath of the business project.
+
+```yaml
+dataSources:
+  ds_0:
+    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
+    driverClassName: org.apache.hive.jdbc.HiveDriver
+    jdbcUrl: jdbc:hive2://127.0.0.1:2181/demo_ds_0;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
+  ds_1:
+    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
+    driverClassName: org.apache.hive.jdbc.HiveDriver
+    jdbcUrl: jdbc:hive2://127.0.0.1:2181/demo_ds_1;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
+  ds_2:
+    dataSourceClassName: com.zaxxer.hikari.HikariDataSource
+    driverClassName: org.apache.hive.jdbc.HiveDriver
+    jdbcUrl: jdbc:hive2://127.0.0.1:2181/demo_ds_2;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
+rules:
+- !SHARDING
+  tables:
+    t_order:
+      actualDataNodes:
+      keyGenerateStrategy:
+        column: order_id
+        keyGeneratorName: snowflake
+  defaultDatabaseStrategy:
+    standard:
+      shardingColumn: user_id
+      shardingAlgorithmName: inline
+  shardingAlgorithms:
+    inline:
+      type: INLINE
+      props:
+        algorithm-expression: ds_${user_id % 2}
+  keyGenerators:
+    snowflake:
+      type: SNOWFLAKE
+```
+
+At this point, you can create the ShardingSphere data source as usual and execute logical SQL against the virtual data source.
+
+```java
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+public class ExampleUtils {
+    void test() throws SQLException {
+        HikariConfig config = new HikariConfig();
+        config.setJdbcUrl("jdbc:shardingsphere:classpath:demo.yaml");
+        config.setDriverClassName("org.apache.shardingsphere.driver.ShardingSphereDriver");
+        try (HikariDataSource dataSource = new HikariDataSource(config);
+             Connection connection = dataSource.getConnection();
+             Statement statement = connection.createStatement()) {
+            statement.execute("INSERT INTO t_order (user_id, order_type, address_id, status) VALUES (1, 1, 1, 'INSERT_TEST')");
+            statement.executeQuery("SELECT * FROM t_order");
+            statement.execute("DELETE FROM t_order WHERE order_id=1");
+        }
+    }
+}
+```
+
+However, once the `/hiveserver2` node in the ZooKeeper Server is updated,
+ShardingSphere's internal classes still cache `java.sql.Connection` instances holding the information of the old HiveServer2 instances.
+Users therefore need to recreate the ShardingSphere JDBC DataSource,
+or recreate the JDBC DataSource created through `org.apache.shardingsphere.driver.ShardingSphereDriver`.
diff --git a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.cn.md b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.cn.md
index 9c537228ddcd3c..8ef2c84852667b 100644
--- a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.cn.md
+++ b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.cn.md
@@ -24,6 +24,11 @@ ShardingSphere 默认情况下不提供对 `org.testcontainers.jdbc.ContainerDat
         <artifactId>shardingsphere-infra-database-testcontainers</artifactId>
         <version>${shardingsphere.version}</version>
     </dependency>
+    <dependency>
+        <groupId>org.postgresql</groupId>
+        <artifactId>postgresql</artifactId>
+        <version>42.7.2</version>
+    </dependency>
     <dependency>
         <groupId>org.testcontainers</groupId>
         <artifactId>postgresql</artifactId>
diff --git a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.en.md b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.en.md
index c8f4f9a2df7564..f9faae7b4cd2ba 100644
--- a/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.en.md
+++ b/docs/document/content/user-manual/shardingsphere-jdbc/optional-plugins/testcontainers/_index.en.md
@@ -24,6 +24,11 @@ the possible Maven dependencies are as follows,
         <artifactId>shardingsphere-infra-database-testcontainers</artifactId>
         <version>${shardingsphere.version}</version>
     </dependency>
+    <dependency>
+        <groupId>org.postgresql</groupId>
+        <artifactId>postgresql</artifactId>
+        <version>42.7.2</version>
+    </dependency>
     <dependency>
         <groupId>org.testcontainers</groupId>
         <artifactId>postgresql</artifactId>
diff --git a/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/HiveTest.java b/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveSimpleTest.java
similarity index 96%
rename from test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/HiveTest.java
rename to test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveSimpleTest.java
index d1a2b01cb5dd58..adaa894756bc4b 100644
--- a/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/HiveTest.java
+++ b/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveSimpleTest.java
@@ -15,7 +15,7 @@
  * limitations under the License.
*/ -package org.apache.shardingsphere.test.natived.jdbc.databases; +package org.apache.shardingsphere.test.natived.jdbc.databases.hive; import com.zaxxer.hikari.HikariConfig; import com.zaxxer.hikari.HikariDataSource; @@ -45,15 +45,15 @@ @SuppressWarnings({"SqlDialectInspection", "SqlNoDataSourceInspection"}) @EnabledInNativeImage @Testcontainers -class HiveTest { +class HiveSimpleTest { @SuppressWarnings("resource") @Container public static final GenericContainer CONTAINER = new GenericContainer<>("apache/hive:4.0.1") .withEnv("SERVICE_NAME", "hiveserver2") - .withExposedPorts(10000, 10002); + .withExposedPorts(10000); - private static final String SYSTEM_PROP_KEY_PREFIX = "fixture.test-native.yaml.database.hive."; + private static final String SYSTEM_PROP_KEY_PREFIX = "fixture.test-native.yaml.database.hive.simple."; // Due to https://issues.apache.org/jira/browse/HIVE-28317 , the `initFile` parameter of HiveServer2 JDBC Driver must be an absolute path. private static final String ABSOLUTE_PATH = Paths.get("src/test/resources/test-native/sql/test-native-databases-hive.sql").toAbsolutePath().normalize().toString(); @@ -124,7 +124,7 @@ private DataSource createDataSource() throws SQLException { } HikariConfig config = new HikariConfig(); config.setDriverClassName("org.apache.shardingsphere.driver.ShardingSphereDriver"); - config.setJdbcUrl("jdbc:shardingsphere:classpath:test-native/yaml/jdbc/databases/hive.yaml?placeholder-type=system_props"); + config.setJdbcUrl("jdbc:shardingsphere:classpath:test-native/yaml/jdbc/databases/hive/simple.yaml?placeholder-type=system_props"); System.setProperty(SYSTEM_PROP_KEY_PREFIX + "ds0.jdbc-url", jdbcUrlPrefix + "demo_ds_0" + ";initFile=" + ABSOLUTE_PATH); System.setProperty(SYSTEM_PROP_KEY_PREFIX + "ds1.jdbc-url", jdbcUrlPrefix + "demo_ds_1" + ";initFile=" + ABSOLUTE_PATH); System.setProperty(SYSTEM_PROP_KEY_PREFIX + "ds2.jdbc-url", jdbcUrlPrefix + "demo_ds_2" + ";initFile=" + ABSOLUTE_PATH); diff --git a/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveZookeeperServiceDiscoveryTest.java b/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveZookeeperServiceDiscoveryTest.java new file mode 100644 index 00000000000000..ebda9e5e5358a5 --- /dev/null +++ b/test/native/src/test/java/org/apache/shardingsphere/test/natived/jdbc/databases/hive/HiveZookeeperServiceDiscoveryTest.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.shardingsphere.test.natived.jdbc.databases.hive; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import org.apache.curator.test.InstanceSpec; +import org.apache.shardingsphere.test.natived.commons.TestShardingService; +import org.awaitility.Awaitility; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledInNativeImage; +import org.testcontainers.containers.FixedHostPortGenericContainer; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; + +import javax.sql.DataSource; +import java.nio.file.Paths; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.time.Duration; +import java.util.Properties; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +@SuppressWarnings({"SqlDialectInspection", "SqlNoDataSourceInspection", "resource", "deprecation"}) +@EnabledInNativeImage +@Testcontainers +class HiveZookeeperServiceDiscoveryTest { + + private static final int RANDOM_PORT_FIRST = InstanceSpec.getRandomPort(); + + private static final int RANDOM_PORT_SECOND = InstanceSpec.getRandomPort(); + + private static final Network NETWORK = Network.newNetwork(); + + @Container + private static final GenericContainer ZOOKEEPER_CONTAINER = new GenericContainer<>("zookeeper:3.9.3-jre-17") + .withNetwork(NETWORK) + .withNetworkAliases("foo") + .withExposedPorts(2181); + + /** + * TODO Maybe we should be able to find a better solution than {@link InstanceSpec#getRandomPort()} to use a random available port on the host. + * It is not a good practice to use {@link FixedHostPortGenericContainer}. + */ + @SuppressWarnings("unused") + @Container + private static final GenericContainer HIVE_SERVER2_1_CONTAINER = new FixedHostPortGenericContainer<>("apache/hive:4.0.1") + .withNetwork(NETWORK) + .withEnv("SERVICE_NAME", "hiveserver2") + .withEnv("SERVICE_OPTS", "-Dhive.server2.support.dynamic.service.discovery=true" + " " + + "-Dhive.zookeeper.quorum=" + ZOOKEEPER_CONTAINER.getNetworkAliases().get(0) + ":2181" + " " + + "-Dhive.server2.thrift.bind.host=0.0.0.0" + " " + + "-Dhive.server2.thrift.port=" + RANDOM_PORT_FIRST) + .withFixedExposedPort(RANDOM_PORT_FIRST, RANDOM_PORT_FIRST) + .dependsOn(ZOOKEEPER_CONTAINER); + + /** + * TODO Same problem as {@code HIVE_SERVER2_1_CONTAINER}. 
+ */ + @SuppressWarnings("unused") + @Container + private static final GenericContainer HIVE_SERVER2_2_CONTAINER = new FixedHostPortGenericContainer<>("apache/hive:4.0.1") + .withNetwork(NETWORK) + .withEnv("SERVICE_NAME", "hiveserver2") + .withEnv("SERVICE_OPTS", "-Dhive.server2.support.dynamic.service.discovery=true" + " " + + "-Dhive.zookeeper.quorum=" + ZOOKEEPER_CONTAINER.getNetworkAliases().get(0) + ":2181" + " " + + "-Dhive.server2.thrift.bind.host=0.0.0.0" + " " + + "-Dhive.server2.thrift.port=" + RANDOM_PORT_SECOND) + .withFixedExposedPort(RANDOM_PORT_SECOND, RANDOM_PORT_SECOND) + .dependsOn(ZOOKEEPER_CONTAINER); + + private static final String SYSTEM_PROP_KEY_PREFIX = "fixture.test-native.yaml.database.hive.zookeeper.sde."; + + // Due to https://issues.apache.org/jira/browse/HIVE-28317 , the `initFile` parameter of HiveServer2 JDBC Driver must be an absolute path. + private static final String ABSOLUTE_PATH = Paths.get("src/test/resources/test-native/sql/test-native-databases-hive.sql").toAbsolutePath().normalize().toString(); + + private final String jdbcUrlSuffix = ";serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2"; + + private String jdbcUrlPrefix; + + private TestShardingService testShardingService; + + @BeforeAll + static void beforeAll() { + assertThat(System.getProperty(SYSTEM_PROP_KEY_PREFIX + "ds0.jdbc-url"), is(nullValue())); + assertThat(System.getProperty(SYSTEM_PROP_KEY_PREFIX + "ds1.jdbc-url"), is(nullValue())); + assertThat(System.getProperty(SYSTEM_PROP_KEY_PREFIX + "ds2.jdbc-url"), is(nullValue())); + } + + @AfterAll + static void afterAll() { + NETWORK.close(); + System.clearProperty(SYSTEM_PROP_KEY_PREFIX + "ds0.jdbc-url"); + System.clearProperty(SYSTEM_PROP_KEY_PREFIX + "ds1.jdbc-url"); + System.clearProperty(SYSTEM_PROP_KEY_PREFIX + "ds2.jdbc-url"); + } + + /** + * TODO Need to fix `shardingsphere-parser-sql-hive` module to use {@link TestShardingService#cleanEnvironment()} + * after {@link TestShardingService#processSuccessInHive()}. + * + * @throws SQLException An exception that provides information on a database access error or other errors. + */ + @Test + void assertShardingInLocalTransactions() throws SQLException { + jdbcUrlPrefix = "jdbc:hive2://" + ZOOKEEPER_CONTAINER.getHost() + ":" + ZOOKEEPER_CONTAINER.getMappedPort(2181) + "/"; + DataSource dataSource = createDataSource(); + testShardingService = new TestShardingService(dataSource); + testShardingService.processSuccessInHive(); + } + + /** + * TODO Need to fix `shardingsphere-parser-sql-hive` module to use `initEnvironment()` before {@link TestShardingService#processSuccessInHive()}. + * + * @throws SQLException An exception that provides information on a database access error or other errors. 
+ */ + @SuppressWarnings("unused") + private void initEnvironment() throws SQLException { + testShardingService.getOrderRepository().createTableIfNotExistsInHive(); + testShardingService.getOrderItemRepository().createTableIfNotExistsInHive(); + testShardingService.getAddressRepository().createTableIfNotExistsInHive(); + testShardingService.getOrderRepository().truncateTable(); + testShardingService.getOrderItemRepository().truncateTable(); + testShardingService.getAddressRepository().truncateTable(); + } + + private Connection openConnection() throws SQLException { + Properties props = new Properties(); + return DriverManager.getConnection(jdbcUrlPrefix + jdbcUrlSuffix, props); + } + + private DataSource createDataSource() throws SQLException { + Awaitility.await().atMost(Duration.ofMinutes(2L)).ignoreExceptions().until(() -> { + openConnection().close(); + return true; + }); + try ( + Connection connection = openConnection(); + Statement statement = connection.createStatement()) { + statement.executeUpdate("CREATE DATABASE demo_ds_0"); + statement.executeUpdate("CREATE DATABASE demo_ds_1"); + statement.executeUpdate("CREATE DATABASE demo_ds_2"); + } + HikariConfig config = new HikariConfig(); + config.setDriverClassName("org.apache.shardingsphere.driver.ShardingSphereDriver"); + config.setJdbcUrl("jdbc:shardingsphere:classpath:test-native/yaml/jdbc/databases/hive/zookeeper-sde.yaml?placeholder-type=system_props"); + System.setProperty(SYSTEM_PROP_KEY_PREFIX + "ds0.jdbc-url", jdbcUrlPrefix + "demo_ds_0" + ";initFile=" + ABSOLUTE_PATH + jdbcUrlSuffix); + System.setProperty(SYSTEM_PROP_KEY_PREFIX + "ds1.jdbc-url", jdbcUrlPrefix + "demo_ds_1" + ";initFile=" + ABSOLUTE_PATH + jdbcUrlSuffix); + System.setProperty(SYSTEM_PROP_KEY_PREFIX + "ds2.jdbc-url", jdbcUrlPrefix + "demo_ds_2" + ";initFile=" + ABSOLUTE_PATH + jdbcUrlSuffix); + return new HikariDataSource(config); + } +} diff --git a/test/native/src/test/resources/META-INF/native-image/shardingsphere-test-native-test-metadata/reflect-config.json b/test/native/src/test/resources/META-INF/native-image/shardingsphere-test-native-test-metadata/reflect-config.json index d1eeed2763a9bd..fe6184c73af16c 100644 --- a/test/native/src/test/resources/META-INF/native-image/shardingsphere-test-native-test-metadata/reflect-config.json +++ b/test/native/src/test/resources/META-INF/native-image/shardingsphere-test-native-test-metadata/reflect-config.json @@ -105,14 +105,6 @@ "allPublicMethods": true, "allDeclaredFields": true }, -{ - "condition":{"typeReachable":"org.apache.shardingsphere.test.natived.jdbc.databases.HiveTest"}, - "name":"org.apache.shardingsphere.test.natived.jdbc.databases.HiveTest", - "allDeclaredConstructors": true, - "allDeclaredMethods": true, - "allPublicMethods": true, - "allDeclaredFields": true -}, { "condition":{"typeReachable":"org.apache.shardingsphere.test.natived.jdbc.databases.PostgresTest"}, "name":"org.apache.shardingsphere.test.natived.jdbc.databases.PostgresTest", diff --git a/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive.yaml b/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/simple.yaml similarity index 90% rename from test/native/src/test/resources/test-native/yaml/jdbc/databases/hive.yaml rename to test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/simple.yaml index c999127daec1d1..46056faffc4f9a 100644 --- a/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive.yaml +++ 
b/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/simple.yaml @@ -24,15 +24,15 @@ dataSources: ds_0: dataSourceClassName: com.zaxxer.hikari.HikariDataSource driverClassName: org.apache.hive.jdbc.HiveDriver - jdbcUrl: $${fixture.test-native.yaml.database.hive.ds0.jdbc-url::} + jdbcUrl: $${fixture.test-native.yaml.database.hive.simple.ds0.jdbc-url::} ds_1: dataSourceClassName: com.zaxxer.hikari.HikariDataSource driverClassName: org.apache.hive.jdbc.HiveDriver - jdbcUrl: $${fixture.test-native.yaml.database.hive.ds1.jdbc-url::} + jdbcUrl: $${fixture.test-native.yaml.database.hive.simple.ds1.jdbc-url::} ds_2: dataSourceClassName: com.zaxxer.hikari.HikariDataSource driverClassName: org.apache.hive.jdbc.HiveDriver - jdbcUrl: $${fixture.test-native.yaml.database.hive.ds2.jdbc-url::} + jdbcUrl: $${fixture.test-native.yaml.database.hive.simple.ds2.jdbc-url::} rules: - !SHARDING diff --git a/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/zookeeper-sde.yaml b/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/zookeeper-sde.yaml new file mode 100644 index 00000000000000..6e72411c702240 --- /dev/null +++ b/test/native/src/test/resources/test-native/yaml/jdbc/databases/hive/zookeeper-sde.yaml @@ -0,0 +1,72 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +mode: + type: Standalone + repository: + type: JDBC + +dataSources: + ds_0: + dataSourceClassName: com.zaxxer.hikari.HikariDataSource + driverClassName: org.apache.hive.jdbc.HiveDriver + jdbcUrl: $${fixture.test-native.yaml.database.hive.zookeeper.sde.ds0.jdbc-url::} + ds_1: + dataSourceClassName: com.zaxxer.hikari.HikariDataSource + driverClassName: org.apache.hive.jdbc.HiveDriver + jdbcUrl: $${fixture.test-native.yaml.database.hive.zookeeper.sde.ds1.jdbc-url::} + ds_2: + dataSourceClassName: com.zaxxer.hikari.HikariDataSource + driverClassName: org.apache.hive.jdbc.HiveDriver + jdbcUrl: $${fixture.test-native.yaml.database.hive.zookeeper.sde.ds2.jdbc-url::} + +rules: +- !SHARDING + tables: + t_order: + actualDataNodes: ds_0.t_order, ds_1.t_order, ds_2.t_order + keyGenerateStrategy: + column: order_id + keyGeneratorName: snowflake + t_order_item: + actualDataNodes: ds_0.t_order_item, ds_1.t_order_item, ds_2.t_order_item + keyGenerateStrategy: + column: order_item_id + keyGeneratorName: snowflake + defaultDatabaseStrategy: + standard: + shardingColumn: user_id + shardingAlgorithmName: inline + shardingAlgorithms: + inline: + type: CLASS_BASED + props: + strategy: STANDARD + algorithmClassName: org.apache.shardingsphere.test.natived.commons.algorithm.ClassBasedInlineShardingAlgorithmFixture + keyGenerators: + snowflake: + type: SNOWFLAKE + auditors: + sharding_key_required_auditor: + type: DML_SHARDING_CONDITIONS + +- !BROADCAST + tables: + - t_address + +props: + sql-show: false
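As a companion to the limitation documented above, the following minimal sketch shows the workaround the documentation implies: when the HiveServer2 instances registered under the `/hiveserver2` znode change, the old ShardingSphere JDBC DataSource is closed and rebuilt so that new connections are resolved through ZooKeeper again. It relies only on HikariCP and the ShardingSphere JDBC driver; the class name `DataSourceRecreationExample` and the `recreate()` method are illustrative and not part of this patch.

```java
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

// Illustrative sketch, not part of this patch: rebuilds the ShardingSphere JDBC DataSource
// after the `/hiveserver2` znode has changed, because the pooled `java.sql.Connection`
// objects still point to the old HiveServer2 instances.
public final class DataSourceRecreationExample {
    
    private HikariDataSource dataSource = build();
    
    private static HikariDataSource build() {
        HikariConfig config = new HikariConfig();
        // Assumes the `demo.yaml` configuration shown in the documentation above is on the classpath.
        config.setJdbcUrl("jdbc:shardingsphere:classpath:demo.yaml");
        config.setDriverClassName("org.apache.shardingsphere.driver.ShardingSphereDriver");
        return new HikariDataSource(config);
    }
    
    /**
     * Call after the HiveServer2 instances registered in ZooKeeper have changed.
     */
    public void recreate() {
        dataSource.close();
        dataSource = build();
    }
    
    public HikariDataSource getDataSource() {
        return dataSource;
    }
}
```

Closing the HikariCP pool discards the cached connections to the old HiveServer2 instances; the rebuilt pool performs ZooKeeper service discovery again when the first new connection is opened.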