diff --git a/integration_tests/src/main/python/json_test.py b/integration_tests/src/main/python/json_test.py
index 677b927f4baf..cb8e73dc3220 100644
--- a/integration_tests/src/main/python/json_test.py
+++ b/integration_tests/src/main/python/json_test.py
@@ -183,12 +183,11 @@ def test_json_date_formats_round_trip(spark_tmp_path, date_format, v1_enabled_li
     "'T'HH:mm[:ss]",
     "'T'HH:mm"]
 
-not_utc_allow=['BatchScanExec'] if is_not_utc() else []
-
+not_utc_allow_for_test_json_scan = ['BatchScanExec', 'FileSourceScanExec'] if is_not_utc() else []
 @pytest.mark.parametrize('ts_part', json_supported_ts_parts)
 @pytest.mark.parametrize('date_format', json_supported_date_formats)
 @pytest.mark.parametrize('v1_enabled_list', ["", "json"])
-@allow_non_gpu(*not_utc_allow)
+@allow_non_gpu(*not_utc_allow_for_test_json_scan)
 def test_json_ts_formats_round_trip(spark_tmp_path, date_format, ts_part, v1_enabled_list):
     full_format = date_format + ts_part
     data_gen = TimestampGen()
@@ -284,6 +283,7 @@ def do_read(spark):
 @pytest.mark.parametrize('allow_non_numeric_numbers', ["true", "false"])
 @pytest.mark.parametrize('allow_numeric_leading_zeros', ["true"])
 @pytest.mark.parametrize('ansi_enabled', ["true", "false"])
+@allow_non_gpu(*not_utc_allow_for_test_json_scan)
 def test_basic_json_read(std_input_path, filename, schema, read_func, allow_non_numeric_numbers,
                          allow_numeric_leading_zeros, ansi_enabled, spark_tmp_table_factory):
     updated_conf = copy_and_update(_enable_all_types_conf, {'spark.sql.ansi.enabled': ansi_enabled,
diff --git a/integration_tests/src/main/python/schema_evolution_test.py b/integration_tests/src/main/python/schema_evolution_test.py
index f9766a80eef2..edbf14900372 100644
--- a/integration_tests/src/main/python/schema_evolution_test.py
+++ b/integration_tests/src/main/python/schema_evolution_test.py
@@ -16,7 +16,7 @@
 from conftest import is_not_utc
 from data_gen import *
 from datetime import date, datetime, timezone
-from marks import ignore_order
+from marks import ignore_order, allow_non_gpu
 import pytest
 from spark_session import is_databricks_runtime, is_databricks113_or_later
 
@@ -60,8 +60,10 @@ def get_ddl(col_gen_pairs):
     """Given a list of column_name, data_generator pairs, returns the corresponding DDL string"""
     return ', '.join([f"{c} {g.data_type.simpleString()}" for c, g in col_gen_pairs])
 
+non_utc_allow_for_test_column_add_after_partition = ['DataWritingCommandExec'] if is_not_utc() else []
 @ignore_order(local=True)
 @pytest.mark.parametrize("format", _formats)
+@allow_non_gpu(*non_utc_allow_for_test_column_add_after_partition)
 def test_column_add_after_partition(spark_tmp_table_factory, format):
     # Databricks 10.4 appears to be missing https://issues.apache.org/jira/browse/SPARK-39417
     # so avoid generating nulls for numeric partitions
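
Note: for reference, the pattern these hunks apply is to compute a fallback allow-list once at
module import time and splat it into @allow_non_gpu, so the decorator permits the named execs
on the CPU only when the session timezone is not UTC and is a no-op otherwise. A minimal sketch
of that pattern, assuming the repo's conftest.is_not_utc helper and marks.allow_non_gpu
decorator (both imported in the diff above); the test name and body here are hypothetical:

    from conftest import is_not_utc
    from marks import allow_non_gpu

    # Evaluated once at import time: under a non-UTC session timezone the named
    # execs are expected to fall back to the CPU, so the test must tolerate them;
    # under UTC the list is empty and the decorator allows nothing extra.
    non_utc_allow_example = ['BatchScanExec'] if is_not_utc() else []

    @allow_non_gpu(*non_utc_allow_example)
    def test_example_scan():  # hypothetical test, not part of this diff
        ...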