chore: upgrade the spark version in the integration to 3.2.1
TalkWIthKeyboard committed May 7, 2022
1 parent 7b92fab commit 0b30719
Showing 3 changed files with 8 additions and 10 deletions.
13 changes: 7 additions & 6 deletions .circleci/config.yml
@@ -8,19 +8,20 @@ jobs:
       - image: fishtownanalytics/test-container:10
     steps:
       - checkout
-      - run: tox -e flake8,unit
+      - run: tox -e unit
 
   integration-spark-session:
     environment:
       DBT_INVOCATION_ENV: circle
     docker:
-      - image: godatadriven/pyspark:3.1
+      - image: bitnami/spark:3.2.1
+        user: root
     steps:
       - checkout
       - run: apt-get update
-      - run: python3 -m pip install --upgrade pip
       - run: apt-get install -y git gcc g++ unixodbc-dev libsasl2-dev
-      - run: python3 -m pip install tox
+      - run: python3 -m pip install --upgrade pip
+      - run: python3 -m pip install flake8,tox,pyspark==3.2.1
       - run:
           name: Run integration tests
           command: tox -e integration-spark-session
@@ -33,7 +34,7 @@ jobs:
       DBT_INVOCATION_ENV: circle
     docker:
       - image: fishtownanalytics/test-container:10
-      - image: godatadriven/spark:2
+      - image: godatadriven/spark:3.1
         environment:
           WAIT_FOR: localhost:5432
         command: >
@@ -44,7 +45,7 @@
           --conf spark.hadoop.javax.jdo.option.ConnectionPassword=dbt
           --conf spark.hadoop.javax.jdo.option.ConnectionDriverName=org.postgresql.Driver
           --conf spark.serializer=org.apache.spark.serializer.KryoSerializer
-          --conf spark.jars.packages=org.apache.hudi:hudi-spark-bundle_2.11:0.9.0
+          --conf spark.jars.packages=org.apache.hudi:hudi-spark3.1-bundle_2.12:0.11.0
           --conf spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension
           --conf spark.driver.userClassPathFirst=true
           --conf spark.hadoop.datanucleus.autoCreateTables=true
3 changes: 1 addition & 2 deletions dbt/adapters/spark/impl.py
@@ -1,3 +1,4 @@
+import json
 import re
 from concurrent.futures import Future
 from dataclasses import dataclass
@@ -149,8 +150,6 @@ def list_relations_without_caching(
         relations = []
         view_names = views.columns["viewName"].values()
 
-        raise Exception(f"tbl: {tables.print_json()}")
-
         for tbl in tables:
             rel_type = RelationType('view' if tbl['tableName'] in view_names else 'table')
             _schema = tbl['namespace'] if 'namespace' in tbl else tbl['database']
2 changes: 0 additions & 2 deletions dbt/include/spark/macros/adapters.sql
@@ -179,15 +179,13 @@
   {% call statement('list_tables_without_caching', fetch_result=True) -%}
     show tables in {{ relation.schema }}
   {% endcall %}
-
   {% do return(load_result('list_tables_without_caching').table) %}
 {% endmacro %}
 
 {% macro spark__list_views_without_caching(relation) %}
   {% call statement('list_views_without_caching', fetch_result=True) -%}
     show views in {{ relation.schema }}
   {% endcall %}
-
   {% do return(load_result('list_views_without_caching').table) %}
 {% endmacro %}
 