diff --git a/website/dbt-versions.js b/website/dbt-versions.js index adee9230c7a..3eff99e7f98 100644 --- a/website/dbt-versions.js +++ b/website/dbt-versions.js @@ -24,13 +24,61 @@ exports.versions = [ version: "1.2", EOLDate: "2023-07-26", }, - { - version: "1.1", - EOLDate: "2023-04-28", - }, ] exports.versionedPages = [ + { + "page": "docs/build/build-metrics-intro", + "firstVersion": "1.6", + }, + { + "page": "docs/build/sl-getting-started", + "firstVersion": "1.6", + }, + { + "page": "docs/build/about-metricflow", + "firstVersion": "1.6", + }, + { + "page": "docs/build/join-logic", + "firstVersion": "1.6", + }, + { + "page": "docs/build/validation", + "firstVersion": "1.6", + }, + { + "page": "docs/build/semantic-models", + "firstVersion": "1.6", + }, + { + "page": "docs/build/group-by", + "firstVersion": "1.6", + }, + { + "page": "docs/build/entities", + "firstVersion": "1.6", + }, + { + "page": "docs/build/metrics-overview", + "firstVersion": "1.6", + }, + { + "page": "docs/build/cumulative", + "firstVersion": "1.6", + }, + { + "page": "docs/build/derived", + "firstVersion": "1.6", + }, + { + "page": "docs/build/measure-proxy", + "firstVersion": "1.6", + }, + { + "page": "docs/build/ratio", + "firstVersion": "1.6", + }, { "page": "reference/commands/clone", "firstVersion": "1.6", @@ -122,70 +170,6 @@ exports.versionedPages = [ { "page": "reference/resource-configs/grants", "firstVersion": "1.2", - }, - { - "page": "docs/contributing/testing-a-new-adapter", - "firstVersion": "1.1", - }, - { - "page": "reference/dbt-jinja-functions/selected_resources", - "firstVersion": "1.1", - }, - { - "page": "reference/dbt-jinja-functions/print", - "firstVersion": "1.1", - }, - { - "page": "docs/build/build-metrics-intro", - "firstVersion": "1.6", - }, - { - "page": "docs/build/sl-getting-started", - "firstVersion": "1.6", - }, - { - "page": "docs/build/about-metricflow", - "firstVersion": "1.6", - }, - { - "page": "docs/build/join-logic", - "firstVersion": "1.6", - }, - { - "page": "docs/build/validation", - "firstVersion": "1.6", - }, - { - "page": "docs/build/semantic-models", - "firstVersion": "1.6", - }, - { - "page": "docs/build/group-by", - "firstVersion": "1.6", - }, - { - "page": "docs/build/entities", - "firstVersion": "1.6", - }, - { - "page": "docs/build/metrics-overview", - "firstVersion": "1.6", - }, - { - "page": "docs/build/cumulative", - "firstVersion": "1.6", - }, - { - "page": "docs/build/derived", - "firstVersion": "1.6", - }, - { - "page": "docs/build/measure-proxy", - "firstVersion": "1.6", - }, - { - "page": "docs/build/ratio", - "firstVersion": "1.6", } ] diff --git a/website/docs/community/resources/jobs-terms-and-conditions.md b/website/docs/community/resources/jobs-terms-and-conditions.md new file mode 100644 index 00000000000..f2f2134f847 --- /dev/null +++ b/website/docs/community/resources/jobs-terms-and-conditions.md @@ -0,0 +1,16 @@ +--- +title: "dbt Labs Community #jobs Channels Terms and Conditions" +id: "jobs-terms-and-conditions" +description: "Before posting a job in the dbt Community or submitting an application, review these terms and conditions." +--- + +I agree to abide by the [dbt Community Code of Conduct](community/resources/code-of-conduct) and all laws applicable to me in my use of the dbt Community's #jobs channels. 
I further agree:
+
+- dbt Labs is not responsible for, nor does it warrant or guarantee, the validity, accuracy, completeness, legality, or reliability of any functionality of any #jobs channel, any posting's content, or any application and/or solicitation of any kind of employment.
+- dbt Labs does not review and approve job-related content.
+- dbt Labs disclaims liability of any kind whatsoever for any type of damage that occurs while using the community Slack for job-related reasons, and I waive any type of claim (including actual, special or consequential damages) to the maximum extent permitted by law.
+- Without limitation, dbt Labs disclaims liability for quality, performance, merchantability, and fitness for a particular purpose, express or implied, that may arise out of my use of the community Slack for job-related content, my reliance on such information, and/or my provision/receipt of job-related information.
+- I understand that no internet-based site is without risk, and my use is at my own risk.
+- My use of any job-posting template (or other forum for providing job-related information) confirms my consent to provide the data posted, confirms that I have permission to post such data, and is subject to the terms of the [dbt Labs privacy policy](https://www.getdbt.com/cloud/privacy-policy).
+
+For further information, please contact [legal@dbtlabs.com](mailto:legal@dbtlabs.com).
diff --git a/website/docs/community/resources/oss-expectations.md b/website/docs/community/resources/oss-expectations.md
index 7bcc79cac9e..649a9dea94f 100644
--- a/website/docs/community/resources/oss-expectations.md
+++ b/website/docs/community/resources/oss-expectations.md
@@ -82,8 +82,8 @@ In some cases, the right resolution to an open issue might be tangential to the
 | `triage` | This is a new issue which has not yet been reviewed by a maintainer. This label is removed when a maintainer reviews and responds to the issue. |
 | `bug` | This issue represents a defect or regression from the behavior that's documented, or that you reasonably expect |
 | `enhancement` | This issue represents net-new functionality, including an extension of an existing capability |
-| `good first issue` | This issue does not require deep knowledge of the codebase to implement. This issue is appropriate for a first-time contributor. |
-| `help wanted` | This issue is trickier than a "good first issue." The required changes are scattered across the codebase, or more difficult to test. The maintainers are happy to help an experienced community contributor; they aren't planning to prioritize this issue themselves. |
+| `good_first_issue` | This issue does not require deep knowledge of the codebase to implement. This issue is appropriate for a first-time contributor. |
+| `help_wanted` | This issue is trickier than a "good first issue." The required changes are scattered across the codebase, or more difficult to test. The maintainers are happy to help an experienced community contributor; they aren't planning to prioritize this issue themselves. |
 | `duplicate` | This issue is functionally identical to another open issue. The maintainers will close this issue and encourage community members to focus conversation on the other one. |
 | `stale` | This is an old issue which has not recently been updated. In repositories with a lot of activity, stale issues will periodically be closed.
| | `wontfix` | This issue does not require a code change in the repository, or the maintainers are unwilling to merge a change which implements the proposed behavior. | diff --git a/website/docs/docs/build/about-metricflow.md b/website/docs/docs/build/about-metricflow.md index b1a22b9072c..68879911597 100644 --- a/website/docs/docs/build/about-metricflow.md +++ b/website/docs/docs/build/about-metricflow.md @@ -10,9 +10,9 @@ This guide introduces MetricFlow's fundamental ideas for new users. MetricFlow, :::info -MetricFlow is a new way to define metrics in dbt and one of the key components of the [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl). It handles SQL query construction and defines the specification for dbt semantic models and metrics. +MetricFlow is a new way to define metrics and one of the key components of the [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl). It handles SQL query construction and defines the specification for dbt semantic models and metrics. -To fully experience the dbt Semantic Layer, including the ability to query dbt metrics via external integrations, you'll need a [dbt Cloud Team or Enterprise account](https://www.getdbt.com/pricing/). +MetricFlow is currently available on dbt v1.6 or higher for all users. dbt Core users can use the MetricFlow CLI to define metrics in their local dbt Core project. However, to experience the power of the universal [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl) and query those metrics in downstream tools, you'll need a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. ::: diff --git a/website/docs/docs/build/build-metrics-intro.md b/website/docs/docs/build/build-metrics-intro.md index 232b3f83ee0..a6fab61d576 100644 --- a/website/docs/docs/build/build-metrics-intro.md +++ b/website/docs/docs/build/build-metrics-intro.md @@ -12,15 +12,14 @@ Use MetricFlow in dbt to centrally define your metrics. As a key component of th Use familiar constructs like semantic models and metrics to avoid duplicative coding, optimize your development workflow, ensure data governance for company metrics, and guarantee consistency for data consumers. :::info -MetricFlow is currently available on dbt v1.6 or higher. MetricFlow provides a new way to define metrics in dbt and replaces the dbt_metrics package. +MetricFlow is currently available on dbt v1.6 or higher and allows users to define metrics in their dbt project whether in dbt Cloud or dbt Core. dbt Core users can use the MetricFlow CLI to define metrics in their local dbt Core project. However, to experience the power of the universal [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl) and query those metrics in downstream tools, you'll need a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. -To use the dbt Semantic Layer, you must have a [dbt Cloud Team or Enterprise account](https://www.getdbt.com/pricing/). ::: Before you start, consider the following guidelines: - Define metrics in YAML and query them using these [new metric specifications](https://github.com/dbt-labs/dbt-core/discussions/7456). -- You must be on dbt v1.6 or higher to use MetricFlow. [Upgrade your dbt Cloud version](/docs/dbt-versions/upgrade-core-in-cloud) to get started. +- You must be on dbt v1.6 or higher to use MetricFlow. [Upgrade your dbt version](/docs/dbt-versions/upgrade-core-in-cloud) to get started. - Use MetricFlow with Snowflake, BigQuery, Databricks, Postgres (CLI only), or Redshift. 
(dbt Cloud Postgres support coming soon) - Unlock insights and query your metrics using the [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl) and its diverse range of [available integrations](/docs/use-dbt-semantic-layer/avail-sl-integrations). diff --git a/website/docs/docs/build/custom-databases.md b/website/docs/docs/build/custom-databases.md index 300fd3147f1..dd54d6998e8 100644 --- a/website/docs/docs/build/custom-databases.md +++ b/website/docs/docs/build/custom-databases.md @@ -54,8 +54,6 @@ select * from ... ### generate_database_name -New in v0.16.0 - The database name generated for a model is controlled by a macro called `generate_database_name`. This macro can be overridden in a dbt project to change how dbt generates model database names. This macro works similarly to the [generate_schema_name](/docs/build/custom-schemas#advanced-custom-schema-configuration) macro. To override dbt's database name generation, create a macro named `generate_database_name` in your own dbt project. The `generate_database_name` macro accepts two arguments: diff --git a/website/docs/docs/build/custom-schemas.md b/website/docs/docs/build/custom-schemas.md index d8a319f40de..ad9fe997483 100644 --- a/website/docs/docs/build/custom-schemas.md +++ b/website/docs/docs/build/custom-schemas.md @@ -180,13 +180,6 @@ The following context methods _are_ available in the `generate_schema_name` macr ### Which vars are available in generate_schema_name? - - -Variable semantics have changed in dbt v0.17.0. See the [migration guide](/guides/migration/versions) -for more information on these changes. - - - Globally-scoped variables and variables defined on the command line with [--vars](/docs/build/project-variables) are accessible in the `generate_schema_name` context. diff --git a/website/docs/docs/build/exposures.md b/website/docs/docs/build/exposures.md index f58903a9726..65c0792e0a0 100644 --- a/website/docs/docs/build/exposures.md +++ b/website/docs/docs/build/exposures.md @@ -4,13 +4,6 @@ sidebar_label: "Exposures" id: "exposures" --- - - -* **v0.18.1**: Exposures are new! -* **v0.20.0**: Exposures support `tags` and `meta` properties - - - Exposures make it possible to define and describe a downstream use of your dbt project, such as in a dashboard, application, or data science pipeline. By defining exposures, you can then: - run, test, and list resources that feed into your exposure - populate a dedicated page in the auto-generated [documentation](/docs/collaborate/documentation) site with context relevant to data consumers diff --git a/website/docs/docs/build/hooks-operations.md b/website/docs/docs/build/hooks-operations.md index 1abc5657bad..effbebb3c37 100644 --- a/website/docs/docs/build/hooks-operations.md +++ b/website/docs/docs/build/hooks-operations.md @@ -68,127 +68,6 @@ You can use hooks to provide database-specific functionality not available out-o - - - - -### Examples using hooks - -Here's a minimal example of using hooks to grant privileges. For more information, see [`on-run-start` & `on-run-end` hooks](/reference/project-configs/on-run-start-on-run-end) and [`pre-hook` & `post-hook`](/reference/resource-configs/pre-hook-post-hook) reference sections. 
- - - -```yml -on-run-end: - - "grant usage on {{ target.schema }} to role reporter" - -models: - +post-hook: - - "grant select on {{ this }} to role reporter" - -``` - - - -You can also apply the `post-hook` to individual models using a `config` block: - - - -```sql -{{ config( - post_hook=[ - "grant select on {{ this }} to role reporter" - ] -) }} - -select ... - -``` - - - -You should use database-specific syntax when appropriate: - - - -
- - - -```sql -{{ config( - post_hook=[ - 'grant `roles/bigquery.dataViewer` on {{ this.type }} {{ this }} to "user:someone@yourcompany.com"' - ] -) }} - -select ... - -``` - - - -
- -
- - - -```sql -{{ config( - post_hook=[ - "grant select on {{ this }} to `someone@yourcompany.com`" - ] -) }} - -select ... - -``` - - - -
- -
- - - -```sql -{{ config( - post_hook=[ - "grant select on {{ this }} to reporter" - ] -) }} - -select ... - -``` - - - -
- -
- - - -```sql -{{ config( - post_hook=[ - "grant select on {{ this }} to role reporter" - ] -) }} - -select ... - -``` - - - -
- -
-
### Calling a macro in a hook
diff --git a/website/docs/docs/build/incremental-models.md b/website/docs/docs/build/incremental-models.md
index d3c3f25890b..07a571cd4db 100644
--- a/website/docs/docs/build/incremental-models.md
+++ b/website/docs/docs/build/incremental-models.md
@@ -79,8 +79,6 @@ A `unique_key` enables updating existing rows instead of just appending new rows
 Not specifying a `unique_key` will result in append-only behavior, which means dbt inserts all rows returned by the model's SQL into the preexisting target table without regard for whether the rows represent duplicates.
-
-
 The optional `unique_key` parameter specifies a field (or combination of fields) that defines the grain of your model. That is, the field(s) identify a single unique row. You can define `unique_key` in a configuration block at the top of your model, and it can be a single column name or a list of column names.
 The `unique_key` should be supplied in your model definition as a string representing a single column or a list of single-quoted column names that can be used together, for example, `['col1', 'col2', …]`. Columns used in this way should not contain any nulls, or the incremental model run may fail. Either ensure that each column has no nulls (for example with `coalesce(COLUMN_NAME, 'VALUE_IF_NULL')`), or define a single-column [surrogate key](/terms/surrogate-key) (for example with [`dbt_utils.generate_surrogate_key`](https://github.com/dbt-labs/dbt-utils#generate_surrogate_key-source)).
@@ -95,8 +93,6 @@ When you pass a list in this way, please ensure that each column does not contai
 Alternatively, you can define a single-column [surrogate key](/terms/surrogate-key), for example with [`dbt_utils.generate_surrogate_key`](https://github.com/dbt-labs/dbt-utils#generate_surrogate_key-source).
 :::
-
-
 When you define a `unique_key`, you'll see this behavior for each row of "new" data returned by your dbt model:
 * If the same `unique_key` is present in the "new" and "old" model data, dbt will update/replace the old row with the new row of data. The exact mechanics of how that update/replace takes place will vary depending on your database, [incremental strategy](#about-incremental_strategy), and [strategy specific configs](#strategy-specific-configs).
diff --git a/website/docs/docs/build/materializations.md b/website/docs/docs/build/materializations.md
index 619880e5d1b..463651ccc77 100644
--- a/website/docs/docs/build/materializations.md
+++ b/website/docs/docs/build/materializations.md
@@ -83,7 +83,7 @@ When using the `table` materialization, your model is rebuilt as a
-
-
-* **v1.3.0**: Metrics have been moved out of the experimental phase
-* **v1.0.0**: Metrics are new and experimental
-
-
 A metric is an aggregation over a table that supports zero or more dimensions.
Some examples of metrics include: - active users @@ -236,7 +230,7 @@ The type of calculation (aggregation or expression) that is applied to the sql p -| Metric Calculation Method Metric Type | Description | +| Metric Calculation Method | Description | |----------------|----------------------------------------------------------------------------| | count | This metric type will apply the `count` aggregation to the specified field | | count_distinct | This metric type will apply the `count` aggregation to the specified field, with an additional distinct statement inside the aggregation | @@ -468,16 +462,6 @@ packages: - - -```yml -packages: - - package: dbt-labs/metrics - version: [">=0.2.0", "<0.3.0"] -``` - - - Once the package has been installed with `dbt deps`, make sure to run the `dbt_metrics_default_calendar` model as this is required for macros used to query metrics. More information on this, and additional calendar functionality, can be found in the [project README](https://github.com/dbt-labs/dbt_metrics#calendar). ### Querying metrics with `metrics.calculate` @@ -496,19 +480,6 @@ from {{ metrics.calculate( - - -```sql -select * -from {{ metrics.calculate( - metric_name='new_customers', - grain='week', - dimensions=['plan', 'country'] -) }} -``` - - - ### Supported inputs The example above doesn't display all the potential inputs you can provide to the macro. @@ -517,7 +488,7 @@ You may find some pieces of functionality, like secondary calculations, complica | Input | Example | Description | Required | | ----------- | ----------- | ----------- | -----------| -| metric_listmetric_name | `metric('some_metric)'`,
[`metric('some_metric)'`,
`metric('some_other_metric)'`]
`'metric_name'`
| The metric(s) to be queried by the macro. If multiple metrics required, provide in list format.The name of the metric | Required |
+| metric_list | `metric('some_metric')`,
[`metric('some_metric')`,
`metric('some_other_metric')`]
| The metric(s) to be queried by the macro. If multiple metrics required, provide in list format. | Required | | grain | `'day'`, `'week'`,
`'month'`, `'quarter'`,
`'year'`
| The time grain that the metric will be aggregated to in the returned dataset | Optional | | dimensions | [`'plan'`,
`'country'`] | The dimensions you want the metric to be aggregated by in the returned dataset | Optional | | secondary_calculations | [`metrics.period_over_period( comparison_strategy="ratio", interval=1, alias="pop_1wk")`] | Performs the specified secondary calculation on the metric results. Examples include period over period calculations, rolling calculations, and period to date calculations. | Optional | @@ -669,12 +640,6 @@ from {{ metrics.develop( - - -Functionality for `develop` is only supported in v1.2 and higher. Please navigate to those versions for information about this method of metric development. - - - #### Multiple/Derived Metrics with `metrics.develop` If you have a more complicated use case that you are interested in testing, the develop macro also supports this behavior. The only caveat is that you must include the raw tags for any provided metric yml that contains a derived metric. Example below: diff --git a/website/docs/docs/build/packages.md b/website/docs/docs/build/packages.md index 97e8784416e..74e25262994 100644 --- a/website/docs/docs/build/packages.md +++ b/website/docs/docs/build/packages.md @@ -48,11 +48,7 @@ packages: - - -- **v1.0.0:** The default [`packages-install-path`](/reference/project-configs/packages-install-path) has been updated to be `dbt_packages` instead of `dbt_modules`. - - +The default [`packages-install-path`](/reference/project-configs/packages-install-path) is `dbt_packages`. 3. Run `dbt deps` to install the package(s). Packages get installed in the `dbt_packages` directory – by default this directory is ignored by git, to avoid duplicating the source code for the package. @@ -89,13 +85,6 @@ In comparison, other package installation methods are unable to handle the dupli #### Prerelease versions - - -* `v0.20.1`: Fixed handling for prerelease versions. Introduced `install-prerelease` parameter. -* `v1.0.0`: When you provide an explicit prerelease version, dbt will install that version. - - - Some package maintainers may wish to push prerelease versions of packages to the dbt Hub, in order to test out new functionality or compatibility with a new version of dbt. A prerelease version is demarcated by a suffix, such as `a1` (first alpha), `b2` (second beta), or `rc3` (third release candidate). By default, `dbt deps` will not include prerelease versions when resolving package dependencies. You can enable the installation of prereleases in one of two ways: @@ -130,12 +119,6 @@ packages: - - -* `v0.20.0`: Introduced the ability to specify commit hashes as package revisions - - - Add the Git URL for the package, and optionally specify a revision. The revision can be: - a branch name - a tagged release @@ -265,12 +248,6 @@ Read more about creating a Personal Access Token [here](https://confluence.atlas #### Configure subdirectory for packaged projects - - -* `v0.20.0`: Introduced the ability to specify `subdirectory` - - - In general, dbt expects `dbt_project.yml` to be located as a top-level file in a package. If the packaged project is instead nested in a subdirectory—perhaps within a much larger mono repo—you can optionally specify the folder path as `subdirectory`. dbt will attempt a [sparse checkout](https://git-scm.com/docs/git-sparse-checkout) of just the files located within that subdirectory. Note that you must be using a recent version of `git` (`>=2.26.0`). 
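To make the `subdirectory` setup above concrete, here is a minimal `packages.yml` sketch for a packaged project nested inside a monorepo; the repository URL, revision, and folder path are placeholders rather than a real package:

```yml
packages:
  - git: "https://github.com/your-org/your-mono-repo"   # placeholder URL
    revision: "v0.1.0"                                  # a branch, tag, or commit hash
    # dbt sparse-checks-out only this folder, which must contain dbt_project.yml
    subdirectory: "transform/common-macros"
```

With an entry like this, `dbt deps` should fetch only the named folder, provided your local `git` is `>=2.26.0` as noted above.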
diff --git a/website/docs/docs/build/sl-getting-started.md b/website/docs/docs/build/sl-getting-started.md index 227cfee20b3..f070bc27538 100644 --- a/website/docs/docs/build/sl-getting-started.md +++ b/website/docs/docs/build/sl-getting-started.md @@ -14,9 +14,9 @@ import DefineMetrics from '/snippets/_sl-define-metrics.md'; import ConfigMetric from '/snippets/_sl-configure-metricflow.md'; import TestQuery from '/snippets/_sl-test-and-query-metrics.md'; -This getting started page presents a sample workflow to help you create your first metrics in dbt Cloud or the command-line interface (CLI). It uses the [Jaffle shop example project](https://github.com/dbt-labs/jaffle-sl-template) as the project data source and is available for you to use. If you prefer, you can create semantic models and metrics for your own dbt project. +This getting started page presents a sample workflow to help you create your first metrics in dbt Cloud or the command-line interface (CLI). It uses the [Jaffle shop example project](https://github.com/dbt-labs/jaffle-sl-template) as the project data source and is available for you to use. -This guide will walk you through how to: +If you prefer, you can create semantic models and metrics for your own dbt project. This page will guide you on how to: - [Create a semantic model](#create-a-semantic-model) using MetricFlow - [Define metrics](#define-metrics) using MetricFlow @@ -25,7 +25,10 @@ This guide will walk you through how to: - [Set up dbt Semantic Layer](#set-up-dbt-semantic-layer) in dbt Cloud - [Connect to and query the API](#connect-and-query-api) with dbt Cloud -To experience the power of a universal [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl) and query metrics in downstream tools, you'll need a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. + +MetricFlow allows users to define metrics in their dbt project whether in dbt Cloud or in dbt Core. dbt Core users can use the [MetricFlow CLI](/docs/build/metricflow-cli) to define metrics in their local dbt Core project. + +However, to experience the power of the universal [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl) and query those metrics in downstream tools, you'll need a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. ## Prerequisites @@ -35,7 +38,7 @@ To experience the power of a universal [dbt Semantic Layer](/docs/use-dbt-semant - Create a successful run in the environment where you configure the Semantic Layer. - **Note:** Semantic Layer currently supports the Deployment environment for querying. (_development querying experience coming soon_) - Set up the [Semantic Layer API](/docs/dbt-cloud-apis/sl-api-overview) in the integrated tool to import metric definitions. - - **Note:** dbt Core or Developer accounts can only query data manually using the [MetricFlow CLI](/docs/build/metricflow-cli) and SQL. To dynamically query metrics using external tools, you must have a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account with access to the Semantic Layer API.
+ - **Note:** To access the API and query metrics in downstream tools, you must have a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. dbt Core or Developer accounts can define metrics using [MetricFlow CLI](/docs/build/metricflow-cli) or the [dbt Cloud IDE](/docs/cloud/dbt-cloud-ide/develop-in-the-cloud).
- Understand [MetricFlow's](/docs/build/about-metricflow) key concepts, which powers the revamped dbt Semantic Layer. :::tip @@ -80,7 +83,7 @@ import SlSetUp from '/snippets/_new-sl-setup.md'; You can query your metrics in a JDBC-enabled tool or use existing first-class integrations with the dbt Semantic Layer. -Before you begin, you must have a dbt Cloud Team or Enterprise [multi-tenant](/docs/cloud/about-cloud/regions-ip-addresses) deployment, hosted in North America (cloud.getdbt.com login). +You must have a dbt Cloud Team or Enterprise [multi-tenant](/docs/cloud/about-cloud/regions-ip-addresses) deployment, hosted in North America. (Additional region support coming soon) - To learn how to use the JDBC API and what tools you can query it with, refer to the {frontMatter.meta.api_name}.
@@ -105,12 +108,10 @@ User data passes through the Semantic Layer on its way back from the warehouse.
Is the dbt Semantic Layer open source? -The dbt Semantic Layer is proprietary, however, some components of the dbt Semantic Layer are open source, like dbt-core and MetricFlow.

The universal dbt Semantic Layer is available to all Team and Enterprise Plans during public beta. Users on dbt Cloud Developer plans or dbt Core users can use MetricFlow to only define and test metrics locally.
+The dbt Semantic Layer is proprietary, however, some components of the dbt Semantic Layer are open source, like dbt-core and MetricFlow.

dbt Cloud Developer or dbt Core users can define metrics in their project, including a local dbt Core project, using the dbt Cloud IDE or the MetricFlow CLI. However, to experience the universal dbt Semantic Layer and access those metrics using the API or downstream tools, users must be on a dbt Cloud Team or Enterprise plan.

## Next steps

-Review the following documents to learn more and get started:
-
 - [About MetricFlow](/docs/build/about-metricflow)
 - [Build your metrics](/docs/build/build-metrics-intro)
 - [Get started with the dbt Semantic Layer](/docs/use-dbt-semantic-layer/quickstart-sl)
diff --git a/website/docs/docs/build/tests.md b/website/docs/docs/build/tests.md
index c107dacf7b2..fa78d0df905 100644
--- a/website/docs/docs/build/tests.md
+++ b/website/docs/docs/build/tests.md
@@ -19,11 +19,7 @@ Tests are assertions you make about your models and other resources in your dbt
 You can use tests to improve the integrity of the SQL in each model by making assertions about the results generated. Out of the box, you can test whether a specified column in a model only contains non-null values, unique values, or values that have a corresponding value in another model (for example, a `customer_id` for an `order` corresponds to an `id` in the `customers` model), and values from a specified list. You can extend tests to suit business logic specific to your organization – any assertion that you can make about your model in the form of a select query can be turned into a test.
-
-
-* `v0.20.0`: Both types of tests return a set of failing records. Previously, generic/schema tests returned a numeric value representing failures. Generic tests (f.k.a. schema tests) are defined using `test` blocks instead of macros prefixed `test_`.
-
-
+Both types of tests return a set of failing records. Previously, generic/schema tests returned a numeric value representing failures. Generic tests (f.k.a. schema tests) are defined using `test` blocks instead of macros prefixed `test_`.
 Like almost everything in dbt, tests are SQL queries. In particular, they are `select` statements that seek to grab "failing" records, ones that disprove your assertion. If you assert that a column is unique in a model, the test query selects for duplicates; if you assert that a column is never null, the test seeks after nulls. If the test returns zero failing rows, it passes, and your assertion has been validated.
@@ -245,12 +241,6 @@ where {{ column_name }} is null
 ## Storing test failures
-
-
-* `v0.20.0`: Introduced storing test failures in the database
-
-
 Normally, a test query will calculate failures as part of its execution. If you set the optional `--store-failures` flag or [`store_failures` config](/reference/resource-configs/store_failures), dbt will first save the results of a test query to a table in the database, and then query that table to calculate the number of failures.
This workflow allows you to query and examine failing records much more quickly in development: diff --git a/website/docs/docs/cloud/connect-data-platform/connect-snowflake.md b/website/docs/docs/cloud/connect-data-platform/connect-snowflake.md index 4f31c56e8aa..62a58f6e1c5 100644 --- a/website/docs/docs/cloud/connect-data-platform/connect-snowflake.md +++ b/website/docs/docs/cloud/connect-data-platform/connect-snowflake.md @@ -30,31 +30,34 @@ to authenticate dbt Cloud to run queries against Snowflake on behalf of a Snowfl ### Key Pair + **Available in:** Development environments, Deployment environments The `Keypair` auth method uses Snowflake's [Key Pair Authentication](https://docs.snowflake.com/en/user-guide/python-connector-example.html#using-key-pair-authentication) to authenticate Development or Deployment credentials for a dbt Cloud project. -After [generating an encrypted key pair](https://docs.snowflake.com/en/user-guide/key-pair-auth.html#configuring-key-pair-authentication), be sure to set the `rsa_public_key` for the Snowflake user to authenticate in dbt Cloud: +1. After [generating an encrypted key pair](https://docs.snowflake.com/en/user-guide/key-pair-auth.html#configuring-key-pair-authentication), be sure to set the `rsa_public_key` for the Snowflake user to authenticate in dbt Cloud: ```sql alter user jsmith set rsa_public_key='MIIBIjANBgkqh...'; ``` -Finally, set the "Private Key" and "Private Key Passphrase" fields in the "Edit -Credentials" page to finish configuring dbt Cloud to authenticate with Snowflake -using a key pair. - -**Note:** At this time ONLY Encrypted Private Keys are supported by dbt Cloud, and the keys must be of size 4096 or smaller. +2. Finally, set the **Private Key** and **Private Key Passphrase** fields in the **Credentials** page to finish configuring dbt Cloud to authenticate with Snowflake using a key pair. + + **Note:** At this time ONLY Encrypted Private Keys are supported by dbt Cloud, and the keys must be of size 4096 or smaller. -In order to successfully fill in the Private Key field, you **must** include the commented lines below when you add the passphrase. Leaving the `PRIVATE KEY PASSPHRASE` field empty will return an error - have a look at the examples below: +3. To successfully fill in the Private Key field, you **must** include commented lines when you add the passphrase. Leaving the **Private Key Passphrase** field empty will return an error. If you're receiving a `Could not deserialize key data` or `JWT token` error, refer to [Troubleshooting](#troubleshooting) for more info. **Example:** + ```sql -----BEGIN ENCRYPTED PRIVATE KEY----- -< encrypted private key contents here > +< encrypted private key contents here - line 1 > +< encrypted private key contents here - line 2 > +< ... > -----END ENCRYPTED PRIVATE KEY----- ``` - + + ### Snowflake OAuth @@ -68,3 +71,36 @@ more information on configuring a Snowflake OAuth connection in dbt Cloud, pleas ## Configuration To learn how to optimize performance with data platform-specific configurations in dbt Cloud, refer to [Snowflake-specific configuration](/reference/resource-configs/snowflake-configs). + +## Troubleshooting + + +If you're receiving a `Could not deserialize key data` or `JWT token` error, refer to the following causes and solutions: + +
+ +Error: Could not deserialize key data + + - Possible cause + + - This could be because of mistakes like not copying correctly, missing dashes, or leaving out commented lines. + - Solution + + - You can copy the key from its source and paste it into a text editor to verify it before using it in dbt Cloud. + +
+ +
Error: JWT token
+
+  - Possible causes
+
+    - This could be a transient issue between Snowflake and dbt Cloud. When connecting to Snowflake, dbt gets a JWT token valid for only 60 seconds. If there's no response from Snowflake within this time, you might see a `JWT token is invalid` error in dbt Cloud.
+    - The public key was not entered correctly in Snowflake.
+
+  - Solutions
+
+    - Retry the connection from dbt; each new attempt requests a fresh JWT token, so transient timeouts typically clear on retry.
+    - Confirm and enter Snowflake's public key correctly. Additionally, you can reach out to Snowflake for help or refer to this Snowflake doc for more info: [Key-Based Authentication Failed with JWT token is invalid Error](https://community.snowflake.com/s/article/Key-Based-Authentication-Failed-with-JWT-token-is-invalid-Error).
diff --git a/website/docs/docs/cloud/git/authenticate-azure.md b/website/docs/docs/cloud/git/authenticate-azure.md index 9e755519e67..03020ccca73 100644 --- a/website/docs/docs/cloud/git/authenticate-azure.md +++ b/website/docs/docs/cloud/git/authenticate-azure.md @@ -26,3 +26,4 @@ You will be directed back to dbt Cloud, and your profile should be linked. You a ## FAQs + diff --git a/website/docs/docs/cloud/git/connect-github.md b/website/docs/docs/cloud/git/connect-github.md index 5d27012195d..771e4286ef6 100644 --- a/website/docs/docs/cloud/git/connect-github.md +++ b/website/docs/docs/cloud/git/connect-github.md @@ -78,5 +78,5 @@ The next time you log into dbt Cloud, you will be able to do so via OAuth throug ## FAQs - + diff --git a/website/docs/docs/cloud/git/connect-gitlab.md b/website/docs/docs/cloud/git/connect-gitlab.md index 9bf0d3971e1..53fde5f4878 100644 --- a/website/docs/docs/cloud/git/connect-gitlab.md +++ b/website/docs/docs/cloud/git/connect-gitlab.md @@ -124,3 +124,4 @@ If you imported a repository using the dbt Cloud native integration with GitLab, + diff --git a/website/docs/docs/cloud/git/import-a-project-by-git-url.md b/website/docs/docs/cloud/git/import-a-project-by-git-url.md index d84eb99dab8..ba53baa33ea 100644 --- a/website/docs/docs/cloud/git/import-a-project-by-git-url.md +++ b/website/docs/docs/cloud/git/import-a-project-by-git-url.md @@ -125,3 +125,7 @@ Don't see your git provider here? Please [contact dbt Support](mailto:support@ge ## Limited integration Some features of dbt Cloud require a tight integration with your git host, for example, updating GitHub pull requests with dbt Cloud run statuses. Importing your project by a URL prevents you from using these features. Once you give dbt Cloud access to your repository, you can continue to set up your project by adding a connection and creating and running your first dbt Cloud job. + +## FAQs + + diff --git a/website/docs/docs/cloud/manage-access/about-access.md b/website/docs/docs/cloud/manage-access/about-access.md index 9a95d0aeb68..f9f97bc555d 100644 --- a/website/docs/docs/cloud/manage-access/about-access.md +++ b/website/docs/docs/cloud/manage-access/about-access.md @@ -121,12 +121,6 @@ set on the _Internal Analytics_ project. ### Manual assignment - - -- New in version 1.1.23 (March, 2021) - - - dbt Cloud administrators can manually assign users to groups independently of IdP attributes. If a dbt Cloud group is configured _without_ any SSO Mappings, then the group will be _unmanaged_ and dbt Cloud will not adjust diff --git a/website/docs/docs/cloud/manage-access/licenses-and-groups.md b/website/docs/docs/cloud/manage-access/licenses-and-groups.md index 88d64f2d9a3..83b926c7445 100644 --- a/website/docs/docs/cloud/manage-access/licenses-and-groups.md +++ b/website/docs/docs/cloud/manage-access/licenses-and-groups.md @@ -117,12 +117,6 @@ set on the _Internal Analytics_ project. ### Manual assignment - - -- New in version 1.1.23 (March, 2021) - - - dbt Cloud administrators can manually assign users to groups independently of IdP attributes. 
If a dbt Cloud group is configured _without_ any SSO Mappings, then the group will be _unmanaged_ and dbt Cloud will not adjust diff --git a/website/docs/docs/cloud/manage-access/set-up-sso-saml-2.0.md b/website/docs/docs/cloud/manage-access/set-up-sso-saml-2.0.md index 2a23d686032..be46e965fe3 100644 --- a/website/docs/docs/cloud/manage-access/set-up-sso-saml-2.0.md +++ b/website/docs/docs/cloud/manage-access/set-up-sso-saml-2.0.md @@ -264,7 +264,7 @@ Expected **Attributes**: | Google groups | App attributes | | -------------- | -------------- | -| Name of groups | `MemberOf` | +| Name of groups | `groups` | 10. Click **Finish** to continue. diff --git a/website/docs/docs/collaborate/documentation.md b/website/docs/docs/collaborate/documentation.md index b613fd7a5ef..429b5187152 100644 --- a/website/docs/docs/collaborate/documentation.md +++ b/website/docs/docs/collaborate/documentation.md @@ -147,7 +147,6 @@ as well as the repo for this project \[here](https://github.com/dbt-labs/mrr-pla
### Custom project-level overviews
-New in v0.18.0
 You can set different overviews for each dbt project/package included in your documentation site by creating a docs block named `__[project_name]__`. For example, in order to define
diff --git a/website/docs/docs/core/connect-data-platform/bigquery-setup.md b/website/docs/docs/core/connect-data-platform/bigquery-setup.md
index 6b5bac53600..7a2a445be3f 100644
--- a/website/docs/docs/core/connect-data-platform/bigquery-setup.md
+++ b/website/docs/docs/core/connect-data-platform/bigquery-setup.md
@@ -84,8 +84,6 @@ my-bigquery-db:
 **Default project**
-New in dbt v0.19.0
-
 If you do not specify a `project`/`database` and are using the `oauth` method, dbt will use the default `project` associated with your user, as defined by `gcloud config set`.
 ### OAuth Token-Based
@@ -233,8 +231,6 @@ my-profile:
 ### Timeouts and Retries
-
-
 The `dbt-bigquery` plugin uses the BigQuery Python client library to submit queries. Each query requires two steps:
 1. Job creation: Submit the query job to BigQuery, and receive its job ID.
 2. Job execution: Wait for the query job to finish executing, and receive its result.
@@ -321,7 +317,6 @@ my-profile:
 ```
-
 ### Dataset locations
@@ -343,12 +338,6 @@ my-profile:
 ### Maximum Bytes Billed
-
-
-- New in dbt v0.17.0
-
-
-
 When a `maximum_bytes_billed` value is configured for a BigQuery profile, queries executed by dbt will fail if they exceed the configured maximum bytes threshold. This configuration should be supplied as an integer number
@@ -395,7 +384,6 @@ my-profile:
 ```
 ### Service Account Impersonation
-New in v0.18.0
 This feature allows users authenticating via local OAuth to access BigQuery resources based on the permissions of a service account.
@@ -417,7 +405,6 @@ For a general overview of this process, see the official docs for [Creating Shor
 ### Execution project
-New in v0.21.0
 By default, dbt will use the specified `project`/`database` as both:
 1. The location to materialize resources (models, seeds, snapshots, etc), unless they specify a custom `project`/`database` config
diff --git a/website/docs/docs/core/connect-data-platform/oracle-setup.md b/website/docs/docs/core/connect-data-platform/oracle-setup.md
index f601709654b..b1195fbd0a0 100644
--- a/website/docs/docs/core/connect-data-platform/oracle-setup.md
+++ b/website/docs/docs/core/connect-data-platform/oracle-setup.md
@@ -455,27 +455,6 @@ dbt_test:
-
-
-
-```yaml
-dbt_test:
-   target: "{{ env_var('DBT_TARGET', 'dev') }}"
-   outputs:
-      dev:
-         type: oracle
-         user: "{{ env_var('DBT_ORACLE_USER') }}"
-         pass: "{{ env_var('DBT_ORACLE_PASSWORD') }}"
-         protocol: "tcps"
-         host: "{{ env_var('DBT_ORACLE_HOST') }}"
-         port: 1522
-         service: "{{ env_var('DBT_ORACLE_SERVICE') }}"
-         database: "{{ env_var('DBT_ORACLE_DATABASE') }}"
-         schema: "{{ env_var('DBT_ORACLE_SCHEMA') }}"
-         threads: 4
-```
-
-
diff --git a/website/docs/docs/core/connect-data-platform/postgres-setup.md b/website/docs/docs/core/connect-data-platform/postgres-setup.md
index 5d7467c786d..f56d3f22576 100644
--- a/website/docs/docs/core/connect-data-platform/postgres-setup.md
+++ b/website/docs/docs/core/connect-data-platform/postgres-setup.md
@@ -88,33 +88,23 @@ The `search_path` config controls the Postgres "search path" that dbt configures
 #### role
- Added in v0.16.0
-
 The `role` config controls the Postgres role that dbt assumes when opening new connections to the database.

 #### sslmode
- Added in v0.16.0
-
 The `sslmode` config controls how dbt connects to Postgres databases using SSL.
See [the Postgres docs](https://www.postgresql.org/docs/9.1/libpq-ssl.html) on `sslmode` for usage information. When unset, dbt will connect to databases using the Postgres default, `prefer`, as the `sslmode`. #### sslcert - Added in v0.21.0 - The `sslcert` config controls the location of the certificate file used to connect to Postgres when using client SSL connections. To use a certificate file that is not in the default location, set that file path using this value. Without this config set, dbt uses the Postgres default locations. See [Client Certificates](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-CLIENTCERT) in the Postgres SSL docs for the default paths. #### sslkey - Added in v0.21.0 - The `sslkey` config controls the location of the private key for connecting to Postgres using client SSL connections. If this config is omitted, dbt uses the default key location for Postgres. See [Client Certificates](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-CLIENTCERT) in the Postgres SSL docs for the default locations. #### sslrootcert - Added in v0.21.0 - When connecting to a Postgres server using a client SSL connection, dbt verifies that the server provides an SSL certificate signed by a trusted root certificate. These root certificates are in the `~/.postgresql/root.crt` file by default. To customize the location of this file, set the `sslrootcert` config value to a new file path. ### `keepalives_idle` diff --git a/website/docs/docs/core/connect-data-platform/snowflake-setup.md b/website/docs/docs/core/connect-data-platform/snowflake-setup.md index 22254c30ee0..98bcf447fed 100644 --- a/website/docs/docs/core/connect-data-platform/snowflake-setup.md +++ b/website/docs/docs/core/connect-data-platform/snowflake-setup.md @@ -124,7 +124,7 @@ Along with adding the `authenticator` parameter, be sure to run `alter account s To use key pair authentication, omit a `password` and instead provide a `private_key_path` and, optionally, a `private_key_passphrase` in your target. **Note:** Versions of dbt before 0.16.0 required that private keys were encrypted and a `private_key_passphrase` was provided. This behavior was changed in dbt v0.16.0. -Starting from [dbt v1.5.0](/docs/dbt-versions/core), you have the option to use a `private_key` string instead of a `private_key_path`. The `private_key` string should be in Base64-encoded DER format, representing the key bytes. Refer to [Snowflake documentation](https://docs.snowflake.com/developer-guide/python-connector/python-connector-example#using-key-pair-authentication-key-pair-rotation) for more info on how they generate the key. +Starting from [dbt v1.5.0](/docs/dbt-versions/core), you have the option to use a `private_key` string instead of a `private_key_path`. The `private_key` string should be in either Base64-encoded DER format, representing the key bytes, or a plain-text PEM format. Refer to [Snowflake documentation](https://docs.snowflake.com/developer-guide/python-connector/python-connector-example#using-key-pair-authentication-key-pair-rotation) for more info on how they generate the key. 
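As an illustration of the `private_key` option described above, here is a hedged `profiles.yml` sketch; the account, user, and environment variable names are placeholders, and loading the key from an environment variable is just one workable pattern:

```yaml
my-snowflake-db:
  target: dev
  outputs:
    dev:
      type: snowflake
      account: your_account_identifier   # placeholder
      user: your_username                # placeholder
      # Either a Base64-encoded DER key or a plain-text PEM key, per the note above
      private_key: "{{ env_var('SNOWFLAKE_PRIVATE_KEY') }}"
      # Only needed when the private key is encrypted
      private_key_passphrase: "{{ env_var('SNOWFLAKE_PRIVATE_KEY_PASSPHRASE') }}"
      database: analytics
      warehouse: transforming
      schema: dbt_dev
      threads: 4
```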
diff --git a/website/docs/docs/core/connect-data-platform/spark-setup.md b/website/docs/docs/core/connect-data-platform/spark-setup.md index b22416fd3a5..895f0559953 100644 --- a/website/docs/docs/core/connect-data-platform/spark-setup.md +++ b/website/docs/docs/core/connect-data-platform/spark-setup.md @@ -57,15 +57,11 @@ $ pip install "dbt-spark[ODBC]" $ pip install "dbt-spark[PyHive]" ``` - - ```zsh # session connections $ pip install "dbt-spark[session]" ``` - -

Configuring {frontMatter.meta.pypi_package}

For {frontMatter.meta.platform_name}-specific configuration please refer to {frontMatter.meta.platform_name} Configuration

@@ -80,7 +76,6 @@ dbt-spark can connect to Spark clusters by three different methods: - [`thrift`](#thrift) connects directly to the lead node of a cluster, either locally hosted / on premise or in the cloud (e.g. Amazon EMR). - [`http`](#http) is a more generic method for connecting to a managed service that provides an HTTP endpoint. Currently, this includes connections to a Databricks interactive cluster. - - [`session`](#session) connects to a pySpark session, running locally or on a remote machine. @@ -88,12 +83,9 @@ dbt-spark can connect to Spark clusters by three different methods: The `session` connection method is intended for advanced users and experimental dbt development. This connection method is not supported by dbt Cloud. ::: - ### ODBC -New in v0.18.1 - Use the `odbc` connection method if you are connecting to a Databricks SQL endpoint or interactive cluster via ODBC driver. (Download the latest version of the official driver [here](https://databricks.com/spark/odbc-driver-download).) @@ -186,8 +178,6 @@ Databricks interactive clusters can take several minutes to start up. You may include the optional profile configs `connect_timeout` and `connect_retries`, and dbt will periodically retry the connection. - - ### Session Use the `session` method if you want to run `dbt` against a pySpark session. @@ -209,8 +199,6 @@ your_profile_name: - - ## Optional configurations ### Retries diff --git a/website/docs/docs/dbt-cloud-apis/service-tokens.md b/website/docs/docs/dbt-cloud-apis/service-tokens.md index efab8b7e3c6..9553f48a013 100644 --- a/website/docs/docs/dbt-cloud-apis/service-tokens.md +++ b/website/docs/docs/dbt-cloud-apis/service-tokens.md @@ -18,9 +18,9 @@ You can use service account tokens for system-level integrations that do not run You can assign as many permission sets as needed to one token. For more on permissions sets, see "[Enterprise Permissions](/docs/cloud/manage-access/enterprise-permissions)." -## Generating service account tokens +## Generate service account tokens -To make a service token in dbt Cloud, follow these steps: +You can generate service tokens if you have a Developer [license](/docs/cloud/manage-access/seats-and-users) and account admin [permissions](/docs/cloud/manage-access/about-user-access#permission-sets). To create a service token in dbt Cloud, follow these steps: 1. Open the **Account Settings** page by clicking the gear icon on the right-hand side. 2. On the left sidebar, click on **Service Tokens**. diff --git a/website/docs/docs/dbt-cloud-apis/sl-jdbc.md b/website/docs/docs/dbt-cloud-apis/sl-jdbc.md index b50dd99ec75..c238dcad680 100644 --- a/website/docs/docs/dbt-cloud-apis/sl-jdbc.md +++ b/website/docs/docs/dbt-cloud-apis/sl-jdbc.md @@ -168,7 +168,7 @@ To query metric values, here are the following parameters that are available: | `where` | A where clause that allows you to filter on dimensions and entities using parameters - comes with `TimeDimension`, `Dimension`, and `Entity` objects. 
Granularity is required with `TimeDimension` | `"{{ Dimension('customer__country') }} = 'US'"` | Optional |
| `limit` | Limit the data returned | `limit=10` | Optional |
| `order` | Order the data returned | `order_by=['-order_gross_profit']` (remove `-` for ascending order) | Optional |
-| `explain` | If true, returns generated SQL for the data platform but does not execute | `explain=True` | Optional |
+| `compile` | If true, returns generated SQL for the data platform but does not execute | `compile=True` | Optional |

## Note on time dimensions and `metric_time`
@@ -285,15 +285,15 @@ semantic_layer.query(metrics=['food_order_amount', 'order_gross_profit'],
 order_by=['order_gross_profit'])
 }}
 ```
-### Query with explain keyword
+### Query with compile keyword

-Use the following example to query using a `explain` keyword:
+Use the following example to query using the `compile` keyword:

 ```bash
 select * from {{
 semantic_layer.query(metrics=['food_order_amount', 'order_gross_profit'],
 group_by=[Dimension('metric_time').grain('month'),'customer__customer_type'],
-explain=True)
+compile=True)
 }}
 ```
diff --git a/website/docs/docs/dbt-versions/release-notes/04-Sept-2023/removing-prerelease-versions.md b/website/docs/docs/dbt-versions/release-notes/04-Sept-2023/removing-prerelease-versions.md
new file mode 100644
index 00000000000..0b588376c34
--- /dev/null
+++ b/website/docs/docs/dbt-versions/release-notes/04-Sept-2023/removing-prerelease-versions.md
@@ -0,0 +1,15 @@
+---
+title: "Update: Removing old (prerelease) versions of dbt from dbt Cloud when (latest) is available"
+description: "Sept 2023: Improving the version selection options by removing prerelease versions whenever the latest version is available."
+sidebar_label: "Update: Removing old prerelease versions from dbt Cloud"
+tags: [Sept-2023, Versions]
+date: 2023-09-26
+sidebar_position: 07
+---
+
+Previously, when dbt Labs released a new [version](/docs/dbt-versions/core#how-dbt-core-uses-semantic-versioning) in dbt Cloud, the older patch _prerelease_ version and the _latest_ version remained as options in the dropdown menu available in the **Environment settings**. Now, when the _latest_ version is released, the _prerelease_ version will be removed and all customers remaining on it will be migrated seamlessly. There will be no interruptions to service when this migration occurs.
+
+To see which version you are currently using and to upgrade, select **Deploy** in the top navigation bar and select **Environments**. Choose the preferred environment and click **Settings**. Click **Edit** to make a change to the current dbt version. dbt Labs recommends always using the latest version whenever possible to take advantage of new features and functionality.
+ + + \ No newline at end of file diff --git a/website/docs/docs/use-dbt-semantic-layer/dbt-sl.md b/website/docs/docs/use-dbt-semantic-layer/dbt-sl.md index c10feded83a..8d073297f48 100644 --- a/website/docs/docs/use-dbt-semantic-layer/dbt-sl.md +++ b/website/docs/docs/use-dbt-semantic-layer/dbt-sl.md @@ -28,7 +28,7 @@ import Features from '/snippets/_sl-plan-info.md' diff --git a/website/docs/docs/use-dbt-semantic-layer/quickstart-sl.md b/website/docs/docs/use-dbt-semantic-layer/quickstart-sl.md index f6c296cfc2a..542ab4896bb 100644 --- a/website/docs/docs/use-dbt-semantic-layer/quickstart-sl.md +++ b/website/docs/docs/use-dbt-semantic-layer/quickstart-sl.md @@ -23,9 +23,9 @@ import TestQuery from '/snippets/_sl-test-and-query-metrics.md'; The dbt Semantic Layer, powered by [MetricFlow](/docs/build/about-metricflow), simplifies defining and using critical business metrics. It centralizes metric definitions, eliminates duplicate coding, and ensures consistent self-service access to metrics in downstream tools. -MetricFlow is a powerful component within the dbt Semantic Layer that helps users define and manage company metrics efficiently. It provides flexible abstractions and SQL query generation and also allows data consumers to retrieve metric datasets quickly and easily from a data platform. +MetricFlow, a powerful component of the dbt Semantic Layer, simplifies the creation and management of company metrics. It offers flexible abstractions, SQL query generation, and enables fast retrieval of metric datasets from a data platform. -Use this guide to fully experience the power of a universal dbt Semantic Layer. Here are the following steps you'll take: +Use this guide to fully experience the power of the universal dbt Semantic Layer. Here are the following steps you'll take: - [Create a semantic model](#create-a-semantic-model) in dbt Cloud using MetricFlow - [Define metrics](#define-metrics) in dbt Cloud using MetricFlow @@ -34,6 +34,10 @@ Use this guide to fully experience the power of a universal dbt Semantic Layer. - [Set up dbt Semantic Layer](#setup) in dbt Cloud - [Connect and query API](#connect-and-query-api) with dbt Cloud + +MetricFlow allows users to define metrics in their dbt project whether in dbt Cloud or in dbt Core. dbt Core users can use the [MetricFlow CLI](/docs/build/metricflow-cli) to define metrics in their local dbt Core project. + +However, to experience the power of the universal [dbt Semantic Layer](/docs/use-dbt-semantic-layer/dbt-sl) and query those metrics in downstream tools, you'll need a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. ## Prerequisites import SetUp from '/snippets/_v2-sl-prerequisites.md'; @@ -86,7 +90,7 @@ import SlSetUp from '/snippets/_new-sl-setup.md'; You can query your metrics in a JDBC-enabled tool or use existing first-class integrations with the dbt Semantic Layer. -Before you begin, you must have a dbt Cloud Team or Enterprise [multi-tenant](/docs/cloud/about-cloud/regions-ip-addresses) deployment, hosted in North America (cloud.getdbt.com login). +You must have a dbt Cloud Team or Enterprise [multi-tenant](/docs/cloud/about-cloud/regions-ip-addresses) deployment, hosted in North America (Additional region support coming soon). - To learn how to use the JDBC API and what tools you can query it with, refer to the {frontMatter.meta.api_name}.
@@ -112,13 +116,11 @@ User data passes through the Semantic Layer on its way back from the warehouse.
Is the dbt Semantic Layer open source? -The dbt Semantic Layer is proprietary, however, some components of the dbt Semantic Layer are open source, like dbt-core and MetricFlow.

The universal dbt Semantic Layer is available to all Team and Enterprise Plans during public beta. Users on dbt Cloud Developer plans or dbt Core users can use MetricFlow to only define and test metrics locally.
+The dbt Semantic Layer is proprietary, however, some components of the dbt Semantic Layer are open source, like dbt-core and MetricFlow.

dbt Cloud Developer or dbt Core users can define metrics in their project, including a local dbt Core project, using the dbt Cloud IDE or the MetricFlow CLI. However, to experience the universal dbt Semantic Layer and access those metrics using the API or downstream tools, users must be on a dbt Cloud Team or Enterprise plan.

## Next steps

-Review the following documents to learn more and get started:
-
 - [Build your metrics](/docs/build/build-metrics-intro)
 - [Set up dbt Semantic Layer](/docs/use-dbt-semantic-layer/setup-dbt-sl)
 - [Available integrations](/docs/use-dbt-semantic-layer/avail-sl-integrations)
@@ -178,16 +180,6 @@ packages:
-
-
-```yml
-packages:
-  - package: dbt-labs/metrics
-    version: [">=0.2.0", "<0.3.0"]
-```
-
-
 1. Paste the dbt metrics package code in your `packages.yml` file.
 2. Run the [`dbt deps` command](/reference/commands/deps) to install the package.
diff --git a/website/docs/faqs/Core/install-python-compatibility.md b/website/docs/faqs/Core/install-python-compatibility.md
index 4d6066d931b..5c536101f0c 100644
--- a/website/docs/faqs/Core/install-python-compatibility.md
+++ b/website/docs/faqs/Core/install-python-compatibility.md
@@ -17,7 +17,7 @@ The latest version of `dbt-core` is compatible with Python versions 3.7, 3.8, 3.
-
+
 The latest version of `dbt-core` is compatible with Python versions 3.7, 3.8, 3.9, and 3.10
diff --git a/website/docs/faqs/Docs/documenting-macros.md b/website/docs/faqs/Docs/documenting-macros.md
index cbc12b988c6..9a2036cd6bf 100644
--- a/website/docs/faqs/Docs/documenting-macros.md
+++ b/website/docs/faqs/Docs/documenting-macros.md
@@ -5,8 +5,6 @@ sidebar_label: 'Document macros'
 id: documenting-macros
 ---
-The `macros:` key is new in 0.16.0.
-
 To document macros, use a [schema file](/reference/macro-properties) and nest the configurations under a `macros:` key
 ## Example
diff --git a/website/docs/faqs/Git/git-migration.md b/website/docs/faqs/Git/git-migration.md
new file mode 100644
index 00000000000..775ae3679e3
--- /dev/null
+++ b/website/docs/faqs/Git/git-migration.md
@@ -0,0 +1,26 @@
+---
+title: "How to migrate git providers"
+sidebar_label: "How to migrate git providers"
+id: "git-migration"
+hide_table_of_contents: true
+description: "Learn how to migrate git providers in dbt Cloud with minimal disruption."
+tags: [Git]
+---
+
+To migrate from one git provider to another, refer to the following steps to minimize disruption:
+
+1. Outside of dbt Cloud, you'll need to import your existing repository into your new provider.
+
+   As an example, if you're migrating from GitHub to Azure DevOps, you'll need to import your existing repository (GitHub) into your new git provider (Azure DevOps). For detailed steps on how to do this, refer to your git provider's documentation (such as [GitHub](https://docs.github.com/en/migrations/importing-source-code/using-github-importer/importing-a-repository-with-github-importer), [GitLab](https://docs.gitlab.com/ee/user/project/import/repo_by_url.html), or [Azure DevOps](https://learn.microsoft.com/en-us/azure/devops/repos/git/import-git-repository?view=azure-devops)).
+
+2. Go back to dbt Cloud and set up your [integration for the new git provider](/docs/cloud/git/connect-github), if needed.
+3. Disconnect the old repository in dbt Cloud by going to **Account Settings** and then **Projects**. Click on the **Repository** link, then click **Edit** and **Disconnect**.
+
+
+
+4. On the same page, connect to the new git provider repository by clicking **Configure Repository**.
+   - If you're using the native integration, you may need to OAuth to it.
+
+5. That's it, you should now be connected to the new git provider! 🎉
+
+Note — As a tip, we recommend you refresh your page and dbt Cloud IDE before performing any actions.
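If your new provider does not offer a repository importer, a plain `git` mirror is one generic fallback. The sketch below uses placeholder URLs and assumes the destination repository is empty and accepts mirror pushes:

```bash
# Clone a bare mirror of the old repository (all branches and tags)
git clone --mirror https://github.com/your-org/your-repo.git
cd your-repo.git

# Push everything to the new provider's empty repository
git push --mirror https://dev.azure.com/your-org/your-project/_git/your-repo
```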
diff --git a/website/docs/faqs/Models/configurable-model-path.md b/website/docs/faqs/Models/configurable-model-path.md index 6e8861a0693..c34112a5fe1 100644 --- a/website/docs/faqs/Models/configurable-model-path.md +++ b/website/docs/faqs/Models/configurable-model-path.md @@ -6,12 +6,6 @@ id: configurable-model-path --- - - -- **v1.0.0:** The config 'source-path' has been deprecated in favor of [`model-paths`](/reference/project-configs/model-paths). - - - By default, dbt expects the files defining your models to be located in the `models` subdirectory of your project. To change this, update the [model-paths](reference/project-configs/model-paths.md) configuration in your `dbt_project.yml` diff --git a/website/docs/faqs/Tests/configurable-data-path.md b/website/docs/faqs/Tests/configurable-data-path.md index 7c4e92f7226..7663d2d3f11 100644 --- a/website/docs/faqs/Tests/configurable-data-path.md +++ b/website/docs/faqs/Tests/configurable-data-path.md @@ -6,12 +6,6 @@ id: configurable-data-path --- - - -- **v1.0.0:** The config 'data-paths' has been deprecated in favor of [`seed-paths`](/reference/project-configs/seed-paths). - - - By default, dbt expects your seed files to be located in the `seeds` subdirectory of your project. diff --git a/website/docs/faqs/Tests/testing-seeds.md b/website/docs/faqs/Tests/testing-seeds.md index 93afcab2fa4..3b1b3e0df56 100644 --- a/website/docs/faqs/Tests/testing-seeds.md +++ b/website/docs/faqs/Tests/testing-seeds.md @@ -6,8 +6,6 @@ id: testing-seeds --- -The `seeds:` key is new in 0.16.0. Prior to this, use a `models:` key instead. - To test and document seeds, use a [schema file](/reference/configs-and-properties) and nest the configurations under a `seeds:` key ## Example diff --git a/website/docs/guides/best-practices/custom-generic-tests.md b/website/docs/guides/best-practices/custom-generic-tests.md index dc23770423e..f2d84e38853 100644 --- a/website/docs/guides/best-practices/custom-generic-tests.md +++ b/website/docs/guides/best-practices/custom-generic-tests.md @@ -6,13 +6,6 @@ displayText: Writing custom generic tests hoverSnippet: Learn how to define your own custom generic tests. --- - - -* `v0.20.0`: Generic tests (f.k.a. schema tests) are defined using `test` blocks instead of macros prefixed `test_`. They return a number of failing rows, rather than a single numeric value. -* `v1.0.0`: Generic tests can be defined in the `tests/generic` subfolder, in addition to the `macros/` directory - - - dbt ships with [Not Null](/reference/resource-properties/tests#not-null), [Unique](/reference/resource-properties/tests#unique), [Relationships](/reference/resource-properties/tests#relationships), and [Accepted Values](/reference/resource-properties/tests#accepted-values) generic tests. (These used to be called "schema tests," and you'll still see that name in some places.) Under the hood, these generic tests are defined as `test` blocks (like macros) in a globally accessible dbt project. You can find the source code for these tests in the [global project](https://github.com/dbt-labs/dbt-core/tree/main/core/dbt/include/global_project/macros/generic_test_sql). 
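To illustrate the `test` block syntax described above, a custom generic test might look like the following sketch; `is_positive` is a hypothetical test name:

```sql
-- tests/generic/is_positive.sql (a sketch of a custom generic test)
-- A generic test returns the failing rows: here, rows where the column is not positive
{% test is_positive(model, column_name) %}

select *
from {{ model }}
where {{ column_name }} <= 0

{% endtest %}
```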
:::info diff --git a/website/docs/guides/best-practices/how-we-structure/2-staging.md b/website/docs/guides/best-practices/how-we-structure/2-staging.md index cb46fa19b33..bcb589508e5 100644 --- a/website/docs/guides/best-practices/how-we-structure/2-staging.md +++ b/website/docs/guides/best-practices/how-we-structure/2-staging.md @@ -102,7 +102,7 @@ select * from renamed - ✅ **Type casting** - ✅ **Basic computations** (e.g. cents to dollars) - ✅ **Categorizing** (using conditional logic to group values into buckets or booleans, such as in the `case when` statements above) - - ❌ **Joins** — the goal of staging models is to clean and prepare individual source conformed concepts for downstream usage. We're creating the most useful version of a source system table, which we can use as a new modular component for our project. In our experience, joins are almost always a bad idea here — they create immediate duplicated computation and confusing relationships that ripple downstream — there are occasionally exceptions though (see [base models](guides/best-practices/how-we-structure/2-staging#staging-other-considerations) below). + - ❌ **Joins** — the goal of staging models is to clean and prepare individual source-conformed concepts for downstream usage. We're creating the most useful version of a source system table, which we can use as a new modular component for our project. In our experience, joins are almost always a bad idea here — they create immediate duplicated computation and confusing relationships that ripple downstream — there are occasionally exceptions though (refer to [base models](#staging-other-considerations) for more info). - ❌ **Aggregations** — aggregations entail grouping, and we're not doing that at this stage. Remember - staging models are your place to create the building blocks you’ll use all throughout the rest of your project — if we start changing the grain of our tables by grouping in this layer, we’ll lose access to source data that we’ll likely need at some point. We just want to get our individual concepts cleaned and ready for use, and will handle aggregating values downstream. - ✅ **Materialized as views.** Looking at a partial view of our `dbt_project.yml` below, we can see that we’ve configured the entire staging directory to be materialized as views. As they’re not intended to be final artifacts themselves, but rather building blocks for later models, staging models should typically be materialized as views for two key reasons: diff --git a/website/docs/guides/best-practices/how-we-style/2-how-we-style-our-sql.md b/website/docs/guides/best-practices/how-we-style/2-how-we-style-our-sql.md index 9684a498bce..8c61e63b888 100644 --- a/website/docs/guides/best-practices/how-we-style/2-how-we-style-our-sql.md +++ b/website/docs/guides/best-practices/how-we-style/2-how-we-style-our-sql.md @@ -25,7 +25,7 @@ id: 2-how-we-style-our-sql - 🔙 Fields should be stated before aggregates and window functions. - 🤏🏻 Aggregations should be executed as early as possible (on the smallest data set possible) before joining to another table to improve performance. -- 🔢 Ordering and grouping by a number (eg. group by 1, 2) is preferred over listing the column names (see [this classic rant](https://blog.getdbt.com/write-better-sql-a-defense-of-group-by-1/) for why). Note that if you are grouping by more than a few columns, it may be worth revisiting your model design. +- 🔢 Ordering and grouping by a number (eg. 
group by 1, 2) is preferred over listing the column names (see [this classic rant](https://www.getdbt.com/blog/write-better-sql-a-defense-of-group-by-1) for why). Note that if you are grouping by more than a few columns, it may be worth revisiting your model design. ## Joins diff --git a/website/docs/guides/dbt-ecosystem/sl-partner-integration-guide.md index 20e2d8fce71..39e93987b20 100644 --- a/website/docs/guides/dbt-ecosystem/sl-partner-integration-guide.md +++ b/website/docs/guides/dbt-ecosystem/sl-partner-integration-guide.md @@ -136,9 +136,9 @@ These are recommendations on how to evolve a Semantic Layer integration and not * Querying dimensions without metrics and other more advanced querying functionality * Suggest metrics to users based on teams/identity, and so on. -### A note on transparency and using explain +### A note on transparency and using compile -For transparency and additional context, we recommend you have an easy way for the user to obtain the SQL that MetricFlow generates. Depending on what API you are using, you can do this by using our explain parameter (JDBC) or compileSQL mutation (GraphQL). This is incredibly powerful because we want to be very transparent to the user about what we're doing and do not want to be a black box. This would be mostly beneficial to a technical user. +For transparency and additional context, we recommend you have an easy way for the user to obtain the SQL that MetricFlow generates. Depending on what API you are using, you can do this by using our compile parameter. This is incredibly powerful because we want to be very transparent to the user about what we're doing and do not want to be a black box. This would be mostly beneficial to a technical user. ### A note on where filters diff --git a/website/docs/guides/legacy/best-practices.md index 10e02271518..1fbcbc72cc1 100644 --- a/website/docs/guides/legacy/best-practices.md +++ b/website/docs/guides/legacy/best-practices.md @@ -112,8 +112,6 @@ To merge code changes with confidence, you want to know that those changes will At the same time, it costs time (and money) to run and test all the models in your project. This inefficiency feels especially painful if your PR only proposes changes to a handful of models. -New in v0.18.0 - By comparing to artifacts from a previous production run, dbt can determine which models are modified and build them on top of their unmodified parents. @@ -122,8 +120,6 @@ dbt run -s state:modified+ --defer --state path/to/prod/artifacts dbt test -s state:modified+ --defer --state path/to/prod/artifacts ``` -New in v1.0.0 - By comparing to artifacts from a previous production run, dbt can determine model and test result statuses. - `result:fail` @@ -159,7 +155,6 @@ dbt test --select result:fail --exclude --defer --state path/to/p > Note: If you're using the `--state target/` flag, `result:error` and `result:fail` flags can only be selected concurrently (in the same command) if using the `dbt build` command. `dbt test` will overwrite the `run_results.json` from `dbt run` in a previous command invocation. - Only supported by v1.1 or newer. @@ -178,8 +173,6 @@ dbt source freshness # must be run again to compare current to previous state dbt build --select source_status:fresher+ --state path/to/prod/artifacts ``` - - To learn more, read the docs on [state](/reference/node-selection/syntax#about-node-selection).
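Putting the state selectors above together, a CI job might run a sequence like the following sketch; `./prod-artifacts` is a placeholder path to artifacts saved from a previous production run:

```
# Sketch of a "Slim CI" sequence against saved production artifacts
dbt run --select state:modified+ --defer --state ./prod-artifacts
dbt test --select state:modified+ --defer --state ./prod-artifacts
```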
## Pro-tips for dbt Projects diff --git a/website/docs/reference/analysis-properties.md b/website/docs/reference/analysis-properties.md index 008da70f9db..fbc7b05538f 100644 --- a/website/docs/reference/analysis-properties.md +++ b/website/docs/reference/analysis-properties.md @@ -28,10 +28,3 @@ analyses: ```
- - - - -* `v0.16.0`: The ability to declare analysis properties was introduced. - - diff --git a/website/docs/reference/artifacts/dbt-artifacts.md b/website/docs/reference/artifacts/dbt-artifacts.md index 7528692aa8b..859fde7c908 100644 --- a/website/docs/reference/artifacts/dbt-artifacts.md +++ b/website/docs/reference/artifacts/dbt-artifacts.md @@ -30,8 +30,6 @@ Most dbt commands (and corresponding RPC methods) produce artifacts: ## Common metadata -New in v0.19.0 - All artifacts produced by dbt include a `metadata` dictionary with these properties: - `dbt_version`: Version of dbt that produced this artifact. diff --git a/website/docs/reference/artifacts/manifest-json.md b/website/docs/reference/artifacts/manifest-json.md index 3a916ed6d4c..5e8dcedd2d5 100644 --- a/website/docs/reference/artifacts/manifest-json.md +++ b/website/docs/reference/artifacts/manifest-json.md @@ -53,12 +53,4 @@ You can refer to [dbt JSON Schema](https://schemas.getdbt.com/) for info on desc **Note**: The `manifest.json` version number is related to (but not _equal_ to) your dbt version, so you _must_ use the correct `manifest.json` version for your dbt version. To find the correct `manifest.json` version, select the dbt version on the top navigation (such as `v1.5`). -Use the following table to understand how the versioning pattern works and match the Manifest version with the dbt version: - -| dbt version | Manifest version | -| ----------- | ---------------- | -| `v1.5` | [Manifest v9](https://schemas.getdbt.com/dbt/manifest/v9/index.html) -| `v1.4` | [Manifest v8](https://schemas.getdbt.com/dbt/manifest/v8/index.html) -| `v1.3` | [Manifest v7](https://schemas.getdbt.com/dbt/manifest/v7/index.html) -| `v1.2` | [Manifest v6](https://schemas.getdbt.com/dbt/manifest/v6/index.html) -| `v1.1` | [Manifest v5](https://schemas.getdbt.com/dbt/manifest/v5/index.html) +Refer to the table at the beginning of [this page](/reference/artifacts/manifest-json) to understand how the Manifest version matches the dbt version. diff --git a/website/docs/reference/commands/clean.md b/website/docs/reference/commands/clean.md index 0185b701740..23a3f6080ce 100644 --- a/website/docs/reference/commands/clean.md +++ b/website/docs/reference/commands/clean.md @@ -4,12 +4,6 @@ sidebar_label: "clean" id: "clean" --- - - -- **v1.0.0:** `dbt_modules` has been replaced by `dbt_packages` by default for the [clean-target](/reference/project-configs/clean-targets) for packages. - - - `dbt clean` is a utility function that deletes all folders specified in the [`clean-targets`](/reference/project-configs/clean-targets) list specified in `dbt_project.yml`. You can use this to delete the `dbt_packages` and `target` directories. To avoid complex permissions issues and potentially deleting crucial aspects of the remote file system without access to fix them, this command does not work when interfacing with the RPC server that powers the dbt Cloud IDE. Instead, when working in dbt Cloud, the `dbt deps` command cleans before it installs packages automatically. The `target` folder can be manually deleted from the sidebar file tree if needed. 
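For reference, a `clean-targets` list that covers both of those directories might look like this sketch:

```yml
# dbt_project.yml (a sketch): directories removed by `dbt clean`
clean-targets:
  - target
  - dbt_packages
```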
diff --git a/website/docs/reference/commands/init.md b/website/docs/reference/commands/init.md index 468bee5ff60..873647814ec 100644 --- a/website/docs/reference/commands/init.md +++ b/website/docs/reference/commands/init.md @@ -29,35 +29,6 @@ If you've just cloned or downloaded an existing dbt project, `dbt init` can stil - **Existing project:** If you're the maintainer of an existing project, and you want to help new users get connected to your database quickly and easily, you can include your own custom `profile_template.yml` in the root of your project, alongside `dbt_project.yml`. For common connection attributes, set the values in `fixed`; leave user-specific attributes in `prompts`, but with custom hints and defaults as you'd like. - - - - -```yml -fixed: - account: abc123 - authenticator: externalbrowser - database: analytics - role: transformer - type: snowflake - warehouse: transforming -prompts: - user: - type: string - hint: yourname@jaffleshop.com - schema: - type: string - hint: usually dbt_ - threads: - hint: "your favorite number, 1-10" - type: int - default: 8 -``` - - - - - diff --git a/website/docs/reference/commands/rpc.md b/website/docs/reference/commands/rpc.md index 2b9a96688de..809eadee639 100644 --- a/website/docs/reference/commands/rpc.md +++ b/website/docs/reference/commands/rpc.md @@ -5,13 +5,6 @@ id: "rpc" description: "Remote Procedure Call (rpc) dbt server compiles and runs queries, and provides methods that enable you to list and terminate running processes. " --- - - - - **v0.14**: The `dbt rpc` command was introduced to dbt Core - - **v1.0**: We now distribute and package the Remote Procedure Call (rpc) server functionality separately from `dbt-core`. You can find the code in a dedicated [`dbt-rpc` repository](https://github.com/dbt-labs/dbt-rpc). - - - :::caution The dbt-rpc plugin is deprecated diff --git a/website/docs/reference/commands/run.md b/website/docs/reference/commands/run.md index f22cea71522..557d0d71338 100644 --- a/website/docs/reference/commands/run.md +++ b/website/docs/reference/commands/run.md @@ -71,32 +71,12 @@ For more information on running parents or children of specific models, see the ## Treat warnings as errors - - -- Moved to [global configs](/reference/global-configs/about-global-configs) in v1.0 - - - See [global configs](/reference/global-configs/warnings) ## Failing fast - - -- The `--fail-fast` flag is new in dbt v0.17.0 -- Moved to [global configs](/reference/global-configs/about-global-configs) in v1.0 - - - See [global configs](/reference/global-configs/failing-fast) ## Enable or Disable Colorized Logs - - -- The `--use-colors` and `--no-use-colors` flags are new in dbt v0.18.0 -- Moved to [global configs](/reference/global-configs/about-global-configs) in v1.0 - - - See [global configs](/reference/global-configs/print-output#print-color) diff --git a/website/docs/reference/commands/seed.md b/website/docs/reference/commands/seed.md index 272a2a7f2a9..8a410706842 100644 --- a/website/docs/reference/commands/seed.md +++ b/website/docs/reference/commands/seed.md @@ -4,20 +4,11 @@ sidebar_label: "seed" id: "seed" --- - - -- **v1.0.0:** The default config for this command will now be `seed-paths` instead of `data-paths`. - - - - The `dbt seed` command will load `csv` files located in the `seed-paths` directory of your dbt project into your . ### Selecting seeds to run - Added in v0.16.0 - Specific seeds can be run using the `--select` flag to `dbt seed`. 
Example: ``` diff --git a/website/docs/reference/dbt-classes.md b/website/docs/reference/dbt-classes.md index 72e11b98ed4..13f9263e545 100644 --- a/website/docs/reference/dbt-classes.md +++ b/website/docs/reference/dbt-classes.md @@ -104,12 +104,6 @@ col.numeric_type('numeric', 12, 4) # numeric(12,4) ### Instance methods - - - The `is_number` and `is_float` instance methods were added dbt v0.16.0 - - - - **is_string()**: Returns True if the column is a String type (eg. text, varchar), else False - **is_numeric()**: Returns True if the column is a fixed-precision Numeric type (eg. `numeric`), else False - **is_number()**: Returns True if the column is a number-y type (eg. `numeric`, `int`, `float`, or similar), else False @@ -194,12 +188,6 @@ will be expanded to: ## Result objects - - -* `v0.19.0`: The `Result` object significantly changed its schema. See https://schemas.getdbt.com/dbt/run-results/v1.json for the full specification. - - - The execution of a resource in dbt generates a `Result` object. This object contains information about the executed node, timing, status, and metadata returned by the adapter. At the end of an invocation, dbt records these objects in [`run_results.json`](/reference/artifacts/run-results-json). - `node`: Full object representation of the dbt resource (model, seed, snapshot, test) executed, including its `unique_id` diff --git a/website/docs/reference/dbt-jinja-functions/as_bool.md b/website/docs/reference/dbt-jinja-functions/as_bool.md index e0700032212..d4c2bbf1743 100644 --- a/website/docs/reference/dbt-jinja-functions/as_bool.md +++ b/website/docs/reference/dbt-jinja-functions/as_bool.md @@ -24,10 +24,3 @@ models: ``` - - - -* `v0.17.1`: Native rendering is disabled by default. The `as_bool` filter was -introduced. - - diff --git a/website/docs/reference/dbt-jinja-functions/as_native.md b/website/docs/reference/dbt-jinja-functions/as_native.md index fca25249dca..1de9ad45bf9 100644 --- a/website/docs/reference/dbt-jinja-functions/as_native.md +++ b/website/docs/reference/dbt-jinja-functions/as_native.md @@ -16,10 +16,3 @@ and [`as_number`](/reference/dbt-jinja-functions/as_number) instead. Unlike `as_bool` and `as_number`, `as_native` will return a rendered value regardless of the input type. Ensure that your inputs match expectations. ::: - - - -* `v0.17.1`: Native rendering is disabled by default. The `as_native` filter was -introduced. - - diff --git a/website/docs/reference/dbt-jinja-functions/as_number.md b/website/docs/reference/dbt-jinja-functions/as_number.md index 057d7ec8d20..29b35094880 100644 --- a/website/docs/reference/dbt-jinja-functions/as_number.md +++ b/website/docs/reference/dbt-jinja-functions/as_number.md @@ -25,10 +25,3 @@ my_profile: ``` - - - -* `v0.17.1`: Native rendering is disabled by default. The `as_number` filter was -introduced. - - diff --git a/website/docs/reference/dbt-jinja-functions/as_text.md b/website/docs/reference/dbt-jinja-functions/as_text.md index 5e19e5bc9bc..6b26cfa327d 100644 --- a/website/docs/reference/dbt-jinja-functions/as_text.md +++ b/website/docs/reference/dbt-jinja-functions/as_text.md @@ -56,12 +56,3 @@ models: ``` - - - -* `v0.17.0`: Native rendering is enabled by default. The `as_text` filter was -introduced. -* `v0.17.1`: Native rendering is disabled by default. The `as_text` filter works -as before, with no functional effect. 
- - diff --git a/website/docs/reference/dbt-jinja-functions/builtins.md b/website/docs/reference/dbt-jinja-functions/builtins.md index a7e96640351..edc5f34ffda 100644 --- a/website/docs/reference/dbt-jinja-functions/builtins.md +++ b/website/docs/reference/dbt-jinja-functions/builtins.md @@ -1,8 +1,8 @@ --- -title: "About builtins Jinja function" +title: "About builtins Jinja variable" sidebar_label: "builtins" id: "builtins" -description: "Read this guide to understand the builtins Jinja function in dbt." +description: "Read this guide to understand the builtins Jinja variable in dbt." --- diff --git a/website/docs/reference/dbt-jinja-functions/dispatch.md b/website/docs/reference/dbt-jinja-functions/dispatch.md index a165ae59eb0..5dff787219f 100644 --- a/website/docs/reference/dbt-jinja-functions/dispatch.md +++ b/website/docs/reference/dbt-jinja-functions/dispatch.md @@ -5,12 +5,6 @@ id: "dispatch" description: "dbt extends functionality across data platforms using multiple dispatch." --- - - -- **v1.0.0:** The 'packages' argument is fully deprecated. Use `macro_namespace` and project-level `dispatch` config instead. - - - dbt can extend functionality across [Supported Data Platforms](/docs/supported-data-platforms) through a system of [multiple dispatch](https://en.wikipedia.org/wiki/Multiple_dispatch). Because SQL syntax, data types, and / support vary across adapters, dbt can define and call generic functional macros, and then "dispatch" that macro to the appropriate implementation for the current adapter. ## Syntax diff --git a/website/docs/reference/dbt-jinja-functions/env_var.md b/website/docs/reference/dbt-jinja-functions/env_var.md index a5e9df82415..f4cc05cec0f 100644 --- a/website/docs/reference/dbt-jinja-functions/env_var.md +++ b/website/docs/reference/dbt-jinja-functions/env_var.md @@ -58,12 +58,6 @@ models: ### Secrets - - - **v1.0.0:** Restricted use of secret env vars to `profiles.yml` and `packages.yml` - - - For certain configurations, you can use "secret" env vars. Any env var named with the prefix `DBT_ENV_SECRET_` will be: - Available for use in `profiles.yml` + `packages.yml`, via the same `env_var()` function - Disallowed everywhere else, including `dbt_project.yml` and model SQL, to prevent accidentally writing these secret values to the or metadata artifacts @@ -82,12 +76,6 @@ host: "www.{{ env_var('DBT_ENV_SECRET_HOST_DOMAIN') }}.com/{{ env_var('DBT_ENV_S ### Custom metadata - - - - **v0.19.0:** Introduced `DBT_ENV_CUSTOM_ENV_` prefix and artifact `metadata.env` - - - Any env var named with the prefix `DBT_ENV_CUSTOM_ENV_` will be included in two places, with its prefix-stripped name as the key: - [dbt artifacts](/reference/artifacts/dbt-artifacts#common-metadata): `metadata` -> `env` - [events and structured logs](/reference/events-logging#info-fields): `info` -> `extra` diff --git a/website/docs/reference/dbt-jinja-functions/graph.md b/website/docs/reference/dbt-jinja-functions/graph.md index 3b3b4d1cb88..491b7836f45 100644 --- a/website/docs/reference/dbt-jinja-functions/graph.md +++ b/website/docs/reference/dbt-jinja-functions/graph.md @@ -99,7 +99,7 @@ representations of those nodes. 
A simplified example might look like: }, "exposures": { "exposure.my_project.traffic_dashboard": { - "unique_id": "source.my_project.traffic_dashboard", + "unique_id": "exposure.my_project.traffic_dashboard", "type": "dashboard", "maturity": "high", "path": "models/path/to/schema.yml", diff --git a/website/docs/reference/dbt-jinja-functions/log.md b/website/docs/reference/dbt-jinja-functions/log.md index ec4533ea621..30e68f8c21d 100644 --- a/website/docs/reference/dbt-jinja-functions/log.md +++ b/website/docs/reference/dbt-jinja-functions/log.md @@ -12,7 +12,34 @@ __Args__: Logs a line to either the log file or stdout. -([Source on GitHub](https://github.com/dbt-labs/dbt-core/blob/HEAD/core/dbt/context/base.py#L432)) +
+ Code source + Refer to the source code on GitHub or use the following code as a reference:

+ +```python + def log(msg: str, info: bool = False) -> str: + """Logs a line to either the log file or stdout. + + :param msg: The message to log + :param info: If `False`, write to the log file. If `True`, write to + both the log file and stdout. + + > macros/my_log_macro.sql + + {% macro some_macro(arg1, arg2) %} + {{ log("Running some_macro: " ~ arg1 ~ ", " ~ arg2) }} + {% endmacro %}" + """ + if info: + fire_event(JinjaLogInfo(msg=msg, node_info=get_node_info())) + else: + fire_event(JinjaLogDebug(msg=msg, node_info=get_node_info())) + return "" +``` + + + +
```sql diff --git a/website/docs/reference/dbt-jinja-functions/on-run-end-context.md b/website/docs/reference/dbt-jinja-functions/on-run-end-context.md index ff0f7c1ef33..32cd8ca10ff 100644 --- a/website/docs/reference/dbt-jinja-functions/on-run-end-context.md +++ b/website/docs/reference/dbt-jinja-functions/on-run-end-context.md @@ -100,12 +100,6 @@ on-run-end: ## Results - - -* `v0.19.0`: The `Result` object significantly changed its schema. See https://schemas.getdbt.com/dbt/run-results/v1.json for the full specification. - - - The `results` variable contains a list of [Result objects](/reference/dbt-classes#result-objects) with one element per resource that executed in the dbt job. The Result object provides access within the Jinja on-run-end context to the information that will populate the [run results JSON artifact](/reference/artifacts/run-results-json). Example usage: diff --git a/website/docs/reference/dbt-jinja-functions/project_name.md b/website/docs/reference/dbt-jinja-functions/project_name.md index 38717aa16c3..7f76c5a4800 100644 --- a/website/docs/reference/dbt-jinja-functions/project_name.md +++ b/website/docs/reference/dbt-jinja-functions/project_name.md @@ -5,8 +5,6 @@ id: "project_name" description: "Read this guide to understand the project_name Jinja function in dbt." --- -New in 0.16.0 - The `project_name` context variable returns the `name` for the root-level project which is being run by dbt. This variable can be used to defer execution to a root-level project macro if one exists. diff --git a/website/docs/reference/dbt-jinja-functions/statement-blocks.md b/website/docs/reference/dbt-jinja-functions/statement-blocks.md index 1ad4f099aa3..2829ad3fe14 100644 --- a/website/docs/reference/dbt-jinja-functions/statement-blocks.md +++ b/website/docs/reference/dbt-jinja-functions/statement-blocks.md @@ -41,12 +41,6 @@ Once the statement block has executed, the result set is accessible via the `loa - `data`: Pythonic representation of data returned by query (arrays, tuples, dictionaries). - `table`: [Agate](https://agate.readthedocs.io/page/api/table.html) table representation of data returned by query. - - -* `v0.19.0`: The `response` structured object replaced a `status` string that contained similar information. - - - For the above statement, that could look like: diff --git a/website/docs/reference/dbt-jinja-functions/this.md b/website/docs/reference/dbt-jinja-functions/this.md index c75a639042c..f9f2961b08f 100644 --- a/website/docs/reference/dbt-jinja-functions/this.md +++ b/website/docs/reference/dbt-jinja-functions/this.md @@ -22,24 +22,6 @@ meta: - - -### Grant permissions on a model in a post-hook - - - -```yaml -models: - project-name: - +post-hook: - - "grant select on {{ this }} to db_reader" -``` - - - - - - ### Configuring incremental models diff --git a/website/docs/reference/dbt_project.yml.md b/website/docs/reference/dbt_project.yml.md index 59541a81256..c706b57a73b 100644 --- a/website/docs/reference/dbt_project.yml.md +++ b/website/docs/reference/dbt_project.yml.md @@ -1,8 +1,3 @@ - - -- **v1.0.0:** The default config name for `data-paths` is now [`seed-paths`](/reference/project-configs/seed-paths), `source-paths` is now [`model-paths`](/reference/project-configs/model-paths) and `modules-path` is now [`packages-install-path`](/reference/project-configs/packages-install-path). - - Every [dbt project](/docs/build/projects) needs a `dbt_project.yml` file — this is how dbt knows a directory is a dbt project. 
It also contains important information that tells dbt how to operate on your project. diff --git a/website/docs/reference/global-configs/cache.md b/website/docs/reference/global-configs/cache.md index 6157e1a3bfb..a605e1e70f3 100644 --- a/website/docs/reference/global-configs/cache.md +++ b/website/docs/reference/global-configs/cache.md @@ -31,7 +31,7 @@ dbt --cache-selected-only run --select salesforce
- + ### Cache database objects for selected resource diff --git a/website/docs/reference/macro-properties.md b/website/docs/reference/macro-properties.md index 91ba52de9ca..9919835f3c5 100644 --- a/website/docs/reference/macro-properties.md +++ b/website/docs/reference/macro-properties.md @@ -27,9 +27,3 @@ macros: ``` - - - -* `v0.16.0`: The ability to declare macro properties was introduced. - - diff --git a/website/docs/reference/node-selection/defer.md b/website/docs/reference/node-selection/defer.md index 6079e53793a..e13a4f6648a 100644 --- a/website/docs/reference/node-selection/defer.md +++ b/website/docs/reference/node-selection/defer.md @@ -2,13 +2,6 @@ title: "Defer" --- - - -- **v0.18.0**: Introduced `--defer` and `--state` flags as beta features. -- **v0.19.0**: Changed `--defer` to use the current environment's resource, if it exists, and only fall back to the other environment's resource if the first does not. Also added support for `dbt test --defer`. - - - Defer is a powerful feature that makes it possible to run a subset of models or tests in a [sandbox environment](/docs/environments-in-dbt) without having to first build their upstream parents. This can save time and computational resources when you want to test a small number of models in a large project. Defer requires that a manifest from a previous dbt invocation be passed to the `--state` flag or env var. Together with the `state:` selection method, these features enable "Slim CI". Read more about [state](/reference/node-selection/syntax#about-node-selection). diff --git a/website/docs/reference/node-selection/methods.md b/website/docs/reference/node-selection/methods.md index 3ffed493c23..e318a0b9f4a 100644 --- a/website/docs/reference/node-selection/methods.md +++ b/website/docs/reference/node-selection/methods.md @@ -151,9 +151,6 @@ $ dbt ls -s config.transient:true ### The "test_type" method - -In v1.0.0, test types were renamed: "singular" (instead of "data") and "generic" (instead of "schema") - The `test_type` method is used to select tests based on their type, `singular` or `generic`: @@ -239,7 +236,6 @@ The `exposure` method is used to select parent resources of a specified [exposur ``` ### The "metric" method -New in v1.0.0 The `metric` method is used to select parent resources of a specified [metric](/docs/build/metrics). Use in conjunction with the `+` operator. @@ -249,7 +245,6 @@ $ dbt ls --select +metric:* --resource-type source # list all source tables ``` ### The "result" method -New in v1.0.0 The `result` method is related to the `state` method described above and can be used to select resources based on their result status from a prior run. Note that one of the dbt commands [`run`, `test`, `build`, `seed`] must have been performed in order to create the result on which a result selector operates. You can use `result` selectors in conjunction with the `+` operator. @@ -261,8 +256,6 @@ $ dbt seed --select result:error --state path/to/artifacts # run all seeds that ``` ### The "source_status" method - - Supported in v1.1 or higher. 
@@ -294,9 +287,6 @@ $ dbt build --select source_status:fresher+ --state path/to/prod/artifacts - - - ### The "group" method diff --git a/website/docs/reference/node-selection/state-comparison-caveats.md b/website/docs/reference/node-selection/state-comparison-caveats.md index 6ae156fddcf..baeeb7e4c75 100644 --- a/website/docs/reference/node-selection/state-comparison-caveats.md +++ b/website/docs/reference/node-selection/state-comparison-caveats.md @@ -60,13 +60,6 @@ dbt will do its best to capture *only* changes that are the result of modificati - iterative improvements to dbt's built-in detective abilities - better options for more complex projects, in the form of more-specific subselectors (see [this issue](https://github.com/dbt-labs/dbt-core/issues/2704)) - - -- v0.18.0: All env-aware logic results in false positives during state comparison, when comparing against a manifest generated with a different target. -- v0.19.0: dbt stores and compares unrendered Jinja expressions for configurations, allowing it to see past env-aware logic in `dbt_project.yml`. - - - State comparison is now able to detect env-aware config in `dbt_project.yml`. For instance, this target-based config would register as a modification in v0.18.0, but in v0.19.0 it no longer will: diff --git a/website/docs/reference/node-selection/syntax.md b/website/docs/reference/node-selection/syntax.md index a60d23cd16f..7c165b0f4ff 100644 --- a/website/docs/reference/node-selection/syntax.md +++ b/website/docs/reference/node-selection/syntax.md @@ -174,8 +174,6 @@ $ dbt run --select result:+ state:modified+ --defer --state ./ - Only supported by v1.1 or newer. When a job is selected, dbt Cloud will surface the artifacts from that job's most recent successful run. dbt will then use those artifacts to determine the set of fresh sources. In your job commands, you can signal to dbt to run and test only on these fresher sources and their children by including the `source_status:fresher+` argument. This requires both previous and current state to have the `sources.json` artifact be available. Or plainly said, both job states need to run `dbt source freshness`. @@ -188,14 +186,11 @@ dbt source freshness dbt build --select source_status:fresher+ ``` - For more example commands, refer to [Pro-tips for workflows](/guides/legacy/best-practices.md#pro-tips-for-workflows). ### The "source_status" status - - Only supported by v1.1 or newer. Another element of job state is the `source_status` of a prior dbt invocation. After executing `dbt source freshness`, for example, dbt creates the `sources.json` artifact which contains execution times and `max_loaded_at` dates for dbt sources. You can read more about `sources.json` on the ['sources'](/reference/artifacts/sources-json) page. @@ -210,4 +205,3 @@ After issuing one of the above commands, you can reference the source freshness $ dbt source freshness # must be run again to compare current to previous state $ dbt build --select source_status:fresher+ --state path/to/prod/artifacts ``` - diff --git a/website/docs/reference/node-selection/test-selection-examples.md b/website/docs/reference/node-selection/test-selection-examples.md index 85141c8cd01..52439d95d97 100644 --- a/website/docs/reference/node-selection/test-selection-examples.md +++ b/website/docs/reference/node-selection/test-selection-examples.md @@ -11,22 +11,10 @@ Like all resource types, tests can be selected **directly**, by methods and oper Unlike other resource types, tests can also be selected **indirectly**. 
If a selection method or operator includes a test's parent(s), the test will also be selected. [See below](#indirect-selection) for more details. - - - `v1.0.0`: Renamed the `--greedy` flag/property to `indirect_selection`, and set its default back to "eager" (pre-v0.20). You can achieve the "cautious" behavior introduced in v0.20 by setting the flag/property to `cautious`. - - - Test selection is powerful, and we know it can be tricky. To that end, we've included lots of examples below: ### Direct selection - - -`v1.0.0`: Renamed test types: "generic" (formerly "schema") and "singular" (formerly "data"). Removed support for the `--schema` and `--data` flags. - - - Run generic tests only: diff --git a/website/docs/reference/project-configs/asset-paths.md b/website/docs/reference/project-configs/asset-paths.md index 97204923cb9..1fb3cf9f260 100644 --- a/website/docs/reference/project-configs/asset-paths.md +++ b/website/docs/reference/project-configs/asset-paths.md @@ -15,12 +15,6 @@ asset-paths: [directorypath] ## Definition Optionally specify a custom list of directories to copy to the `target` directory as part of the `docs generate` command. This is useful for rendering images in your repository in your project documentation. - - -* `v0.18.0`: This configuration was introduced — see the [migration guide](/guides/migration/versions) for more details. - - - ## Default By default, dbt will not copy any additional files as part of docs generate, i.e. `asset-paths: []` diff --git a/website/docs/reference/project-configs/clean-targets.md b/website/docs/reference/project-configs/clean-targets.md index 119630b00b1..9b464840723 100644 --- a/website/docs/reference/project-configs/clean-targets.md +++ b/website/docs/reference/project-configs/clean-targets.md @@ -3,12 +3,6 @@ datatype: [directorypath] default_value: [target_path] --- - - -- **v1.0.0:** The `modules-path` has been updated to be [`packages-install-path`](/reference/project-configs/packages-install-path). The default value has also been updated to be `dbt-packages` from `dbt-modules`. - - - ```yml diff --git a/website/docs/reference/project-configs/config-version.md b/website/docs/reference/project-configs/config-version.md index 20947c03d62..804caf1328f 100644 --- a/website/docs/reference/project-configs/config-version.md +++ b/website/docs/reference/project-configs/config-version.md @@ -20,12 +20,7 @@ config-version: 2 ## Definition Specify your `dbt_project.yml` as using the v2 structure. - - -* `v0.17.0`: This configuration was introduced — see the [migration guide](/guides/migration/versions) for more details. -* `v1.5.0`: This configuration was made optional. - - + This configuration is optional. ## Default Without this configuration, dbt will assume your `dbt_project.yml` uses the version 1 syntax, which was deprecated in dbt v0.19.0. diff --git a/website/docs/reference/project-configs/model-paths.md b/website/docs/reference/project-configs/model-paths.md index 2129747af27..a0652432787 100644 --- a/website/docs/reference/project-configs/model-paths.md +++ b/website/docs/reference/project-configs/model-paths.md @@ -2,11 +2,6 @@ datatype: [directorypath] default_value: [models] --- - - -- **v1.0.0:** The config `source-paths` has been deprecated in favor of `model-paths`. 
- - diff --git a/website/docs/reference/project-configs/on-run-start-on-run-end.md b/website/docs/reference/project-configs/on-run-start-on-run-end.md index 2c5cde4c0c2..1ed8c570dd0 100644 --- a/website/docs/reference/project-configs/on-run-start-on-run-end.md +++ b/website/docs/reference/project-configs/on-run-start-on-run-end.md @@ -33,34 +33,6 @@ A SQL statement (or list of SQL statements) to be run at the start, or end, of t - - -### Grant privileges at the end of a run - - - -```yml -on-run-end: "grant select on all tables in schema {{ target.schema }} group transformer" - -``` - - - -### Grant multiple privileges at the end of a run - - - -```yml -on-run-end: - - "grant usage on schema {{ target.schema }} to group reporter" - - "grant select on all tables in schema {{ target.schema }} group reporter" - -``` - - - - - ### Grant privileges on all schemas that dbt uses at the end of a run This leverages the [schemas](/reference/dbt-jinja-functions/schemas) variable that is only available in an `on-run-end` hook. diff --git a/website/docs/reference/project-configs/packages-install-path.md b/website/docs/reference/project-configs/packages-install-path.md index 98142305357..157c630fd36 100644 --- a/website/docs/reference/project-configs/packages-install-path.md +++ b/website/docs/reference/project-configs/packages-install-path.md @@ -3,12 +3,6 @@ datatype: directorypath default_value: dbt_packages --- - - -- **v1.0.0:** The default config has changed from `modules-path` to `packages-install-path` with a new default value of `dbt_packages`. - - - ```yml diff --git a/website/docs/reference/project-configs/query-comment.md b/website/docs/reference/project-configs/query-comment.md index 4d72bd4fcff..b1a73605e55 100644 --- a/website/docs/reference/project-configs/query-comment.md +++ b/website/docs/reference/project-configs/query-comment.md @@ -30,14 +30,6 @@ A string to inject as a comment in each query that dbt runs against your databas The `query-comment` configuration can also call a macro that returns a string. - - -* `v0.15.0`: The `query-comment` configuration was introduced -* `v0.16.1`: Dictionary syntax introduced to allow comments to be appended -* `v0.20.0:` Introduced `job-label` argument for BigQuery job labels - - - ## Default By default, dbt will insert a comment at the top of your query containing the information including the dbt version, profile and target names, and node ids for the resources it runs. For example: @@ -149,13 +141,6 @@ select ... ### BigQuery: include query comment items as job labels - - - -* `v0.20.0:` Introduced `job-label` argument for BigQuery job labels - - - If `query-comment.job-label` is set to true, dbt will include the query comment items, if a dictionary, or the comment string, as job labels on the query it executes. These will be included in addition to labels specified in the [BigQuery-specific config](/reference/project-configs/query-comment#bigquery-include-query-comment-items-as-job-labels). diff --git a/website/docs/reference/project-configs/quoting.md b/website/docs/reference/project-configs/quoting.md index 92968ace1bd..821b920188c 100644 --- a/website/docs/reference/project-configs/quoting.md +++ b/website/docs/reference/project-configs/quoting.md @@ -28,13 +28,6 @@ Note that for BigQuery quoting configuration, `database` and `schema` should be ::: - - -* `v0.10.1`: This configuration was introduced with a default value of `true` for each adapter. 
-* `v0.11.0`: The default quoting config on Snowflake changed from `true` to `false` - - - ## Default The default values vary by database. diff --git a/website/docs/reference/project-configs/require-dbt-version.md b/website/docs/reference/project-configs/require-dbt-version.md index 892495dde45..85a502bff60 100644 --- a/website/docs/reference/project-configs/require-dbt-version.md +++ b/website/docs/reference/project-configs/require-dbt-version.md @@ -19,12 +19,6 @@ When you set this configuration, dbt sends a helpful error message for any user If this configuration is not specified, no version check will occur. - - -* `v0.13.0`: This configuration was introduced - - - :::info YAML Quoting This configuration needs to be interpolated by the YAML parser as a string. As such, you should quote the value of the configuration, taking care to avoid whitespace. For example: diff --git a/website/docs/reference/project-configs/seed-paths.md b/website/docs/reference/project-configs/seed-paths.md index 92f7c5aa91f..614bda62cd2 100644 --- a/website/docs/reference/project-configs/seed-paths.md +++ b/website/docs/reference/project-configs/seed-paths.md @@ -3,12 +3,6 @@ datatype: [directorypath] default_value: [data] --- - - -- **v1.0.0:** The config `data-paths` has been deprecated in favor of `seed-paths`. - - - ```yml diff --git a/website/docs/reference/project-configs/snapshot-paths.md b/website/docs/reference/project-configs/snapshot-paths.md index a623d48b20f..81b2759609d 100644 --- a/website/docs/reference/project-configs/snapshot-paths.md +++ b/website/docs/reference/project-configs/snapshot-paths.md @@ -14,12 +14,6 @@ snapshot-paths: [directorypath] ## Definition Optionally specify a custom list of directories where [snapshots](/docs/build/snapshots) are located. Note that you cannot co-locate models and snapshots. - - -* `v0.14.0`: Snapshots were introduced - - - ## Default By default, dbt will search for snapshots in the `snapshots` directory, i.e. `snapshot-paths: ["snapshots"]` diff --git a/website/docs/reference/project-configs/test-paths.md b/website/docs/reference/project-configs/test-paths.md index e3f3cd2ccce..e3d0e0b76fa 100644 --- a/website/docs/reference/project-configs/test-paths.md +++ b/website/docs/reference/project-configs/test-paths.md @@ -3,12 +3,6 @@ datatype: [directorypath] default_value: [test] --- - - -* `v1.0.0`: Generic tests can be defined in the `tests/generic` subfolder, in addition to the `macros/` directory - - - ```yml diff --git a/website/docs/reference/resource-configs/bigquery-configs.md b/website/docs/reference/resource-configs/bigquery-configs.md index 1df21af98e4..89a750f47bd 100644 --- a/website/docs/reference/resource-configs/bigquery-configs.md +++ b/website/docs/reference/resource-configs/bigquery-configs.md @@ -21,26 +21,6 @@ This will allow you to read and write from multiple BigQuery projects. Same for ### Partition clause - - -Before dbt v0.16.0, the `partition_by` configuration was supplied as string. While -the string specification syntax is still supported in dbt v0.16.0, it has been -deprecated and will be removed in a future release. **Note:** partitioning configs -using a range bucket *must* be supplied using the dictionary-style configuration as of -dbt v0.16.0. 
- -Example usage for versions of dbt < 0.16.0: - -```sql --- Partitioning by a timestamp field -{{ config( materialized='table', partition_by="date(created_at)" ) }} - --- Partitioning by a date field -{{ config( materialized='table', partition_by="created_date" ) }} -``` - - - BigQuery supports the use of a [partition by](https://cloud.google.com/bigquery/docs/data-definition-language#specifying_table_partitioning_options) clause to easily partition a by a column or expression. This option can help decrease latency and cost when querying large tables. Note that partition pruning [only works](https://cloud.google.com/bigquery/docs/querying-partitioned-tables#pruning_limiting_partitions) when partitions are filtered using literal values (so selecting partitions using a won't improve performance). The `partition_by` config can be supplied as a dictionary with the following format: @@ -61,7 +41,6 @@ The `partition_by` config can be supplied as a dictionary with the following for ``` #### Partitioning by a date or timestamp -Partitioning by hour, month or year is new in v0.19.0 When using a `datetime` or `timestamp` column to partition data, you can create partitions with a granularity of hour, day, month, or year. A `date` column supports granularity of day, month and year. Daily partitioning is the default for all column types. @@ -266,12 +245,6 @@ as ( #### Additional partition configs - - - - **v0.20.0:** Introduced `require_partition_filter` and `partition_expiration_days` - - - If your model has `partition_by` configured, you may optionally specify two additional configurations: - `require_partition_filter` (boolean): If set to `true`, anyone querying this model _must_ specify a partition filter, otherwise their query will fail. This is recommended for very large tables with obvious partitioning schemes, such as event streams grouped by day. Note that this will affect other dbt models or tests that try to select from this model, too. @@ -367,11 +340,7 @@ dbt supports the specification of BigQuery labels for the tables and BigQuery key-value pair entries for labels larger than 63 characters are truncated. **Configuring labels in a model file** @@ -489,12 +458,6 @@ strategy is selected. ### The `insert_overwrite` strategy - - - - **v0.16.0:** Introduced `insert_overwrite` incremental strategy - - - The `insert_overwrite` strategy generates a merge statement that replaces entire partitions in the destination table. **Note:** this configuration requires that the model is configured with a [Partition clause](#partition-clause). The `merge` statement that dbt generates @@ -587,12 +550,6 @@ _today_ and _yesterday_ every day that it is run. It is the fastest and cheapest way to incrementally update a table using dbt. If we wanted this to run more dynamically— let’s say, always for the past 3 days—we could leverage dbt’s baked-in [datetime macros](https://github.com/dbt-labs/dbt-core/blob/dev/octavius-catto/core/dbt/include/global_project/macros/etc/datetime.sql) and write a few of our own. - - - - **v0.19.0:** With the advent of truncated timestamp partitions in BigQuery, `timestamp`-type partitions are now treated as timestamps instead of dates for the purposes of filtering. Update `partitions_to_replace` accordingly. - - - Think of this as "full control" mode. You must ensure that expressions or literal values in the the `partitions` config have proper quoting when templated, and that they match the `partition_by.data_type` (`timestamp`, `datetime`, `date`, or `int64`). 
Otherwise, the filter in the incremental `merge` statement will raise an error. #### Dynamic partitions @@ -685,7 +642,6 @@ from {{ ref('events') }} ## Controlling table expiration -New in v0.18.0 By default, dbt-created tables never expire. You can configure certain model(s) to expire after a set number of hours by setting `hours_to_expiration`. @@ -721,8 +677,6 @@ select ... ## Authorized Views -New in v0.18.0 - If the `grant_access_to` config is specified for a model materialized as a view, dbt will grant the view model access to select from the list of datasets provided. See [BQ docs on authorized views](https://cloud.google.com/bigquery/docs/share-access-views) diff --git a/website/docs/reference/resource-configs/database.md b/website/docs/reference/resource-configs/database.md index 118c72b38e9..9c63b0ca457 100644 --- a/website/docs/reference/resource-configs/database.md +++ b/website/docs/reference/resource-configs/database.md @@ -22,14 +22,8 @@ The standard behavior of dbt is: To learn more about changing the way that dbt generates a relation's `database`, read [Using Custom Databases](/docs/build/custom-databases) - - -* `v0.13.0`: Support for the `database` config is added -* `v0.16.0`: The `generate_database_name` macro was added to control how the `database` config is used by dbt - - - ## Usage + ### Load seeds into the RAW database diff --git a/website/docs/reference/resource-configs/docs.md b/website/docs/reference/resource-configs/docs.md index 8169b5dc26c..4968ed5dc30 100644 --- a/website/docs/reference/resource-configs/docs.md +++ b/website/docs/reference/resource-configs/docs.md @@ -124,12 +124,6 @@ The docs field can be used to provide documentation-specific configuration to mo **Note:** hidden models will still appear in the dbt DAG visualization but will be identified as "hidden.” - - -* `v0.16.0`: This property was added - - - ## Default The default value for `show` is `true`. diff --git a/website/docs/reference/resource-configs/enabled.md b/website/docs/reference/resource-configs/enabled.md index 03d1598c931..b6d0961ee60 100644 --- a/website/docs/reference/resource-configs/enabled.md +++ b/website/docs/reference/resource-configs/enabled.md @@ -150,7 +150,6 @@ sources: - @@ -170,7 +169,6 @@ sources: - diff --git a/website/docs/reference/resource-configs/invalidate_hard_deletes.md b/website/docs/reference/resource-configs/invalidate_hard_deletes.md index 3e9f13b738d..ba5b37c5d71 100644 --- a/website/docs/reference/resource-configs/invalidate_hard_deletes.md +++ b/website/docs/reference/resource-configs/invalidate_hard_deletes.md @@ -4,7 +4,6 @@ description: "Invalidate_hard_deletes - Read this in-depth guide to learn about datatype: column_name --- -New in v0.19.0 ```jinja2 diff --git a/website/docs/reference/resource-configs/materialize-configs.md b/website/docs/reference/resource-configs/materialize-configs.md index 1338647a2a6..6976aa84061 100644 --- a/website/docs/reference/resource-configs/materialize-configs.md +++ b/website/docs/reference/resource-configs/materialize-configs.md @@ -8,11 +8,9 @@ id: "materialize-configs" ### Clusters - -- **v1.2.0:** Enable the configuration of [clusters](https://github.com/MaterializeInc/materialize/blob/main/misc/dbt-materialize/CHANGELOG.md#120---2022-08-31). +Enable the configuration of [clusters](https://github.com/MaterializeInc/materialize/blob/main/misc/dbt-materialize/CHANGELOG.md#120---2022-08-31). 
- The default [cluster](https://materialize.com/docs/overview/key-concepts/#clusters) that is used to maintain materialized views or indexes can be configured in your [profile](/docs/core/connect-data-platform/profiles.yml) using the `cluster` connection parameter. To override the cluster that is used for specific models (or groups of models), use the `cluster` configuration parameter. @@ -45,11 +43,7 @@ Materialize, at its core, is a real-time database that delivers incremental view ### Indexes - - -- **v1.2.0:** Enable additional configuration for [indexes](https://github.com/MaterializeInc/materialize/blob/main/misc/dbt-materialize/CHANGELOG.md#120---2022-08-31). - - +Enable additional configuration for [indexes](https://github.com/MaterializeInc/materialize/blob/main/misc/dbt-materialize/CHANGELOG.md#120---2022-08-31). Like in any standard relational database, you can use [indexes](https://materialize.com/docs/overview/key-concepts/#indexes) to optimize query performance in Materialize. Improvements can be significant, reducing response times down to single-digit milliseconds. @@ -85,12 +79,6 @@ select ... ### Tests - - -- **v1.1.1:** Provide support for storing the results of a test query in a materialized view using the `store_failures` config. - - - If you set the optional `--store-failures` flag or [`store_failures` config](/reference/resource-configs/store_failures), dbt will create a materialized view for each configured test that can keep track of failures over time. By default, test views are created in a schema suffixed with `dbt_test__audit`. To specify a custom suffix, use the `schema` config. diff --git a/website/docs/reference/resource-configs/meta.md b/website/docs/reference/resource-configs/meta.md index d31b9e36e45..d24c5fbaee1 100644 --- a/website/docs/reference/resource-configs/meta.md +++ b/website/docs/reference/resource-configs/meta.md @@ -4,12 +4,6 @@ datatype: "{}" default_value: {} --- - - -* `v0.21.0`: `meta` is now a config that can be set in `dbt_project.yml` and as a `config` YAML property for some resource types. It is applied hierarchically and merges on a per-key basis. - - - - - - Support for this config on Redshift, Postgres, and Snowflake is new in 0.17.0 - - Support for column-level docs persistence is new for all databases in 0.17.0 - - - ## Support The `persist_docs` config is supported on the most widely used dbt adapters: @@ -151,12 +144,6 @@ Some known issues and limitations: - - -- Column names that must be quoted, such as column names containing special characters, will cause runtime errors if column-level `persist_docs` is enabled. This is fixed in v1.2. - - - diff --git a/website/docs/reference/resource-configs/postgres-configs.md b/website/docs/reference/resource-configs/postgres-configs.md index 97377b7ab9f..97a695ee12e 100644 --- a/website/docs/reference/resource-configs/postgres-configs.md +++ b/website/docs/reference/resource-configs/postgres-configs.md @@ -28,12 +28,6 @@ In dbt-postgres, the following incremental materialization strategies are suppor ### Unlogged - - - - **v0.14.1:** Introduced native support for `unlogged` config - - - "Unlogged" tables can be considerably faster than ordinary tables, as they are not written to the write-ahead log nor replicated to read replicas. They are also considerably less safe than ordinary tables. See [Postgres docs](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-UNLOGGED) for details. 
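To ground the `persist_docs` discussion above, enabling relation- and column-level persistence for all models might look like this sketch:

```yml
# dbt_project.yml (a sketch): persist descriptions to the database for all models
models:
  +persist_docs:
    relation: true
    columns: true
```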
@@ -59,12 +53,6 @@ models: While Postgres works reasonably well for datasets smaller than about 10m rows, database tuning is sometimes required. It's important to create indexes for columns that are commonly used in joins or where clauses. - - - - **v0.20.0:** Introduced native support for `indexes` config - - - Table models, incremental models, seeds, snapshots, and materialized views may have a list of `indexes` defined. Each Postgres index can have three components: - `columns` (list, required): one or more columns on which the index is defined - `unique` (boolean, optional): whether the index should be [declared unique](https://www.postgresql.org/docs/9.4/indexes-unique.html) diff --git a/website/docs/reference/resource-configs/pre-hook-post-hook.md b/website/docs/reference/resource-configs/pre-hook-post-hook.md index 1660c50049b..297d6975d6f 100644 --- a/website/docs/reference/resource-configs/pre-hook-post-hook.md +++ b/website/docs/reference/resource-configs/pre-hook-post-hook.md @@ -115,13 +115,6 @@ Pre- and post-hooks can also call macros that return SQL statements. If your mac dbt aims to provide all the boilerplate SQL you need (DDL, DML, and DCL) via out-of-the-box functionality, which you can configure quickly and concisely. In some cases, there may be SQL that you want or need to run, specific to functionality in your data platform, which dbt does not (yet) offer as a built-in feature. In those cases, you can write the exact SQL you need, using dbt's compilation context, and pass it into a `pre-` or `post-` hook to run before or after your model, seed, or snapshot. - - -* `v0.12.2`: The `post_hook` alias for config blocks was introduced. Prior to this, users needed to use the alternative config syntax to apply pre- and post-hooks. - - - - ## Examples @@ -167,69 +160,6 @@ See: [Apache Spark docs on `ANALYZE TABLE`](https://spark.apache.org/docs/latest - - -### Grant privileges on a model - - - -```yml - -models: - +post-hook: "grant select on {{ this }} to group reporter" - -``` - - - -### Grant multiple privileges on a model - - - -```yml - -models: - +post-hook: - - "grant select on {{ this }} to group reporter" - - "grant select on {{ this }} to group transformer" - -``` - - - -### Call a macro to grant privileges on a model - - - -```yml - -models: - +post-hook: "{{ grant_select(this) }}" - -``` - - - - -### Grant privileges on a directory of models - - - -```yml - -models: - jaffle_shop: # this is the project name - marts: - marketing: - # this will be applied to all models in marts/marketing/ - +post-hook: "{{ grant_select(this) }}" - -``` - - - - - ### Additional examples We've compiled some more in-depth examples [here](/docs/build/hooks-operations#additional-examples). diff --git a/website/docs/reference/resource-configs/severity.md b/website/docs/reference/resource-configs/severity.md index c89c6db0716..25bab9647d6 100644 --- a/website/docs/reference/resource-configs/severity.md +++ b/website/docs/reference/resource-configs/severity.md @@ -6,14 +6,6 @@ resource_types: [tests] datatype: string --- - - -* `v0.14.0`: Introduced `severity` config -* `v0.20.0`: Introduced `error_if` + `warn_if` configs. Enabled configuration of tests from `dbt_project.yml` -* `v0.21.0`: Introduced `config` property for tests - - - Tests return a number of failures—most often, this is the count of rows returned by the test query, but it could be a [custom calculation](/reference/resource-configs/fail_calc). Generally, if the number of failures is nonzero, the test returns an error. 
diff --git a/website/docs/reference/resource-configs/severity.md b/website/docs/reference/resource-configs/severity.md
index c89c6db0716..25bab9647d6 100644
--- a/website/docs/reference/resource-configs/severity.md
+++ b/website/docs/reference/resource-configs/severity.md
@@ -6,14 +6,6 @@ resource_types: [tests]
datatype: string
---

-<Changelog>
-
-* `v0.14.0`: Introduced `severity` config
-* `v0.20.0`: Introduced `error_if` + `warn_if` configs. Enabled configuration of tests from `dbt_project.yml`
-* `v0.21.0`: Introduced `config` property for tests
-
-</Changelog>
-
Tests return a number of failures—most often, this is the count of rows returned by the test query, but it could be a [custom calculation](/reference/resource-configs/fail_calc). Generally, if the number of failures is nonzero, the test returns an error.

This makes sense, as test queries are designed to return all the rows you _don't_ want: duplicate records, null values, etc.

It's possible to configure tests to return warnings instead of errors, or to make the test status conditional on the number of failures returned. Maybe 1 duplicate record can count as a warning, but 10 duplicate records should count as an error.
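That threshold logic maps directly onto the `warn_if` and `error_if` configs this page documents. A minimal sketch, assuming a hypothetical `orders` model with a `unique` test:

```yml
models:
  - name: orders            # hypothetical model
    columns:
      - name: order_id
        tests:
          - unique:
              config:
                warn_if: ">0"    # 1 to 10 duplicates: warn
                error_if: ">10"  # more than 10 duplicates: error
```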
diff --git a/website/docs/reference/resource-configs/singlestore-configs.md b/website/docs/reference/resource-configs/singlestore-configs.md
index f503779f0fc..0c93d557a8b 100644
--- a/website/docs/reference/resource-configs/singlestore-configs.md
+++ b/website/docs/reference/resource-configs/singlestore-configs.md
@@ -3,13 +3,6 @@ title: "SingleStore configurations"
id: "singlestore-configs"
---

-<Changelog>
-
-  - **v1.1.2:** Added support for for `storage_type`, `indexes`, `primary_key`, `sort_key`, `shard_key`, `unique_table_key`, `charset`, `collation` options for creating SingleStore tables.
-
-</Changelog>
-
-
## Performance Optimizations

[SingleStore Physical Database Schema Design documentation](https://docs.singlestore.com/managed-service/en/create-a-database/physical-database-schema-design/concepts-of-physical-database-schema-design.html) is helpful if you want to use specific options (that are described below) in your dbt project.
diff --git a/website/docs/reference/resource-configs/spark-configs.md b/website/docs/reference/resource-configs/spark-configs.md
index 95a853107f6..ce3b317f0f1 100644
--- a/website/docs/reference/resource-configs/spark-configs.md
+++ b/website/docs/reference/resource-configs/spark-configs.md
@@ -29,12 +29,6 @@ When materializing a model as `table`, you may include several optional configs

## Incremental models

-<Changelog>
-
-  - `dbt-spark==0.19.0`: Added the `append` strategy as default for all platforms, file types, and connection methods.
-
-</Changelog>
-
dbt seeks to offer useful, intuitive modeling abstractions by means of its built-in configurations and materializations. Because there is so much variance between Apache Spark clusters out in the world—not to mention the powerful features offered to Databricks users by the Delta file format and custom runtime—making sense of all the available options is an undertaking in its own right.

Alternatively, you can use Apache Iceberg or Apache Hudi file format with Apache Spark runtime for building incremental models.

@@ -192,13 +186,6 @@ insert overwrite table analytics.spark_incremental

### The `merge` strategy

-<Changelog>
-
-  - `dbt-spark==0.15.3`: Introduced `merge` incremental strategy
-
-</Changelog>
-
-
**Usage notes:** The `merge` incremental strategy requires:
- `file_format: delta, iceberg or hudi`
- Databricks Runtime 5.1 and above for delta file format

@@ -294,12 +281,6 @@ or `show table extended in [database] like '*'`.

## Always `schema`, never `database`

-<Changelog>
-
-  - `dbt-spark==0.17.0` ended use of `database` in all cases.
-
-</Changelog>
-
Apache Spark uses the terms "schema" and "database" interchangeably. dbt understands `database` to exist at a higher level than `schema`. As such, you should _never_ use or set `database` as a node config or in the target profile when running dbt-spark.
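To ground the `merge` usage notes above, here is a sketch of an incremental model config that satisfies them. The model name, file format choice, and key are hypothetical:

```sql
-- models/events/user_sessions.sql (hypothetical model)
{{ config(
    materialized='incremental',
    incremental_strategy='merge',
    file_format='delta',        -- or iceberg / hudi, per the usage notes
    unique_key='session_id'     -- rows matching this key are updated in place
) }}

select ...
```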
diff --git a/website/docs/reference/resource-configs/store_failures.md b/website/docs/reference/resource-configs/store_failures.md
index 62ae33ba713..3c965179211 100644
--- a/website/docs/reference/resource-configs/store_failures.md
+++ b/website/docs/reference/resource-configs/store_failures.md
@@ -3,13 +3,6 @@ resource_types: [tests]
datatype: boolean
---

-<Changelog>
-
-* `v0.20.0`: Introduced `store_failures` config and functionality
-* `v0.21.0`: Introduced `config` property for tests
-
-</Changelog>
-
The configured test(s) will store their failures when `dbt test --store-failures` is invoked.

## Description
diff --git a/website/docs/reference/resource-configs/where.md b/website/docs/reference/resource-configs/where.md
index b0953e6f3d4..dbb3b66e901 100644
--- a/website/docs/reference/resource-configs/where.md
+++ b/website/docs/reference/resource-configs/where.md
@@ -3,13 +3,6 @@ resource_types: [tests]
datatype: string
---

-<Changelog>
-
-* `v0.20.0`: Introduced `where` config
-* `v0.21.0`: Introduced `config` property for tests. Reimplemented `where` config with `get_where_subquery` macro
-
-</Changelog>
-
### Definition

Filter the resource being tested (model, source, seed, or snapshot).
diff --git a/website/docs/reference/resource-properties/config.md b/website/docs/reference/resource-properties/config.md
index 1d3a2de6592..e6021def852 100644
--- a/website/docs/reference/resource-properties/config.md
+++ b/website/docs/reference/resource-properties/config.md
@@ -108,7 +108,6 @@ version: 2
-
@@ -127,8 +126,6 @@ sources:
-
-
diff --git a/website/docs/reference/resource-properties/freshness.md b/website/docs/reference/resource-properties/freshness.md
index ae39a764cc1..f332f5a1b8f 100644
--- a/website/docs/reference/resource-properties/freshness.md
+++ b/website/docs/reference/resource-properties/freshness.md
@@ -88,13 +88,6 @@ This is particularly useful if:
- You are using Snowflake, Databricks or Spark with large tables, and this results in a performance benefit

-<Changelog>
-
-* `v0.15.0`: This property was introduced
-
-</Changelog>
-
-
## Examples

### Complete example
diff --git a/website/docs/reference/resource-properties/quote.md b/website/docs/reference/resource-properties/quote.md
index 3552d1d3d3a..50bf4c08c40 100644
--- a/website/docs/reference/resource-properties/quote.md
+++ b/website/docs/reference/resource-properties/quote.md
@@ -115,12 +115,6 @@ analyses:

## Definition

The `quote` field can be used to enable or disable quoting for column names.

-<Changelog>
-
-* `v0.16.0`: This configuration was added
-
-</Changelog>
-
## Default

The default quoting value is `false`
diff --git a/website/docs/reference/resource-properties/tests.md b/website/docs/reference/resource-properties/tests.md
index f25e5306542..6e2c02c6bc5 100644
--- a/website/docs/reference/resource-properties/tests.md
+++ b/website/docs/reference/resource-properties/tests.md
@@ -300,8 +300,6 @@ models:

Check out the guide on writing a [custom generic test](/guides/best-practices/writing-custom-generic-tests) for more information.

-
-
### Custom test name

By default, dbt will synthesize a name for your generic test by concatenating:

@@ -438,10 +436,6 @@ $ dbt test

**If using [`store_failures`](/reference/resource-configs/store_failures):** dbt uses each test's name as the name of the table in which to store any failing records. If you have defined a custom name for one test, that custom name will also be used for its table of failures.

You may optionally configure an [`alias`](/reference/resource-configs/alias) for the test, to separately control both the name of the test (for metadata) and the name of its database table (for storing failures).

-
-
-
-
### Alternative format for defining tests

When defining a generic test with several arguments and configurations, the YAML can look and feel unwieldy. If you find it easier, you can define the same test properties as top-level keys of a single dictionary, by providing the test name as `test_name` instead. It's totally up to you.

@@ -470,5 +464,3 @@ models:

```

-
-
diff --git a/website/docs/reference/seed-properties.md b/website/docs/reference/seed-properties.md
index d8b72737646..85e7be21ae1 100644
--- a/website/docs/reference/seed-properties.md
+++ b/website/docs/reference/seed-properties.md
@@ -2,12 +2,6 @@ title: Seed properties
---

-<Changelog>
-
-  - **v1.0.0:** The default path for [`seed-paths`](/reference/project-configs/seed-paths) (formerly `data-paths`) is now `seeds`.
-
-</Changelog>
-
Seed properties can be declared in `.yml` files under a `seed` key.

We recommend that you put them in the `seeds/` directory. You can name these files `whatever_you_want.yml`, and nest them arbitrarily deeply in subfolders within that directory.

@@ -42,9 +36,3 @@ seeds:
      - name: ... # declare properties of additional seeds
```

-
-<Changelog>
-
-* `v0.16.0`: The ability to declare seed properties was introduced. Prior to this, you could declare seed properties under the `models:` key (confusing, right?). Support for declaring seed properties under a `models:` key will be removed in a future release.
-
-</Changelog>
diff --git a/website/docs/reference/snapshot-properties.md b/website/docs/reference/snapshot-properties.md
index 48c5328a400..301747e9325 100644
--- a/website/docs/reference/snapshot-properties.md
+++ b/website/docs/reference/snapshot-properties.md
@@ -40,9 +40,3 @@ snapshots:
```

-
-<Changelog>
-
-* `v0.16.0`: The ability to declare snapshot properties was introduced.
-
-</Changelog>
diff --git a/website/docs/reference/source-configs.md b/website/docs/reference/source-configs.md
index 43b9bfbff6b..3f9a19e78ca 100644
--- a/website/docs/reference/source-configs.md
+++ b/website/docs/reference/source-configs.md
@@ -37,8 +37,6 @@ sources:

-
-
```yaml

@@ -57,19 +55,14 @@ sources:

-
-
## Configuring sources

-
-
Sources can be configured via a `config:` block within their `.yml` definitions, or from the `dbt_project.yml` file under the `sources:` key.

This configuration is most useful for configuring sources imported from [a package](/docs/build/packages). You can disable sources imported from a package to prevent them from rendering in the documentation, or to prevent [source freshness checks](/docs/build/sources#snapshotting-source-data-freshness) from running on source tables imported from packages.

-
### Examples

#### Disable all sources imported from a package

@@ -89,8 +82,6 @@ sources:

-
-
#### Conditionally enable a single source

When defining a source, you can disable the entire source, or specific source tables, using the inline `config` property:

@@ -130,8 +121,6 @@ sources:

-
-
#### Disable a single source from a package

To disable a specific source from another package, qualify the resource path for your configuration with both a package name and a source name. In this case, we're disabling the `clickstream` source from the `events` package.
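For concreteness, the qualified resource path described in that last example looks like this in `dbt_project.yml`, using the same `events` package and `clickstream` source named in the passage:

```yml
sources:
  events:            # package name
    clickstream:     # source name
      +enabled: false
```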
diff --git a/website/sidebars.js b/website/sidebars.js
index be4e20e75e1..af9482a8ddf 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -1203,6 +1203,7 @@ const sidebarSettings = {
        "community/resources/oss-expectations",
        "community/resources/oss-projects",
        "community/resources/contributor-license-agreements",
+       "community/resources/jobs-terms-and-conditions",
        "community/resources/speaking-at-a-meetup",
      ],
    },
diff --git a/website/snippets/_sl-plan-info.md b/website/snippets/_sl-plan-info.md
index 5294dab39d8..20ec4b5dd44 100644
--- a/website/snippets/_sl-plan-info.md
+++ b/website/snippets/_sl-plan-info.md
@@ -1 +1 @@
-During {props.cycle}, you can access {props.product} on {props.plan} multi-tenant plans {props.instance} (cloud.getdbt.com login). It's available on dbt v1.6 or higher. dbt Cloud Developer plans and dbt Core users can use MetricFlow to define and test metrics locally, but can't dynamically query them with integrated tools.

+To define and query metrics with the {props.product}, you must be on a {props.plan} multi-tenant plan, {props.instance} (additional region support coming soon).

+The re-released dbt Semantic Layer is available on dbt v1.6 or higher. dbt Core users can use the MetricFlow CLI to define metrics in their local project, but won't be able to dynamically query them with integrated tools.


diff --git a/website/snippets/_sl-test-and-query-metrics.md b/website/snippets/_sl-test-and-query-metrics.md
index 0ccea4dc3e8..323ba2d83ad 100644
--- a/website/snippets/_sl-test-and-query-metrics.md
+++ b/website/snippets/_sl-test-and-query-metrics.md
@@ -4,11 +4,11 @@ Support for testing or querying metrics in the dbt Cloud IDE is not available in

You can use the **Preview** or **Compile** buttons in the IDE to run semantic validations and make sure your metrics are defined. You can [dynamically query metrics](#connect-and-query-api) with integrated tools on a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) plan using the [Semantic Layer API](/docs/dbt-cloud-apis/sl-api-overview).

-Currently, you can test metrics locally using the MetricFlow CLI. dbt Cloud IDE support is coming soon. Alternatively, you can test using SQL client tools like DataGrip, DBeaver, or RazorSQL.
+Currently, you can define and test metrics using the MetricFlow CLI. dbt Cloud IDE support is coming soon. Alternatively, you can test using SQL client tools like DataGrip, DBeaver, or RazorSQL.

:::

-This section will explain how you can test and query metrics locally using the MetricFlow CLI (dbt Cloud IDE support coming soon).
+This section will explain how you can test and query metrics using the MetricFlow CLI (dbt Cloud IDE support coming soon).

Before you begin, you'll need to install the [MetricFlow CLI](/docs/build/metricflow-cli) package and make sure you run at least one model.

### Install MetricFlow
diff --git a/website/snippets/_v2-sl-prerequisites.md b/website/snippets/_v2-sl-prerequisites.md
index 632e2af6412..9fdc3b53143 100644
--- a/website/snippets/_v2-sl-prerequisites.md
+++ b/website/snippets/_v2-sl-prerequisites.md
@@ -9,7 +9,7 @@ To use the Semantic Layer, you must:
- Create a successful run in the environment where you configure the Semantic Layer.
  - **Note:** Semantic Layer currently supports the Deployment environment for querying. (_development querying experience coming soon_)
- Set up the [Semantic Layer API](/docs/dbt-cloud-apis/sl-api-overview) in the integrated tool to import metric definitions.
-  - **Note:** dbt Core or Developer accounts can only query data manually using the [MetricFlow CLI](/docs/build/metricflow-cli) and SQL. To dynamically query metrics using external tools, you must have a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account with access to the Semantic Layer API.
+ - **Note:** To access the API and query metrics in downstream tools, you must have a dbt Cloud [Team or Enterprise](https://www.getdbt.com/pricing/) account. dbt Core or Developer accounts can define metrics with the [MetricFlow CLI](/docs/build/metricflow-cli) or [dbt Cloud IDE](/docs/cloud/dbt-cloud-ide/develop-in-the-cloud) but won't be able to dynamically query them.
- Understand [MetricFlow's](/docs/build/about-metricflow) key concepts, which powers the revamped dbt Semantic Layer.
diff --git a/website/snippets/core-versions-table.md b/website/snippets/core-versions-table.md
index 7860ac90cb3..431e1f08b4c 100644
--- a/website/snippets/core-versions-table.md
+++ b/website/snippets/core-versions-table.md
@@ -8,7 +8,7 @@
| [**v1.4**](/guides/migration/versions/upgrading-to-v1.4) | Jan 25, 2023 | Critical | Jan 25, 2024 |
| [**v1.3**](/guides/migration/versions/upgrading-to-v1.3) | Oct 12, 2022 | Critical | Oct 12, 2023 |
| [**v1.2**](/guides/migration/versions/upgrading-to-v1.2) | Jul 26, 2022 | End of Life* ⚠️ | Jul 26, 2023 |
-| [**v1.1**](/guides/migration/versions/upgrading-to-v1.1) ⚠️ | Apr 28, 2022 | End of Life* ⚠️ | Apr 28, 2023 |
+| [**v1.1**](/guides/migration/versions/upgrading-to-v1.1) ⚠️ | Apr 28, 2022 | Deprecated ⛔️ | Deprecated ⛔️ |
| [**v1.0**](/guides/migration/versions/upgrading-to-v1.0) ⚠️ | Dec 3, 2021 | Deprecated ⛔️ | Deprecated ⛔️ |
| **v0.X** ⛔️ | (Various dates) | Deprecated ⛔️ | Deprecated ⛔️ |
_*All versions of dbt Core since v1.0 are available in dbt Cloud until further notice. Versions that are EOL do not receive any fixes. For the best support, we recommend upgrading to a version released within the past 12 months._
diff --git a/website/snippets/tutorial-document-your-models.md b/website/snippets/tutorial-document-your-models.md
index dd9e1592145..9913dbcd1d7 100644
--- a/website/snippets/tutorial-document-your-models.md
+++ b/website/snippets/tutorial-document-your-models.md
@@ -40,7 +40,12 @@ Adding [documentation](/docs/collaborate/documentation) to your project allows y
          tests:
            - accepted_values:
                values: ['placed', 'shipped', 'completed', 'return_pending', 'returned']
-
+      - name: customer_id
+        tests:
+          - not_null
+          - relationships:
+              to: ref('stg_customers')
+              field: customer_id
  ```
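Tying together the MetricFlow CLI steps referenced in the snippets above, here is a hypothetical local session. The metric name `order_total` is made up, and this assumes the CLI is installed per the install step and at least one model has been run:

```shell
# Confirm that your semantic models and metrics parse
mf list metrics

# Query a metric, grouped by the standard time dimension
mf query --metrics order_total --group-by metric_time
```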
diff --git a/website/static/img/docs/dbt-cloud/disconnect-repo.gif b/website/static/img/docs/dbt-cloud/disconnect-repo.gif
new file mode 100644
index 00000000000..135ae789fa8
Binary files /dev/null and b/website/static/img/docs/dbt-cloud/disconnect-repo.gif differ
diff --git a/website/static/img/docs/dbt-cloud/on-premises/disconnect-repo.gif b/website/static/img/docs/dbt-cloud/on-premises/disconnect-repo.gif
new file mode 100644
index 00000000000..135ae789fa8
Binary files /dev/null and b/website/static/img/docs/dbt-cloud/on-premises/disconnect-repo.gif differ
diff --git a/website/static/img/docs/dbt-cloud/on-premises/self-signed-cert.png b/website/static/img/docs/dbt-cloud/on-premises/self-signed-cert.png
deleted file mode 100644
index 08ea839b002..00000000000
Binary files a/website/static/img/docs/dbt-cloud/on-premises/self-signed-cert.png and /dev/null differ
diff --git a/website/static/img/docs/release-notes/dbt-cloud-versions.png b/website/static/img/docs/release-notes/dbt-cloud-versions.png
new file mode 100644
index 00000000000..26c9f5fa0a2
Binary files /dev/null and b/website/static/img/docs/release-notes/dbt-cloud-versions.png differ