diff --git a/Makefile b/Makefile
index 98bbc3e19..a37088c22 100644
--- a/Makefile
+++ b/Makefile
@@ -45,7 +45,12 @@ start.pink: __start__
__start__:
@env $$(cat .dev.env | xargs) PORT=${PORT} LOGFLARE_GRPC_PORT=${LOGFLARE_GRPC_PORT} iex --sname ${ERL_NAME} --cookie ${ERL_COOKIE} -S mix phx.server
-.PHONY: __start__
+
+migrate:
+ @env $$(cat .dev.env | xargs) mix ecto.migrate
+
+
+.PHONY: __start__ migrate
# Encryption and decryption of secrets
# Usage:
@@ -87,7 +92,6 @@ $(addprefix decrypt.,${envs}): decrypt.%: \
.$$*.gcloud.json \
.$$*.env \
.$$*.cacert.pem \
- .$$*.cacert.key \
.$$*.cert.key \
.$$*.cert.pem \
.$$*.db-client-cert.pem \
@@ -98,7 +102,6 @@ $(addprefix encrypt.,${envs}): encrypt.%: \
.$$*.gcloud.json.enc \
.$$*.env.enc \
.$$*.cacert.pem.enc \
- .$$*.cacert.key.enc \
.$$*.cert.key.enc \
.$$*.cert.pem.enc \
.$$*.db-client-cert.pem.enc \
diff --git a/VERSION b/VERSION
index 0683f2ae0..afa2b3515 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.7.15
\ No newline at end of file
+1.8.0
\ No newline at end of file
diff --git a/cloudbuild/.dev.env.enc b/cloudbuild/.dev.env.enc
index 5d7e35727..bc6b61aec 100644
Binary files a/cloudbuild/.dev.env.enc and b/cloudbuild/.dev.env.enc differ
diff --git a/cloudbuild/.prod.env.enc b/cloudbuild/.prod.env.enc
index 871fa989c..f381ab4e0 100644
Binary files a/cloudbuild/.prod.env.enc and b/cloudbuild/.prod.env.enc differ
diff --git a/cloudbuild/.staging.env.enc b/cloudbuild/.staging.env.enc
index 59c93f3dc..2e3d5c6a2 100644
Binary files a/cloudbuild/.staging.env.enc and b/cloudbuild/.staging.env.enc differ
diff --git a/cloudbuild/startup.sh b/cloudbuild/startup.sh
index f0853c01d..0965282d8 100644
--- a/cloudbuild/startup.sh
+++ b/cloudbuild/startup.sh
@@ -1,4 +1,9 @@
#! /bin/sh
+if [ -z "$LOGFLARE_DB_ENCRYPTION_KEY" ]; then
+ echo "LOGFLARE_DB_ENCRYPTION_KEY is not set!" 1>&2
+ exit 1
+fi
+echo $?
# wait for networking to be ready before starting Erlang
echo 'Sleeping for 15 seconds for GCE networking to be ready...'
diff --git a/config/config.exs b/config/config.exs
index 72a6eaef9..9013c1c21 100644
--- a/config/config.exs
+++ b/config/config.exs
@@ -6,12 +6,17 @@
import Config
# General application configuration
+
+hardcoded_encryption_key = "Q+IS7ogkzRxsj+zAIB1u6jNFquxkFzSrBZXItN27K/Q="
+
config :logflare,
ecto_repos: [Logflare.Repo],
# https://cloud.google.com/compute/docs/instances/deleting-instance#delete_timeout
# preemtible is 30 seconds from shutdown to sigterm
# normal instances can be more than 90 seconds
- sigterm_shutdown_grace_period_ms: 15_000
+ sigterm_shutdown_grace_period_ms: 15_000,
+ encryption_key_fallback: hardcoded_encryption_key,
+ encryption_key_default: hardcoded_encryption_key
config :logflare, Logflare.Alerting, min_cluster_size: 1, enabled: true
@@ -129,4 +134,6 @@ config :opentelemetry,
span_processor: :batch,
traces_exporter: :none
+config :logflare, Logflare.Vault, json_library: Jason
+
import_config "#{Mix.env()}.exs"
diff --git a/config/runtime.exs b/config/runtime.exs
index 6c462cbf6..77d1371d8 100644
--- a/config/runtime.exs
+++ b/config/runtime.exs
@@ -23,7 +23,9 @@ config :logflare,
single_tenant: System.get_env("LOGFLARE_SINGLE_TENANT", "false") == "true",
supabase_mode: System.get_env("LOGFLARE_SUPABASE_MODE", "false") == "true",
api_key: System.get_env("LOGFLARE_API_KEY"),
- cache_stats: System.get_env("LOGFLARE_CACHE_STATS", "false") == "true"
+ cache_stats: System.get_env("LOGFLARE_CACHE_STATS", "false") == "true",
+ encryption_key_default: System.get_env("LOGFLARE_DB_ENCRYPTION_KEY"),
+ encryption_key_retired: System.get_env("LOGFLARE_DB_ENCRYPTION_KEY_RETIRED")
]
|> filter_nil_kv_pairs.()
diff --git a/config/test.exs b/config/test.exs
index 9e9ff026a..3fcbc09aa 100644
--- a/config/test.exs
+++ b/config/test.exs
@@ -7,7 +7,8 @@ config :logflare, LogflareWeb.Endpoint,
server: false
config :logflare,
- env: :test
+ env: :test,
+ encryption_key_default: "Q+IS7ogkzRxsj+zAIB1u6jNFquxkFzSrBZXItN27K/Q="
config :logflare, Logflare.Cluster.Utils, min_cluster_size: 1
diff --git a/docs/docs.logflare.com/docs/self-hosting/index.md b/docs/docs.logflare.com/docs/self-hosting/index.md
index ff9f1bd2c..2ff8a9408 100644
--- a/docs/docs.logflare.com/docs/self-hosting/index.md
+++ b/docs/docs.logflare.com/docs/self-hosting/index.md
@@ -19,24 +19,26 @@ All browser authentication will be disabled when in single-tenant mode.
### Common Configuration
-| Env Var | Type | Description |
-| -------------------------------------- | ------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `LOGFLARE_SINGLE_TENANT` | Boolean, defaults to `false` | If enabled, a singular user will be seeded. All browser usage will default to the user. |
-| `LOGFLARE_API_KEY` | string, defaults to `nil` | If set, this API Key can be used for interacting with the Logflare API. API key will be automatically generated if not set. |
-| `LOGFLARE_SUPABASE_MODE` | Boolean, defaults to `false` | A special mode for Logflare, where Supabase-specific resources will be seeded. Intended for Suapbase self-hosted usage. |
-| `PHX_HTTP_PORT` | Integer, defaults to `4000` | Allows configuration of the HTTP server port. |
-| `DB_SCHEMA` | String, defaults to `nil` | Allows configuration of the database schema to scope Logflare operations. |
-| `LOGFLARE_LOG_LEVEL` | String, defaults to `info`.
Options: `error`,`warning`, `info` | Allows runtime configuration of log level. |
-| `LOGFLARE_NODE_HOST` | string, defaults to `127.0.0.1` | Sets node host on startup, which affects the node name `logflare@` |
-| `LOGFLARE_LOGGER_METADATA_CLUSTER` | string, defaults to `nil` | Sets global logging metadata for the cluster name. Useful for filtering logs by cluster name. |
-| `LOGFLARE_PUBSUB_POOL_SIZE` | Integer, defaults to `10` | Sets the number of `Phoenix.PubSub.PG2` partitions to be created. Should be configured to the number of cores of your server for optimal multi-node performance. |
-| `LOGFLARE_ALERTS_ENABLED` | Boolean, defaults to `true` | Flag for enabling and disabling query alerts. |
-| `LOGFLARE_ALERTS_MIN_CLUSTER_SIZE` | Integer, defaults to `1` | Sets the required cluster size for Query Alerts to be run. If cluster size is below the provided value, query alerts will not run. |
-| `LOGFLARE_MIN_CLUSTER_SIZE` | Integer, defaults to `1` | Sets the target cluster size, and emits a warning log periodically if the cluster is below the set number of nodes.. |
-| `LOGFLARE_OTEL_ENDPOINT` | String, defaults to `nil` | Sets the OpenTelemetry Endpoint to send traces to via gRPC. Port number can be included, such as `https://logflare.app:443` |
-| `LOGFLARE_OTEL_SOURCE_UUID` | String, defaults to `nil`, optionally required for OpenTelemetry. | Sets the appropriate header for ingesting OpenTelemetry events into a Logflare source. |
-| `LOGFLARE_OTEL_ACCESS_TOKEN` | String, defaults to `nil`, optionally required for OpenTelemetry. | Sets the appropriate authentication header for ingesting OpenTelemetry events into a Logflare source. |
-| `LOGFLARE_OPEN_TELEMETRY_SAMPLE_RATIO` | Float, defaults to `0.001`, optionally required for OpenTelemetry. | Sets the sample ratio for server traces. Ingestion and Endpoint routes are dropped and are not included in tracing. |
+| Env Var | Type | Description |
+| -------------------------------------- | ------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `LOGFLARE_DB_ENCRYPTION_KEY` | Base64 encryption key, **required** | Encryption key used for encrypting sensitive data. |
+| `LOGFLARE_DB_ENCRYPTION_KEY_RETIRED` | Base64 encryption key, defaults to `nil` | The deprecated encryption key to migrate existing database secrets from. Data will be migrated to the key set under `LOGFLARE_DB_ENCRYPTION_KEY`. Used for encryption key rolling only. |
+| `LOGFLARE_SINGLE_TENANT` | Boolean, defaults to `false` | If enabled, a singular user will be seeded. All browser usage will default to the user. |
+| `LOGFLARE_API_KEY` | string, defaults to `nil` | If set, this API Key can be used for interacting with the Logflare API. API key will be automatically generated if not set. |
+| `LOGFLARE_SUPABASE_MODE`               | Boolean, defaults to `false`                                        | A special mode for Logflare, where Supabase-specific resources will be seeded. Intended for Supabase self-hosted usage.                                                                   |
+| `PHX_HTTP_PORT` | Integer, defaults to `4000` | Allows configuration of the HTTP server port. |
+| `DB_SCHEMA` | String, defaults to `nil` | Allows configuration of the database schema to scope Logflare operations. |
+| `LOGFLARE_LOG_LEVEL` | String, defaults to `info`.
Options: `error`,`warning`, `info` | Allows runtime configuration of log level. |
+| `LOGFLARE_NODE_HOST` | string, defaults to `127.0.0.1` | Sets node host on startup, which affects the node name `logflare@` |
+| `LOGFLARE_LOGGER_METADATA_CLUSTER` | string, defaults to `nil` | Sets global logging metadata for the cluster name. Useful for filtering logs by cluster name. |
+| `LOGFLARE_PUBSUB_POOL_SIZE` | Integer, defaults to `10` | Sets the number of `Phoenix.PubSub.PG2` partitions to be created. Should be configured to the number of cores of your server for optimal multi-node performance. |
+| `LOGFLARE_ALERTS_ENABLED` | Boolean, defaults to `true` | Flag for enabling and disabling query alerts. |
+| `LOGFLARE_ALERTS_MIN_CLUSTER_SIZE` | Integer, defaults to `1` | Sets the required cluster size for Query Alerts to be run. If cluster size is below the provided value, query alerts will not run. |
+| `LOGFLARE_MIN_CLUSTER_SIZE`            | Integer, defaults to `1`                                            | Sets the target cluster size, and emits a warning log periodically if the cluster is below the set number of nodes.                                                                      |
+| `LOGFLARE_OTEL_ENDPOINT` | String, defaults to `nil` | Sets the OpenTelemetry Endpoint to send traces to via gRPC. Port number can be included, such as `https://logflare.app:443` |
+| `LOGFLARE_OTEL_SOURCE_UUID` | String, defaults to `nil`, optionally required for OpenTelemetry. | Sets the appropriate header for ingesting OpenTelemetry events into a Logflare source. |
+| `LOGFLARE_OTEL_ACCESS_TOKEN` | String, defaults to `nil`, optionally required for OpenTelemetry. | Sets the appropriate authentication header for ingesting OpenTelemetry events into a Logflare source. |
+| `LOGFLARE_OPEN_TELEMETRY_SAMPLE_RATIO` | Float, defaults to `0.001`, optionally required for OpenTelemetry. | Sets the sample ratio for server traces. Ingestion and Endpoint routes are dropped and are not included in tracing. |
LOGFLARE_OPEN_TELEMETRY_SAMPLE_RATIO
Additional environment variable configurations for the OpenTelemetry libraries used can be found [here](https://hexdocs.pm/opentelemetry_exporter/readme.html).perf/bq-pipeline-sharding
@@ -56,6 +58,25 @@ Additional environment variable configurations for the OpenTelemetry libraries u
| `POSTGRES_BACKEND_URL` | string, required | PostgreSQL connection string, for connecting to the database. User must have sufficient permssions to manage the schema. |
| `POSTGRES_BACKEND_SCHEMA` | string, optional, defaults to `public` | Specifies the database schema to scope all operations. |
+## Database Encryption
+
+Certain database columns that store sensitive data are encrypted with the `LOGFLARE_DB_ENCRYPTION_KEY` key.
+Encryption keys must be Base64 encoded.
+
+Cipher used is AES with a 256-bit key in GCM mode.
+
+### Rolling Encryption Keys
+
+In order to roll encryption keys and migrate existing encrypted data, use the `LOGFLARE_DB_ENCRYPTION_KEY_RETIRED` environment variable.
+
+Steps to perform the migration are:
+
+1. Move the retired encryption key from `LOGFLARE_DB_ENCRYPTION_KEY` to `LOGFLARE_DB_ENCRYPTION_KEY_RETIRED`.
+2. Generate a new encryption key and set it to `LOGFLARE_DB_ENCRYPTION_KEY`.
+3. Restart or deploy the server with the new environment variables.
+4. Upon successful server startup, an `info` log will be emitted indicating that a retired encryption key was detected, and the migration will be initiated to transition all data encrypted with the retired key to be encrypted with the new key.
+5. Once the migration is complete, the retired encryption key can be safely removed.
+
## BigQuery Setup
### Pre-requisites
diff --git a/lib/logflare/application.ex b/lib/logflare/application.ex
index 63e018251..ea9b55425 100644
--- a/lib/logflare/application.ex
+++ b/lib/logflare/application.ex
@@ -45,6 +45,7 @@ defmodule Logflare.Application do
PubSubRates,
Logs.RejectedLogEvents,
Logflare.Repo,
+ Logflare.Vault,
Logflare.Backends,
{Registry,
name: Logflare.V1SourceRegistry,
@@ -77,6 +78,7 @@ defmodule Logflare.Application do
{Task.Supervisor, name: Logflare.TaskSupervisor},
{Cluster.Supervisor, [topologies, [name: Logflare.ClusterSupervisor]]},
Logflare.Repo,
+ Logflare.Vault,
{Phoenix.PubSub, name: Logflare.PubSub, pool_size: pool_size},
Logs.LogEvents.Cache,
PubSubRates,
diff --git a/lib/logflare/backends.ex b/lib/logflare/backends.ex
index 9df92ac6e..610394e6f 100644
--- a/lib/logflare/backends.ex
+++ b/lib/logflare/backends.ex
@@ -97,7 +97,6 @@ defmodule Logflare.Backends do
backend =
%Backend{}
|> Backend.changeset(attrs)
- |> validate_config()
|> Repo.insert()
with {:ok, updated} <- backend do
@@ -115,7 +114,6 @@ defmodule Logflare.Backends do
backend_config =
backend
|> Backend.changeset(attrs)
- |> validate_config()
|> Repo.update()
with {:ok, updated} <- backend_config do
@@ -156,32 +154,20 @@ defmodule Logflare.Backends do
end
end
- # common config validation function
- defp validate_config(%{valid?: true} = changeset) do
- type = Ecto.Changeset.get_field(changeset, :type)
- mod = Backend.adaptor_mapping()[type]
-
- Ecto.Changeset.validate_change(changeset, :config, fn :config, config ->
- case Adaptor.cast_and_validate_config(mod, config) do
- %{valid?: true} -> []
- %{valid?: false, errors: errors} -> for {key, err} <- errors, do: {:"config.#{key}", err}
- end
- end)
- end
-
- defp validate_config(changeset), do: changeset
-
# common typecasting from string map to attom for config
defp typecast_config_string_map_to_atom_map(nil), do: nil
defp typecast_config_string_map_to_atom_map(%Backend{type: type} = backend) do
mod = Backend.adaptor_mapping()[type]
- Map.update!(backend, :config, fn config ->
- (config || %{})
- |> mod.cast_config()
- |> Ecto.Changeset.apply_changes()
- end)
+ updated =
+ Map.update!(backend, :config_encrypted, fn config ->
+ (config || %{})
+ |> mod.cast_config()
+ |> Ecto.Changeset.apply_changes()
+ end)
+
+ Map.put(updated, :config, updated.config_encrypted)
end
@doc """
diff --git a/lib/logflare/backends/backend.ex b/lib/logflare/backends/backend.ex
index 9d6c167eb..d7171c02a 100644
--- a/lib/logflare/backends/backend.ex
+++ b/lib/logflare/backends/backend.ex
@@ -23,8 +23,9 @@ defmodule Logflare.Backends.Backend do
field(:description, :string)
field(:token, Ecto.UUID, autogenerate: true)
field(:type, Ecto.Enum, values: Map.keys(@adaptor_mapping))
- # TODO: maybe use polymorphic embeds
+ # TODO(Ziinc): make virtual once cluster is using encrypted fields fully
field(:config, :map)
+ field(:config_encrypted, Logflare.Ecto.EncryptedMap)
many_to_many(:sources, Source, join_through: "sources_backends")
belongs_to(:user, User)
has_many(:rules, Rule)
@@ -40,8 +41,36 @@ defmodule Logflare.Backends.Backend do
|> cast(attrs, [:type, :config, :user_id, :name, :description, :metadata])
|> validate_required([:user_id, :type, :config, :name])
|> validate_inclusion(:type, Map.keys(@adaptor_mapping))
+ |> do_config_change()
+ |> validate_config()
end
+ # temp function
+ defp do_config_change(%Ecto.Changeset{changes: %{config: config}} = changeset) do
+ changeset
+ |> put_change(:config_encrypted, config)
+
+ # TODO(Ziinc): uncomment once cluster is using encrypted fields fully
+ # |> delete_change(:config)
+ end
+
+ defp do_config_change(changeset), do: changeset
+
+ # common config validation function
+ defp validate_config(%{valid?: true} = changeset) do
+ type = Ecto.Changeset.get_field(changeset, :type)
+ mod = adaptor_mapping()[type]
+
+ Ecto.Changeset.validate_change(changeset, :config, fn :config, config ->
+ case Adaptor.cast_and_validate_config(mod, config) do
+ %{valid?: true} -> []
+ %{valid?: false, errors: errors} -> for {key, err} <- errors, do: {:"config.#{key}", err}
+ end
+ end)
+ end
+
+ defp validate_config(changeset), do: changeset
+
@spec child_spec(Source.t(), Backend.t()) :: map()
defdelegate child_spec(source, backend), to: Adaptor
diff --git a/lib/logflare/ecto/encrypted_map.ex b/lib/logflare/ecto/encrypted_map.ex
new file mode 100644
index 000000000..ad41e0a2c
--- /dev/null
+++ b/lib/logflare/ecto/encrypted_map.ex
@@ -0,0 +1,3 @@
+defmodule Logflare.Ecto.EncryptedMap do
+ use Cloak.Ecto.Map, vault: Logflare.Vault
+end
diff --git a/lib/logflare/vault.ex b/lib/logflare/vault.ex
new file mode 100644
index 000000000..ef1c10b0f
--- /dev/null
+++ b/lib/logflare/vault.ex
@@ -0,0 +1,113 @@
+defmodule Logflare.Vault do
+ @doc """
+ GenServer needed for Cloak.
+ It handles secrets migration for key rolling at startup.
+
+ To run the migration at runtime, use the following:
+ ```elixir
+ iex> Logflare.Vault.do_migrate()
+ ```
+ An old encryption key should be present for the migration.
+
+ """
+ use Cloak.Vault, otp_app: :logflare
+
+ alias Cloak.Ecto.Migrator
+ require Logger
+
+ @schemas [
+ Logflare.Backends.Backend
+ ]
+
+ @impl GenServer
+ def init(config) do
+ if Application.get_env(:logflare, :env) == :test do
+ # make ets table public
+ :ets.new(@table_name, [:named_table, :public])
+ end
+
+ fallback_key = Application.get_env(:logflare, :encryption_key_fallback) |> maybe_decode!()
+
+ default_key =
+ Application.get_env(:logflare, :encryption_key_default) |> maybe_decode!() || fallback_key
+
+ retired_key = Application.get_env(:logflare, :encryption_key_retired) |> maybe_decode!()
+
+ ciphers =
+ [
+ default:
+ {Cloak.Ciphers.AES.GCM, tag: "AES.GCM.V1." <> hash(default_key), key: default_key},
+ retired:
+ if(retired_key != nil,
+          do: {Cloak.Ciphers.AES.GCM, tag: "AES.GCM.V1." <> hash(retired_key), key: retired_key},
+ else: nil
+ ),
+ fallback:
+ {Cloak.Ciphers.AES.GCM, tag: "AES.GCM.V1." <> hash(fallback_key), key: fallback_key}
+ ]
+ |> Enum.filter(fn {_k, v} -> v != nil end)
+
+ config = Keyword.put(config, :ciphers, ciphers)
+
+ Task.start_link(fn ->
+ # wait for genserver config to be saved, see https://github.com/danielberkompas/cloak/blob/v1.1.4/lib/cloak/vault.ex#L186
+ :timer.sleep(1_000)
+
+ result =
+ cond do
+ Keyword.has_key?(ciphers, :retired) ->
+ Logger.info("Encryption key marked as 'retired' found, migrating schemas to new key.")
+
+ do_migrate()
+ true
+
+ fallback_key != default_key ->
+ Logger.info("Encryption key has been provided, migrating all schemas to key.")
+ do_migrate()
+
+ true
+
+ true ->
+ :noop
+ end
+
+ if result != :noop do
+ Logger.info("Encryption migration complete")
+ end
+ end)
+
+ {:ok, config}
+ end
+
+ # helper, exposed for testing
+ def do_migrate() do
+ for schema <- @schemas do
+ Migrator.migrate(Logflare.Repo, schema)
+ end
+ end
+
+ # helper for loading keys
+ defp maybe_decode!(nil), do: nil
+ defp maybe_decode!(str), do: Base.decode64!(str)
+
+ # used to hash the tag based on the key, as cloak uses the tag to determine cipher to use.
+ defp hash(key) do
+ :sha256 |> :crypto.hash(key) |> Base.encode64()
+ end
+
+ # helper for tests
+ def get_config() do
+ Cloak.Vault.read_config(@table_name)
+ end
+
+ # helper for tests
+ def save_config(config) do
+ Cloak.Vault.save_config(@table_name, config)
+ end
+
+ # helper for tests
+ def get_cipher(key) do
+ key = key |> maybe_decode!()
+    {Cloak.Ciphers.AES.GCM, tag: "AES.GCM.V1." <> hash(key), key: key}
+ end
+end
diff --git a/mix.exs b/mix.exs
index 9f8b895c3..7538a28bf 100644
--- a/mix.exs
+++ b/mix.exs
@@ -111,6 +111,7 @@ defmodule Logflare.Mixfile do
{:libcluster, "~> 3.2"},
{:map_keys, "~> 0.1.0"},
{:observer_cli, "~> 1.5"},
+ {:cloak_ecto, "~> 1.3"},
# Parsing
{:bertex, ">= 0.0.0"},
diff --git a/mix.lock b/mix.lock
index a8c3709fe..d29591c6b 100644
--- a/mix.lock
+++ b/mix.lock
@@ -13,6 +13,8 @@
"certifi": {:hex, :certifi, "2.12.0", "2d1cca2ec95f59643862af91f001478c9863c2ac9cb6e2f89780bfd8de987329", [:rebar3], [], "hexpm", "ee68d85df22e554040cdb4be100f33873ac6051387baf6a8f6ce82272340ff1c"},
"chatterbox": {:hex, :ts_chatterbox, "0.15.1", "5cac4d15dd7ad61fc3c4415ce4826fc563d4643dee897a558ec4ea0b1c835c9c", [:rebar3], [{:hpack, "~> 0.3.0", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "4f75b91451338bc0da5f52f3480fa6ef6e3a2aeecfc33686d6b3d0a0948f31aa"},
"citrine": {:hex, :citrine, "0.1.11", "44447cc0f4783fbf610141a1c8a5b7b4724fe94d6298e0248dddaa6b40e8e91d", [:mix], [{:crontab, "~> 1.1", [hex: :crontab, repo: "hexpm", optional: false]}], "hexpm", "4c456ae2c32f775a040d25758233668879ce8ccdb6ef8b2b52fe32f6da72a998"},
+ "cloak": {:hex, :cloak, "1.1.4", "aba387b22ea4d80d92d38ab1890cc528b06e0e7ef2a4581d71c3fdad59e997e7", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "92b20527b9aba3d939fab0dd32ce592ff86361547cfdc87d74edce6f980eb3d7"},
+ "cloak_ecto": {:hex, :cloak_ecto, "1.3.0", "0de127c857d7452ba3c3367f53fb814b0410ff9c680a8d20fbe8b9a3c57a1118", [:mix], [{:cloak, "~> 1.1.1", [hex: :cloak, repo: "hexpm", optional: false]}, {:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "314beb0c123b8a800418ca1d51065b27ba3b15f085977e65c0f7b2adab2de1cc"},
"combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm", "1b1dbc1790073076580d0d1d64e42eae2366583e7aecd455d1215b0d16f2451b"},
"configcat": {:hex, :configcat, "2.0.1", "cffd7e6ba7a4c41e1e6bbb706379192a1be7cd848bb6b098d4ed054b13c18f9d", [:mix], [{:elixir_uuid, "~> 1.2", [hex: :elixir_uuid, repo: "hexpm", optional: false]}, {:httpoison, "~> 1.7", [hex: :httpoison, repo: "hexpm", optional: false]}, {:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "3e4a239a613d2acbcee7103a6a426c4ae52882ae65bf48cdb5c1247877b65112"},
"contex": {:hex, :contex, "0.3.0", "d390713efee604702600ba801a481bcb8534a9af43e118b29d9d37fe4495fcba", [:mix], [{:nimble_strftime, "~> 0.1.0", [hex: :nimble_strftime, repo: "hexpm", optional: false]}], "hexpm", "3fa7535cc3b265691a4eabc2707fe8622aa60a2565145a14da9aebd613817652"},
@@ -30,7 +32,7 @@
"dialyxir": {:hex, :dialyxir, "1.4.2", "764a6e8e7a354f0ba95d58418178d486065ead1f69ad89782817c296d0d746a5", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "516603d8067b2fd585319e4b13d3674ad4f314a5902ba8130cd97dc902ce6bbd"},
"earmark": {:hex, :earmark, "1.4.46", "8c7287bd3137e99d26ae4643e5b7ef2129a260e3dcf41f251750cb4563c8fb81", [:mix], [], "hexpm", "798d86db3d79964e759ddc0c077d5eb254968ed426399fbf5a62de2b5ff8910a"},
"earmark_parser": {:hex, :earmark_parser, "1.4.32", "fa739a0ecfa34493de19426681b23f6814573faee95dfd4b4aafe15a7b5b32c6", [:mix], [], "hexpm", "b8b0dd77d60373e77a3d7e8afa598f325e49e8663a51bcc2b88ef41838cca755"},
- "ecto": {:hex, :ecto, "3.11.0", "ff8614b4e70a774f9d39af809c426def80852048440e8785d93a6e91f48fec00", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7769dad267ef967310d6e988e92d772659b11b09a0c015f101ce0fff81ce1f81"},
+ "ecto": {:hex, :ecto, "3.11.2", "e1d26be989db350a633667c5cda9c3d115ae779b66da567c68c80cfb26a8c9ee", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3c38bca2c6f8d8023f2145326cc8a80100c3ffe4dcbd9842ff867f7fc6156c65"},
"ecto_sql": {:hex, :ecto_sql, "3.11.0", "c787b24b224942b69c9ff7ab9107f258ecdc68326be04815c6cce2941b6fad1c", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.11.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 0.17.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "77aa3677169f55c2714dda7352d563002d180eb33c0dc29cd36d39c0a1a971f5"},
"elixir_uuid": {:hex, :elixir_uuid, "1.2.1", "dce506597acb7e6b0daeaff52ff6a9043f5919a4c3315abb4143f0b00378c097", [:mix], [], "hexpm", "f7eba2ea6c3555cea09706492716b0d87397b88946e6380898c2889d68585752"},
"epgsql": {:hex, :epgsql, "4.7.1", "d4e47cae46c18c8afa88e34d59a9b4bae16368d7ce1eb3da24fa755eb28393eb", [:rebar3], [], "hexpm", "b6d86b7dc42c8555b1d4e20880e5099d6d6d053148000e188e548f98e4e01836"},
@@ -66,7 +68,7 @@
"idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"},
"inflex": {:hex, :inflex, "2.1.0", "a365cf0821a9dacb65067abd95008ca1b0bb7dcdd85ae59965deef2aa062924c", [:mix], [], "hexpm", "14c17d05db4ee9b6d319b0bff1bdf22aa389a25398d1952c7a0b5f3d93162dd8"},
"iteraptor": {:hex, :iteraptor, "1.14.0", "a6a23ec9ac1c25f3065138fd87f7f739f9b5a7e08fe915cfefcd155105445167", [:mix], [], "hexpm", "88d7a8bb7829a0faa8f99e15b12a8082403f07236d0911e55ad9245b306fd558"},
- "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"},
+ "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
"joken": {:hex, :joken, "2.6.0", "b9dd9b6d52e3e6fcb6c65e151ad38bf4bc286382b5b6f97079c47ade6b1bcc6a", [:mix], [{:jose, "~> 1.11.5", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "5a95b05a71cd0b54abd35378aeb1d487a23a52c324fa7efdffc512b655b5aaa7"},
"jose": {:hex, :jose, "1.11.6", "613fda82552128aa6fb804682e3a616f4bc15565a048dabd05b1ebd5827ed965", [:mix, :rebar3], [], "hexpm", "6275cb75504f9c1e60eeacb771adfeee4905a9e182103aa59b53fed651ff9738"},
"jumper": {:hex, :jumper, "1.0.2", "68cdcd84472a00ac596b4e6459a41b3062d4427cbd4f1e8c8793c5b54f1406a7", [:mix], [], "hexpm", "9b7782409021e01ab3c08270e26f36eb62976a38c1aa64b2eaf6348422f165e1"},
diff --git a/priv/repo/migrations/20240802110527_add_encrypted_config_field_for_backends_table.exs b/priv/repo/migrations/20240802110527_add_encrypted_config_field_for_backends_table.exs
new file mode 100644
index 000000000..9da3e882a
--- /dev/null
+++ b/priv/repo/migrations/20240802110527_add_encrypted_config_field_for_backends_table.exs
@@ -0,0 +1,37 @@
+defmodule Logflare.Repo.Migrations.AddEncryptedConfigFieldForBackendsTable do
+ use Ecto.Migration
+ alias Logflare.Repo
+ import Ecto.Query
+ alias Logflare.Ecto.EncryptedMap
+
+ def up do
+ alter table(:backends) do
+ add :config_encrypted, :binary
+ end
+
+ flush()
+ {:ok, pid} = Logflare.Vault.start_link()
+
+ # copy configs over
+ Repo.all(from b in "backends", select: [:id, :config])
+ |> Enum.each(fn %{id: id} = backend ->
+ {:ok, config_encrypted} = EncryptedMap.dump(backend.config)
+
+ from(b in "backends",
+ where: b.id == ^id,
+ update: [set: [config_encrypted: ^config_encrypted]]
+ )
+ |> Logflare.Repo.update_all([])
+ end)
+ # stop the vault
+ Process.unlink(pid)
+ Process.exit(pid, :kill)
+ :timer.sleep(100)
+ end
+
+ def down do
+ alter table(:backends) do
+ remove(:config_encrypted)
+ end
+ end
+end
diff --git a/run.sh b/run.sh
index cb70362c5..6dbec7fbd 100644
--- a/run.sh
+++ b/run.sh
@@ -1,12 +1,5 @@
#! /bin/sh
-# maybe run a startup script
-if [ -f ./startup.sh ]
-then
- echo 'startup.sh file present, sourcing...';
- . ./startup.sh;
-fi
-
# load secrets conditionally
if [ -f /tmp/.secrets.env ]
then
@@ -14,6 +7,14 @@ then
export $(grep -v '^#' /tmp/.secrets.env | xargs);
fi
+# maybe run a startup script
+if [ -f ./startup.sh ]
+then
+ echo 'startup.sh file present, sourcing...';
+ sleep .5;
+ . ./startup.sh;
+fi
+
echo "LOGFLARE_NODE_HOST is: $LOGFLARE_NODE_HOST"
./logflare eval Logflare.Release.migrate
diff --git a/test/logflare/backends_test.exs b/test/logflare/backends_test.exs
index ed69a28f5..033920570 100644
--- a/test/logflare/backends_test.exs
+++ b/test/logflare/backends_test.exs
@@ -15,6 +15,7 @@ defmodule Logflare.BackendsTest do
alias Logflare.PubSubRates
alias Logflare.Logs.SourceRouting
alias Logflare.PubSubRates
+ alias Logflare.Repo
alias Logflare.Backends.IngestEventQueue
setup do
@@ -22,6 +23,23 @@ defmodule Logflare.BackendsTest do
:ok
end
+ describe "encryption" do
+ # TODO(Ziinc): unskip once cluster is using encrypted fields fully
+ @tag :skip
+ test "backend config is encrypted to the :config_encrypted field" do
+ insert(:backend, config_encrypted: %{some_value: "testing"})
+
+ assert [
+ %{
+ config: nil,
+ config_encrypted: encrypted
+ }
+ ] = Repo.all(from b in "backends", select: [:config, :config_encrypted])
+
+ assert is_binary(encrypted)
+ end
+ end
+
describe "backend management" do
setup do
user = insert(:user)
diff --git a/test/logflare/vault_test.exs b/test/logflare/vault_test.exs
new file mode 100644
index 000000000..0d97aeedd
--- /dev/null
+++ b/test/logflare/vault_test.exs
@@ -0,0 +1,86 @@
+defmodule Logflare.VaultTest do
+ @moduledoc false
+ use Logflare.DataCase
+ alias Logflare.Repo
+
+ describe "migrator with retired" do
+ setup do
+ insert(:backend, config_encrypted: %{some_value: "testing"})
+ {:ok, prev_config} = Logflare.Vault.get_config()
+
+ new_config =
+ Keyword.put(prev_config, :ciphers,
+ default: Logflare.Vault.get_cipher("S757rfGBA90+qpmcJ/WaDt4cBEyZVYVnYKyG4tTH5PQ="),
+ retired: prev_config[:ciphers][:default],
+ fallback: prev_config[:ciphers][:fallback]
+ )
+
+ Logflare.Vault.save_config(new_config)
+
+ on_exit(fn ->
+ Logflare.Vault.save_config(prev_config)
+ end)
+
+ :ok
+ end
+
+ test "do_migrate will migrate data using new cipher" do
+ initial = get_config_encrypted()
+ Logflare.Vault.do_migrate()
+ migrated = get_config_encrypted()
+ assert initial != migrated
+
+ decoded_initial = Logflare.Vault.decrypt!(initial) |> Jason.decode!()
+ decoded_migrated = Logflare.Vault.decrypt!(migrated) |> Jason.decode!()
+ assert decoded_initial == decoded_migrated
+ assert is_binary(migrated)
+ end
+ end
+
+ describe "migrator with new default key and a fallback, and no retired" do
+ setup do
+ # insert using fallback
+ insert(:backend, config_encrypted: %{some_value: "testing"})
+ {:ok, prev_config} = Logflare.Vault.get_config()
+
+ new_config =
+ Keyword.put(prev_config, :ciphers,
+ # use a different cipher
+ default: Logflare.Vault.get_cipher("S757rfGBA90+qpmcJ/WaDt4cBEyZVYVnYKyG4tTH5PQ="),
+ fallback: prev_config[:ciphers][:fallback]
+ )
+
+ Logflare.Vault.save_config(new_config)
+
+ on_exit(fn ->
+ Logflare.Vault.save_config(prev_config)
+ end)
+
+ :ok
+ end
+
+ test "do_migrate will migrate data encrypted with fallback to use new default cipher" do
+ initial = get_config_encrypted()
+ Logflare.Vault.do_migrate()
+ migrated = get_config_encrypted()
+ assert initial != migrated
+
+ decoded_initial = Logflare.Vault.decrypt!(initial) |> Jason.decode!()
+ decoded_migrated = Logflare.Vault.decrypt!(migrated) |> Jason.decode!()
+ assert decoded_initial == decoded_migrated
+ assert is_binary(migrated)
+ end
+ end
+
+ defp get_config_encrypted() do
+ [
+ %{
+ # TODO(Ziinc): to uncomment once fully migrated over
+ # config: nil,
+ config_encrypted: encrypted_str
+ }
+ ] = Repo.all(from b in "backends", select: [:config, :config_encrypted])
+
+ encrypted_str
+ end
+end
diff --git a/test/support/factory.ex b/test/support/factory.ex
index 5b6e4744f..2279446db 100644
--- a/test/support/factory.ex
+++ b/test/support/factory.ex
@@ -74,14 +74,25 @@ defmodule Logflare.Factory do
%SourceSchema{}
end
- def backend_factory do
+ def backend_factory(attrs) do
+ config =
+ attrs[:config] || attrs[:config_encrypted] ||
+ %{
+ project_id: TestUtils.random_string(),
+ dataset_id: TestUtils.random_string()
+ }
+
%Backend{
name: TestUtils.random_string(),
- type: :bigquery,
- config: %{
- project_id: TestUtils.random_string(),
- dataset_id: TestUtils.random_string()
- }
+ description: attrs[:description],
+ type: attrs[:type] || :bigquery,
+ config_encrypted: config,
+ config: config,
+ sources: attrs[:sources] || [],
+ rules: attrs[:rules] || [],
+ user_id: attrs[:user_id],
+ user: attrs[:user],
+ metadata: attrs[:metadata] || nil
}
end