From 7807c20a5625a00c44d9609e45dce503c37bbef4 Mon Sep 17 00:00:00 2001
From: Thom Ivy <38070512+thomivy@users.noreply.github.com>
Date: Fri, 12 Jan 2024 00:45:12 -0600
Subject: [PATCH] replaced old tangle docs with link to new site, updated
component links for actions, added tangle network to links (#158)
---
components/QuickStart.tsx | 4 +-
pages/_meta.json | 5 +
pages/docs/_meta.json | 6 +-
.../docs/ecosystem-roles/validator/_meta.json | 12 -
.../validator/api-reference/_meta.json | 3 -
.../validator/api-reference/cli.mdx | 409 ----------------
.../validator/deploy-with-docker/_meta.json | 5 -
.../deploy-with-docker/full-node.mdx | 119 -----
.../deploy-with-docker/relayer-node.mdx | 268 -----------
.../deploy-with-docker/validator-node.mdx | 239 ----------
.../validator/monitoring/_meta.json | 7 -
.../validator/monitoring/alert-manager.mdx | 342 --------------
.../validator/monitoring/grafana.mdx | 193 --------
.../validator/monitoring/loki.mdx | 334 --------------
.../validator/monitoring/prometheus.mdx | 435 ------------------
.../validator/monitoring/quickstart.mdx | 59 ---
.../ecosystem-roles/validator/quickstart.mdx | 43 --
.../validator/required-keys.mdx | 141 ------
.../validator/requirements.mdx | 189 --------
.../validator/systemd/_meta.json | 4 -
.../validator/systemd/full-node.mdx | 110 -----
.../validator/systemd/quick-node.mdx | 90 ----
.../validator/systemd/validator-node.mdx | 216 ---------
.../validator/troubleshooting.mdx | 108 -----
.../ecosystem-roles/validator/validation.mdx | 21 -
.../validator/validator-rewards.mdx | 125 -----
pages/docs/tangle-network/_meta.json | 7 -
pages/docs/tangle-network/build/_meta.json | 7 -
.../build/deploy-using-hardhat.mdx | 116 -----
.../build/json-rpc-endpoints.mdx | 51 --
.../network-information-configuration.mdx | 100 ----
.../tangle-network/build/pallets/_meta.json | 12 -
.../build/pallets/balances-and-accounts.mdx | 40 --
.../build/pallets/collectives.mdx | 74 ---
.../build/pallets/consensus-mechanism.mdx | 41 --
.../build/pallets/crosschain-pallets.mdx | 96 ----
.../build/pallets/democracy.mdx | 21 -
.../tangle-network/build/pallets/frontier.mdx | 27 --
.../tangle-network/build/pallets/identity.mdx | 19 -
.../build/pallets/interoperability.mdx | 4 -
.../build/pallets/pallet-overview.mdx | 13 -
.../build/pallets/precompile-overview.mdx | 55 ---
.../build/pallets/scheduler.mdx | 73 ---
.../tangle-network/build/pallets/treasury.mdx | 17 -
.../build/precompile-addresses.mdx | 73 ---
.../docs/tangle-network/governance/_meta.json | 9 -
.../governance/democracy-voting.mdx | 31 --
.../governance/governance-interfaces.mdx | 6 -
.../governance/governance-parameters.mdx | 20 -
.../governance/governance-procedures.mdx | 45 --
.../governance/how-to-vote-on-tangle.mdx | 31 --
.../tangle-network/governance/overview.mdx | 62 ---
.../governance/proposal-creation.mdx | 33 --
pages/docs/tangle-network/learn/_meta.json | 5 -
.../docs/tangle-network/learn/incentives.mdx | 31 --
.../learn/understanding-dkg-tangle.mdx | 74 ---
.../tangle-network/learn/webb-protocol.mdx | 35 --
pages/docs/tangle-network/node/_meta.json | 12 -
.../docs/tangle-network/node/docker-node.mdx | 295 ------------
pages/docs/tangle-network/node/flags.mdx | 100 ----
pages/docs/tangle-network/node/hardware.mdx | 37 --
.../tangle-network/node/monitoring/_meta.json | 7 -
.../node/monitoring/alert-manager.mdx | 342 --------------
.../node/monitoring/grafana.mdx | 193 --------
.../tangle-network/node/monitoring/loki.mdx | 334 --------------
.../node/monitoring/prometheus.mdx | 435 ------------------
.../node/monitoring/quickstart.mdx | 59 ---
.../tangle-network/node/node-software.mdx | 180 --------
pages/docs/tangle-network/node/quicknode.mdx | 90 ----
pages/docs/tangle-network/node/quickstart.mdx | 45 --
pages/docs/tangle-network/node/systemd.mdx | 375 ---------------
.../tangle-network/node/troubleshooting.mdx | 95 ----
.../tangle-network/node/validator/_meta.json | 5 -
.../node/validator/proxyaccount.mdx | 64 ---
.../node/validator/requirements.mdx | 211 ---------
.../node/validator/validator-rewards.mdx | 125 -----
pages/docs/tangle-network/overview.mdx | 140 ------
yarn.lock | 6 +-
78 files changed, 15 insertions(+), 7850 deletions(-)
delete mode 100644 pages/docs/ecosystem-roles/validator/_meta.json
delete mode 100644 pages/docs/ecosystem-roles/validator/api-reference/_meta.json
delete mode 100644 pages/docs/ecosystem-roles/validator/api-reference/cli.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json
delete mode 100644 pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/monitoring/_meta.json
delete mode 100644 pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/monitoring/loki.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/quickstart.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/required-keys.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/requirements.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/systemd/_meta.json
delete mode 100644 pages/docs/ecosystem-roles/validator/systemd/full-node.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/troubleshooting.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/validation.mdx
delete mode 100644 pages/docs/ecosystem-roles/validator/validator-rewards.mdx
delete mode 100644 pages/docs/tangle-network/_meta.json
delete mode 100644 pages/docs/tangle-network/build/_meta.json
delete mode 100644 pages/docs/tangle-network/build/deploy-using-hardhat.mdx
delete mode 100644 pages/docs/tangle-network/build/json-rpc-endpoints.mdx
delete mode 100644 pages/docs/tangle-network/build/network-information-configuration.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/_meta.json
delete mode 100644 pages/docs/tangle-network/build/pallets/balances-and-accounts.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/collectives.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/consensus-mechanism.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/crosschain-pallets.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/democracy.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/frontier.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/identity.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/interoperability.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/pallet-overview.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/precompile-overview.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/scheduler.mdx
delete mode 100644 pages/docs/tangle-network/build/pallets/treasury.mdx
delete mode 100644 pages/docs/tangle-network/build/precompile-addresses.mdx
delete mode 100644 pages/docs/tangle-network/governance/_meta.json
delete mode 100644 pages/docs/tangle-network/governance/democracy-voting.mdx
delete mode 100644 pages/docs/tangle-network/governance/governance-interfaces.mdx
delete mode 100644 pages/docs/tangle-network/governance/governance-parameters.mdx
delete mode 100644 pages/docs/tangle-network/governance/governance-procedures.mdx
delete mode 100644 pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx
delete mode 100644 pages/docs/tangle-network/governance/overview.mdx
delete mode 100644 pages/docs/tangle-network/governance/proposal-creation.mdx
delete mode 100644 pages/docs/tangle-network/learn/_meta.json
delete mode 100644 pages/docs/tangle-network/learn/incentives.mdx
delete mode 100644 pages/docs/tangle-network/learn/understanding-dkg-tangle.mdx
delete mode 100644 pages/docs/tangle-network/learn/webb-protocol.mdx
delete mode 100644 pages/docs/tangle-network/node/_meta.json
delete mode 100644 pages/docs/tangle-network/node/docker-node.mdx
delete mode 100644 pages/docs/tangle-network/node/flags.mdx
delete mode 100644 pages/docs/tangle-network/node/hardware.mdx
delete mode 100644 pages/docs/tangle-network/node/monitoring/_meta.json
delete mode 100644 pages/docs/tangle-network/node/monitoring/alert-manager.mdx
delete mode 100644 pages/docs/tangle-network/node/monitoring/grafana.mdx
delete mode 100644 pages/docs/tangle-network/node/monitoring/loki.mdx
delete mode 100644 pages/docs/tangle-network/node/monitoring/prometheus.mdx
delete mode 100644 pages/docs/tangle-network/node/monitoring/quickstart.mdx
delete mode 100644 pages/docs/tangle-network/node/node-software.mdx
delete mode 100644 pages/docs/tangle-network/node/quicknode.mdx
delete mode 100644 pages/docs/tangle-network/node/quickstart.mdx
delete mode 100644 pages/docs/tangle-network/node/systemd.mdx
delete mode 100644 pages/docs/tangle-network/node/troubleshooting.mdx
delete mode 100644 pages/docs/tangle-network/node/validator/_meta.json
delete mode 100644 pages/docs/tangle-network/node/validator/proxyaccount.mdx
delete mode 100644 pages/docs/tangle-network/node/validator/requirements.mdx
delete mode 100644 pages/docs/tangle-network/node/validator/validator-rewards.mdx
delete mode 100644 pages/docs/tangle-network/overview.mdx
diff --git a/components/QuickStart.tsx b/components/QuickStart.tsx
index 7b9d0b3d..5675414e 100644
--- a/components/QuickStart.tsx
+++ b/components/QuickStart.tsx
@@ -18,7 +18,7 @@ export const QuickStartArea = () => {
description: `Start your journey on Tangle Network. This guide will walk you through the steps to become a validator, ensuring network security and integrity.`,
name: "Validate on Tangle Network",
}}
- href="/docs/tangle-network/node/quickstart/"
+ href="https://docs.tangle.tools/docs/node/quickstart/"
>
{
description: `Want to spin up a full node on the Tangle Network? We've made it easier than ever!`,
name: "Deploy a Tangle Node!",
}}
- href="/docs/tangle-network/node/docker-node"
+ href="https://docs.tangle.tools/docs/node/docker-node/"
>
-
-
- ```sh filename="help" copy
- docker run --platform linux/amd64 --network="host" -v "/var/lib/data" --entrypoint ./tangle-standalone \
- ghcr.io/webb-tools/tangle/tangle-standalone:main \
- --help
- ```
-
-
-
-
- ```sh filename="help" copy
- # If you used the release binary
- ./tangle-standalone --help
-
- # Or if you compiled the binary
- ./target/release/tangle-standalone --help
- ```
-
-
-
-
-If you have compiled the tangle-parachain binary its important to note that the command-line arguments
-provided first will be passed to the parachain node, while the arguments
-provided after `--` will be passed to the relay chain node.
-
-```sh filename="args" copy
-tangle-parachain --
-```
-
-USAGE:
-
-```sh filename="usage" copy
-tangle-parachain [OPTIONS] [-- ...]
-tangle-parachain
-```
-
-## Common Flags
-
-The below lists the most commonly used flags for your convienance.
-
-#### `--alice`
-
-Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. Commonly
-used for development or local test networks.
-
-```sh filename="alice" copy
-tangle-standalone --alice
-```
-
-#### `--blocks-pruning `
-
-Specify the blocks pruning mode, a number of blocks to keep or 'archive'.
-
-Default is to keep all finalized blocks. otherwise, all blocks can be kept (i.e
-'archive'), or for all canonical blocks (i.e 'archive-canonical'), or for the last N
-blocks (i.e a number).
-
-NOTE: only finalized blocks are subject for removal!
-
-```sh filename="blocks-pruning" copy
-tangle-standalone --blocks-pruning 120
-```
-
-#### `--bob`
-
-Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. Commonly
-used for development or local test networks.
-
-```sh filename="bob" copy
-tangle-standalone --bob
-```
-
-#### `--bootnodes`
-
-Specify a list of bootnodes.
-
-```sh filename="bootnodes" copy
-tangle-standalone --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWAWueKNxuNwMbAtss3nDTQhMg4gG3XQBnWdQdu2DuEsZS
-```
-
-#### `--chain `
-
-Specify the chain specification.
-
-It can be one of the predefined ones (dev, local, or staging) or it can be a path to a
-file with the chainspec (such as one exported by the `build-spec` subcommand).
-
-```sh filename="local" copy
-tangle-standalone --chain standalone-local
-```
-
-#### `--charlie`
-
-Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. Commonly
-used for development or local test networks.
-
-```sh filename="charlie" copy
-tangle-standalone --charlie
-```
-
-#### `--collator`
-
-Run node as collator. (Not applicable at this time.)
-
-Note that this is the same as running with `--validator`.
-
-```sh filename="collator" copy
-tangle-standalone --collator
-```
-
-#### `-d, --base-path `
-
-Specify custom base path.
-
-```sh filename="base path" copy
-tangle-standalone --base-path /data
-```
-
-#### `--db-cache `
-
-Limit the memory the database cache can use
-
-```sh filename="db-cache" copy
-tangle-standalone --db-cache 128
-```
-
-#### `--detailed-log-output`
-
-Enable detailed log output.
-
-This includes displaying the log target, log level and thread name.
-
-This is automatically enabled when something is logged with any higher level than
-`info`.
-
-```sh filename="log-output" copy
-tangle-standalone --detailed-log-output
-```
-
-#### `--dev`
-
-Specify the development chain.
-
-This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and
-`--tmp` flags, unless explicitly overridden.
-
-```sh filename="dev" copy
-tangle-standalone --dev
-```
-
-#### `--execution `
-
-The execution strategy that should be used by all execution contexts
-
-[possible values: native, wasm, both, native-else-wasm]
-
-`native` - only execute with the native build
-`wasm` - only execute with the Wasm build
-`both` - execute with both native and Wasm builds
-`nativeelsewasm` - execute with the native build if possible and if it fails, then execute with Wasm
-
-```sh filename="wasm" copy
-tangle-standalone --execution wasm
-```
-
-#### `--force-authoring`
-
-Enable authoring even when offline
-
-```sh filename="authoring" copy
-tangle-parachain --force-authoring
-```
-
-#### `--keystore-path `
-
-Specify custom keystore path
-
-```sh filename="keystore path" copy
-tangle-standalone --keystore-path /tmp/chain/data/
-```
-
-#### `--keystore-uri `
-
-Specify custom URIs to connect to for keystore-services
-
-```sh filename="keystore url" copy
-tangle-standalone --keystore-uri foo://example.com:8042/over/
-```
-
-#### `--name `
-
-The human-readable name for this node.
-
-The node name will be reported to the telemetry server, if enabled.
-
-```sh filename="name" copy
-tangle-standalone --name zeus
-```
-
-#### `--node-key `
-
-The secret key to use for libp2p networking.
-
-The value is a string that is parsed according to the choice of `--node-key-type` as
-follows:
-
-`ed25519`: The value is parsed as a hex-encoded Ed25519 32 byte secret key, i.e. 64 hex
-characters.
-
-The value of this option takes precedence over `--node-key-file`.
-
-WARNING: Secrets provided as command-line arguments are easily exposed. Use of this
-option should be limited to development and testing. To use an externally managed secret
-key, use `--node-key-file` instead.
-
-```sh filename="node-key" copy
-tangle-standalone --node-key b6806626f5e4490c27a4ccffed4fed513539b6a455b14b32f58878cf7c5c4e68
-```
-
-#### `--node-key-file `
-
-The file from which to read the node's secret key to use for libp2p networking.
-
-The contents of the file are parsed according to the choice of `--node-key-type` as
-follows:
-
-`ed25519`: The file must contain an unencoded 32 byte or hex encoded Ed25519 secret key.
-
-If the file does not exist, it is created with a newly generated secret key of the
-chosen type.
-
-```sh filename="node-key-file" copy
-tangle-standalone --node-key-file ./node-keys-file/
-```
-
-#### `--port `
-
-Specify p2p protocol TCP port
-
-```sh filename="port" copy
-tangle-standalone --port 9944
-```
-
-#### `--prometheus-external`
-
-Expose Prometheus exporter on all interfaces.
-
-Default is local.
-
-```sh filename="prometheus" copy
-tangle-standalone --prometheus-external
-```
-
-#### `--prometheus-port `
-
-Specify Prometheus exporter TCP Port
-
-```sh filename="prometheus-port" copy
-tangle-standalone --prometheus-port 9090
-```
-
-#### `--rpc-cors `
-
-Specify browser Origins allowed to access the HTTP & WS RPC servers.
-
-A comma-separated list of origins (protocol://domain or special `null` value). Value of
-`all` will disable origin validation. Default is to allow localhost and
-https://polkadot.js.org origins. When running in --dev mode the default is to allow all origins.
-
-```sh filename="rpc-cors" copy
-tangle-standalone --rpc-cors "*"
-```
-
-#### `--rpc-external`
-
-Listen to all RPC interfaces.
-
-Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC
-proxy server to filter out dangerous methods. More details:
-https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs. Use
-`--unsafe-rpc-external` to suppress the warning if you understand the risks.
-
-```sh filename="rpc-external" copy
-tangle-standalone --rpc-external
-```
-
-#### `--rpc-port `
-
-Specify HTTP RPC server TCP port
-
-```sh filename="rpc-port" copy
-tangle-standalone --rpc-port 9933
-```
-
-#### `--state-pruning `
-
-Specify the state pruning mode, a number of blocks to keep or 'archive'.
-
-Default is to keep only the last 256 blocks, otherwise, the state can be kept for all of
-the blocks (i.e 'archive'), or for all of the canonical blocks (i.e
-'archive-canonical').
-
-```sh filename="state-pruning" copy
-tangle-standalone --state-pruning 128
-```
-
-#### `--telemetry-url `
-
-The URL of the telemetry server to connect to.
-
-This flag can be passed multiple times as a means to specify multiple telemetry
-endpoints. Verbosity levels range from 0-9, with 0 denoting the least verbosity.
-Expected format is 'URL VERBOSITY'.
-
-```sh filename="wss" copy
-tangle-standalone --telemetry-url 'wss://foo/bar 0'
-```
-
-#### `--validator`
-
-Enable validator mode.
-
-The node will be started with the authority role and actively participate in any
-consensus task that it can (e.g. depending on availability of local keys).
-
-```sh filename="validator" copy
-tangle-standalone --validator
-```
-
-#### `--wasm-execution `
-
-Method for executing Wasm runtime code
-
-[default: compiled]
-[possible values: interpreted-i-know-what-i-do, compiled]
-
-`compiled` - this is the default and uses the Wasmtime compiled runtime
-`interpreted-i-know-what-i-do` - uses the wasmi interpreter
-
-```sh filename="wasm-execution" copy
-tangle-standalone --wasm-execution compiled
-```
-
-#### `--ws-external`
-
-Listen to all Websocket interfaces.
-
-Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC
-proxy server to filter out dangerous methods. More details:
-https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs. Use
-`--unsafe-ws-external` to suppress the warning if you understand the risks.
-
-```sh filename="ws-external" copy
-tangle-standalone --ws-external
-```
-
-#### `--ws-port `
-
-Specify WebSockets RPC server TCP port
-
-```sh filename="ws-port" copy
-tangle-standalone --ws-port 9944
-```
-
-## Subcommands
-
-The following subcommands are available:
-
-USAGE:
-
-```sh filename="subcommand" copy
-tangle-standalone
-```
-
-| Subcommand | Description |
-| -------------------- | --------------------------------------------------------------------------------------------------- |
-| benchmark | Sub-commands concerned with benchmarking. The pallet benchmarking moved to the `pallet` sub-command |
-| build-spec | Build a chain specification |
-| check-block | Validate blocks |
-| export-blocks | Export blocks |
-| export-genesis-state | Export the genesis state of the standalone node |
-| export-genesis-wasm | Export the genesis wasm of the standalone node |
-| export-state | Export the state of a given block into a chain spec |
-| help | Print this message or the help of the given subcommand(s) |
-| import-blocks | Import blocks |
-| key | Key management cli utilities |
-| purge-chain | Remove the whole chain |
-| revert | Revert the chain to a previous state |
-| try-runtime | Try some testing command against a specified runtime state |
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json b/pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json
deleted file mode 100644
index 1f5e039a..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "full-node": "Full Node",
- "validator-node": "Validator Node",
- "relayer-node": "Relayer Node"
-}
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx b/pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx
deleted file mode 100644
index 8efdb7d4..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx
+++ /dev/null
@@ -1,119 +0,0 @@
----
-title: Deploying with Docker
-description: Deploy a Tangle node with only a few steps.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying with Docker
-
-An Tangle node can be spun up quickly using Docker. For more information on installing Docker,
-please visit the official Docker [docs](https://docs.docker.com/get-docker/). When connecting to Tangle on Kusama, it will take a few days to completely
-sync the embedded relay chain. Make sure that your system meets the requirements which can read [at Hardware](/docs/tangle-network/node/validator/hardware).
-
-## Using Docker
-
-The quickest and easiest way to get started is to make use of our published Docker Tangle image. In doing so, users simply pull down the image from ghcr.io,
-set their keys, fetch the applicable chainspec and run the start command to get up and running.
-
-### **1. Pull the Tangle Docker image:**
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing, it will use the latest and maybe unstable version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Fetch applicable chainspec(s):**
-
-To join the Tangle Test network, we need to fetch the appropriate chainspec for the Tangle network.
-Download the latest chainspec for standalone testnet:
-
-```sh filename="get chainspec" copy
-# Fetches chainspec for Tangle network
-wget https://raw.githubusercontent.com/webb-tools/tangle/main/chainspecs/testnet/tangle-standalone.json
-```
-
-Please make a reference where you have stored this `json` file as we will need it in the next steps.
-
-**Note:** Full nodes do not participate in block production or consensus so no required keys are necessary.
-
-**4. Start Tangle full node:**
-
-To start the node run the following command:
-
-```sh filename="docker run" copy
-docker run --rm -it -v /var/lib/tangle/:/data ghcr.io/webb-tools/tangle/tangle-standalone:main \
- --chain tangle-testnet \
- --name="YOUR-NODE-NAME" \
- --base-path /data \
- --rpc-cors all \
- --port 9946 \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-```
-
-
- For an overview of the above flags, please refer to the [CLI Usage](/docs/ecosystem-roles/validator/api-reference/cli/) page of our documentation.
-
-
-Once Docker pulls the necessary images, your Tangle node will start, displaying lots of information,
-such as the chain specification, node name, role, genesis state, and more.
-
-If you followed the installation instructions for Tangle, once synced, you will be connected to peers and see
-blocks being produced on the Tangle network! Note that in this case you need to also sync to the Polkadot/Kusama
-relay chain, which might take a few days.
-
-### Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release [page](https://github.com/webb-tools/tangle/pkgs/container/tangle%2Ftangle-standalone)
-
-3. Pull the latest version of Tangle binary by doing `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:{VERSION_CODE}`.
- Example, if the latest version of Tangle is v0.1.2, then the command would be `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:v0.1.12`
-
-4. Restart the tangle container and you should have the updated version of the client.
-
-### Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directly, you can run the following command to purge your parachain node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-If you ran with parachain node you can run the following command to purge your relay-chain node data:
-
-```sh filename="rm" copy
-# purges relay chain data
-sudo rm -rf /data/polkadot/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx b/pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx
deleted file mode 100644
index 037f90ca..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx
+++ /dev/null
@@ -1,268 +0,0 @@
----
-title: Deploying with Docker
-description: An overview of Webb Tangle node and Webb Relayer deployment process.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying Tangle Validator and Relayer
-
-It is likely that network participants that are running a Tangle validator node may also want to run a relayer node. This guide
-will walk you through the process of deploying a Tangle validator and a Webb Relayer. By the end of this document, you will have set up a Webb Relayer
-at a publicly accessible endpoint alongside a Tangle validator node both of which will be running within a Docker container.
-
-## Prerequisites
-
-It is a requirement to have Docker installed on the Linux machine, for instructions on how to install Docker on the machine
-please visit the offical Docker installation documentation [here](https://docs.docker.com/desktop/install/linux-install/).
-
-When connecting to Tangle on Kusama, it will take a few days to completely
-sync the embedded relay chain. Make sure that your system meets the requirements which can read [on the Hardware page.](/docs/tangle-network/node/validator/hardware).
-
-## Using Docker Compose
-
-The quickest and easiest way to get started is to make use of our published Docker Tangle image. In doing so, users simply
-create a local directory to store the chain data, download the latest chainspec for standalone testnet, set their keys, and run the start
-command to get up and running.
-
-### **1. Pull the Tangle Docker image:**
-
-We will used the pre-built Tangle Docker image to generate and insert the required keys for our node.
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing, it will use the latest and maybe unstable version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, keys, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Generate and store keys:**
-
-We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys]() section.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-
-Let's now insert our required secret keys, we will not pass the SURI in the command, instead it will be interactive, where you
-should paste your SURI when the command asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type aura
-```
-
-**Im-online Keys** - **these keys are optional**
-
-```sh filename="Imonline" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
- --key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
- --key-type gran
-```
-
-To ensure you have successfully generated the keys correctly run:
-
-```sh filename="ls" copy
-ls ~/webb/tangle/chains/*/keystore
-# You should see a some file(s) there, these are the keys.
-```
-
-### **4.Creating Docker compose file:**
-
-Now that we have generated the keys, we can start the Tangle Validator and Relayer. We will use the `docker-compose` file provided
-in the [Tangle repo](/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node/).
-
-Let's start by creating a docker-compose file:
-
-```sh filename="nano" copy
-nano ~/webb/tangle/docker-compose.yml
-```
-
-Add the following lines:
-
-```yaml filename="docker-compose.yml" copy
-# This an example of a docker compose file which contains both Relayer and Tangle Node.
-version: "3"
-
-services:
- webb_relayer:
- # Here you should checkout
- # https://github.com/webb-tools/relayer/pkgs/container/relayer/versions?filters%5Bversion_type%5D=tagged
- # For the latest stable version. Only use "edge" if
- # you know what you are doing, it will use the latest and maybe
- # unstable version of the relayer.
- image: ghcr.io/webb-tools/relayer:${RELAYER_RELEASE_VERSION}
- container_name: webb_relayer
- env_file: .env
- depends_on:
- - caddy
- ports:
- - "$WEBB_PORT:$WEBB_PORT"
- volumes:
- - $PWD/config:/config
- - relayer_data:/store
- restart: always
- command: /webb-relayer -vvv -c /config
-
- tangle_standalone:
- # Here you should checkout
- # https://github.com/webb-tools/tangle/pkgs/container/tangle-standalone/versions?filters%5Bversion_type%5D=tagged
- # For the latest stable version. Only use "main" if
- # you know what you are doing, it will use the latest and maybe
- # unstable version of the node.
- image: ghcr.io/webb-tools/tangle/tangle-standalone:${TANGLE_RELEASE_VERSION}
- container_name: tangle_standalone
- env_file: .env
- ports:
- - "30333:30333"
- - "9933:9933"
- - "9944:9944"
- - "9615:9615"
- volumes:
- - tangle_data:/data
- restart: always
- entrypoint: /tangle-standalone
- command:
- [
- "--base-path=/data",
- "--validator",
- "--chain=/data/chainspecs/tangle-standalone.json",
- "--",
- "--execution=wasm",
- ]
-
-volumes:
- relayer_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/relayer/data
- tangle_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/tangle/
-```
-
-### **5. Set environment variables:**
-
-Prior to spinning up the Docker containers, we need to set some environment variables. Below displays an example `.env` file
-but you will need to update to reflect your own environment.
-
-```sh filename="export variables" copy
-export TANGLE_RELEASE_VERSION=main
-export RELAYER_RELEASE_VERSION=0.5.0-rc1
-export BASE_PATH=/tmp/data/
-export CHAINSPEC_PATH=/tmp/chainspec
-export WEBB_PORT=9955
-```
-
-### **5. Start Relayer and Validator node:**
-
-With our keys generated and our docker-compose file created, we can now start the relayer and validator node.
-
-```sh filename="compose up" copy
-docker compose up -d
-```
-
-The `docker-compose` file will spin up a container running Tangle validator node and another running a Webb Relayer.
-
-## Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node or validator.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release page
-
-3. Use the latest version to spin up your node. To do so, replace the version in the Full Node or validator command with the latest and run it
-
-Once your node is running again, you should see logs in your terminal.
-
-## Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directly, you can run the following command to purge your parachain node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-If you ran with parachain node you can run the following command to purge your relay-chain node data:
-
-```sh filename="rm" copy
-# purges relay chain data
-sudo rm -rf /data/polkadot/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx b/pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx
deleted file mode 100644
index 6961a91f..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx
+++ /dev/null
@@ -1,239 +0,0 @@
----
-title: Deploying with Docker
-description: Deploy a Tangle validator node with only a few steps.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying with Docker
-
-A Tangle node can be spun up quickly using Docker. For more information on installing Docker,
-please visit the official Docker [docs](https://docs.docker.com/get-docker/). When connecting to Tangle on Kusama, it will take a few days to completely sync the embedded relay chain. Make sure that your system meets the requirements which can read [here](/docs/tangle-network/node-operators/requirements).
-
-## Standalone Testnet
-
-### **1. Pull the Tangle Docker image:**
-
-Although we can make use of the provided `docker-compose` file in the [Tangle repo](https://github.com/webb-tools/tangle/tree/main/docker/tangle-standalone), we pull the `tangle-standalone:main` Docker image from ghcr.io
-so that we can generate and insert our required keys before starting the node.
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing, it will use the latest and maybe unstable version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, keys, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Fetch applicable chainspec(s):**
-
-To join the Tangle Test network as node operator we need to fetch the appropriate chainspec for the Tangle network.
-Download the latest chainspec for standalone testnet:
-
-```sh filename="get chainspec" copy
-# Fetches chainspec for Tangle network
-wget https://raw.githubusercontent.com/webb-tools/tangle/main/chainspecs/testnet/tangle-standalone.json
-```
-
-Please make a reference where you have stored this `json` file as we will need it in the next steps.
-
-### **4. Generate and store keys:**
-
-We need to generate the required keys for our node.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-Let's now insert our required secret keys, we will not pass the SURI in the command, instead it will be interactive, where you
-should paste your SURI when the command asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type aura
-```
-
-**Im-online Keys** - **these keys are optional (required if you are running as a validator)**
-
-```sh filename="Imonline" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
- --key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
- --key-type gran
-```
-
-To ensure you have successfully generated the keys correctly run:
-
-```sh filename="ls" copy
-ls ~/webb/tangle/chains/*/keystore
-# You should see a some file(s) there, these are the keys.
-```
-
-**Caution:** Ensure you insert the keys using the instructions at [generate keys](#generate-and-store-keys),
-if you want the node to auto generate the keys, add the `--auto-insert-keys` flag.
-
-### **5. Start Tangle Validator node:**
-
-To start the node run the following command:
-
-```sh filename="docker run" copy
-docker run --platform linux/amd64 --network="host" -v "/var/lib/data" --entrypoint ./tangle-standalone \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
---base-path=/data \
---chain tangle-testnet \
---name="YOUR-NODE-NAME" \
---execution wasm \
---wasm-execution compiled \
---trie-cache-size 0 \
---validator \
---telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-```
-
-
- For an overview of the above flags, please refer to the [CLI Usage](/docs/ecosystem-roles/validator/api-reference/cli/) page of our documentation.
-
-
-Once Docker pulls the necessary images, your Tangle node will start, displaying lots of information,
-such as the chain specification, node name, role, genesis state, and more.
-
-If you followed the installation instructions for Tangle, once synced, you will be connected to peers and see
-blocks being produced on the Tangle network!
-
-```sh filename="logs"
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-### Run via Docker Compose
-
-The docker-compose file will spin up a container running Tangle standalone node, but you have to set the following environment variables. Remember to customize your the values depending on your environment and then copy paste this to CLI.
-
-```sh filename="set variables" copy
-RELEASE_VERSION=main
-CHAINSPEC_PATH=/tmp/chainspec/
-```
-
-After that run:
-
-```sh filename="compose up" copy
-docker compose up -d
-```
-
-## Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release [page](https://github.com/webb-tools/tangle/pkgs/container/tangle%2Ftangle-standalone)
-
-3. Pull the latest version of Tangle binary by doing `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:{VERSION_CODE}`.
- Example, if the latest version of Tangle is v0.1.2, then the command would be `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:v0.1.12`
-
-4. Restart the tangle container and you should have the updated version of the client.
-
-Once your node is running again, you should see logs in your terminal.
-
-## Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directly, you can run the following command to purge your standalone node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/_meta.json b/pages/docs/ecosystem-roles/validator/monitoring/_meta.json
deleted file mode 100644
index e8491a85..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "quickstart": "Quickstart",
- "prometheus": "Prometheus",
- "alert-manager": "AlertManager",
- "grafana": "Grafana Dashboard",
- "loki": "Loki Log Manager"
-}
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx b/pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx
deleted file mode 100644
index a6b4664a..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx
+++ /dev/null
@@ -1,342 +0,0 @@
----
-title: Alert Manager Setup
-description: Create alerts to notify the team when issues arise.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Alert Manager Setup
-
-The following is a guide outlining the steps to setup AlertManager to send alerts when a Tangle node or DKG is being disrupted. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to send alerts from a running Tangle node.
-
-- **Alert Manager** listens to Prometheus metrics and pushes an alert as soon as a threshold is crossed (CPU % usage for example).
-
-## What is Alert Manager?
-
-The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of deduplicating, grouping,
-and routing them to the correct receiver integration such as email, PagerDuty, or OpsGenie. It also takes care of silencing and
-inhibition of alerts. To learn more about Alertmanager, please
-visit the official docs site [here](https://prometheus.io/docs/alerting/latest/alertmanager/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Alertmanager).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine. As well as,
- the user has already configured Prometheus on this machine.
-
-
-**1. Download Alertmanager**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.linux-arm64.tar.gz &&
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/prometheus/prometheus/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.windows-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.windows-arm64.tar.gz
- ```
-
-
-
-
-**2. Extract the Downloaded Files:**
-
-Run the following command:
-
-```sh filename="tar" copy
-tar xvf alertmanager-*.tar.gz
-```
-
-**3. Copy the Extracted Files into `/usr/local/bin`:**
-
-
- **Note:** The example below makes use of the `linux-amd64` installations, please update to make use of the target system you have installed.
-
-
-Copy the `alertmanager` binary and `amtool`:
-
-```sh filename="cp" copy
-sudo cp ./alertmanager-*.linux-amd64/alertmanager /usr/local/bin/ &&
-sudo cp ./alertmanager-*.linux-amd64/amtool /usr/local/bin/
-```
-
-**4. Create Dedicated Users:**
-
-Now we want to create dedicated users for the Alertmanager module we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin alertmanager
-```
-
-**5. Create Directories for `Alertmanager`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /etc/alertmanager &&
-sudo mkdir /var/lib/alertmanager
-```
-
-**6. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-**alertManager**:
-
-```sh filename="chown" copy
-sudo chown alertmanager:alertmanager /etc/alertmanager/ -R &&
-sudo chown alertmanager:alertmanager /var/lib/alertmanager/ -R &&
-sudo chown alertmanager:alertmanager /usr/local/bin/alertmanager &&
-sudo chown alertmanager:alertmanager /usr/local/bin/amtool
-```
-
-**7. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./alertmanager*
-```
-
-Great! You have now installed and setup your environment. The next series of steps will be configuring the service.
-
-## Configuration
-
-If you are interested to see how we configure the Tangle Network nodes for monitoring check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Prometheus
-
-The first thing we need to do is add `rules.yml` file to our Prometheus configuration:
-
-Let’s create the `rules.yml` file that will give the rules for Alert manager:
-
-```sh filename="nano" copy
-sudo touch /etc/prometheus/rules.yml
-sudo nano /etc/prometheus/rules.yml
-```
-
-We are going to create 2 basic rules that will trigger an alert in case the instance is down or the CPU usage crosses 80%.
-You can create all kinds of rules that can triggered, for an exhausted list of rules see our rules list [here](https://github.com/webb-tools/tangle/blob/main/monitoring/prometheus/rules.yml).
-
-Add the following lines and save the file:
-
-```sh filename="group" copy
-groups:
- - name: alert_rules
- rules:
- - alert: InstanceDown
- expr: up == 0
- for: 5m
- labels:
- severity: critical
- annotations:
- summary: "Instance $labels.instance down"
- description: "[{{ $labels.instance }}] of job [{{ $labels.job }}] has been down for more than 1 minute."
-
- - alert: HostHighCpuLoad
- expr: 100 - (avg by(instance)(rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
- for: 0m
- labels:
- severity: warning
- annotations:
- summary: Host high CPU load (instance bLd Kusama)
- description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
-```
-
-The criteria for triggering an alert are set in the `expr:` part. You can customize these triggers as you see fit.
-
-Then, check the rules file:
-
-```yaml filename="promtool rules" copy
-promtool check rules /etc/prometheus/rules.yml
-```
-
-And finally, check the Prometheus config file:
-
-```yaml filename="promtool check" copy
-promtool check config /etc/prometheus/prometheus.yml
-```
-
-### Gmail setup
-
-We can use a Gmail address to send the alert emails. For that, we will need to generate an app password from our Gmail account.
-
-Note: we recommend you here to use a dedicated email address for your alerts. Review Google's own guide for
-proper set up [here](https://support.google.com/mail/answer/185833?hl=en).
-
-### Slack notifications
-
-We can also utilize Slack notifications to send the alerts through. For that we need to a specific Slack channel to send the notifications to, and
-to install Incoming WebHooks Slack application.
-
-To do so, navigate to:
-
-1. Administration > Manage Apps.
-2. Search for "Incoming Webhooks"
-3. Install into your Slack workspace.
-
-### Alertmanager
-
-The Alert manager config file is used to set the external service that will be called when an alert is triggered. Here, we are going to use the Gmail and Slack notification created previously.
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/alertmanager/alertmanager.yml
-sudo nano /etc/alertmanager/alertmanager.yml
-```
-
-And add the Gmail configuration to it and save the file:
-
-```sh filename="Gmail config" copy
-global:
- resolve_timeout: 1m
-
-route:
- receiver: 'gmail-notifications'
-
-receivers:
-- name: 'gmail-notifications'
- email_configs:
- - to: 'EMAIL-ADDRESS'
- from: 'EMAIL-ADDRESS'
- smarthost: 'smtp.gmail.com:587'
- auth_username: 'EMAIL-ADDRESS'
- auth_identity: 'EMAIL-ADDRESS'
- auth_password: 'EMAIL-ADDRESS'
- send_resolved: true
-
-
-# ********************************************************************************************************************************************
-# Alert Manager for Slack Notifications *
-# ********************************************************************************************************************************************
-
- global:
- resolve_timeout: 1m
- slack_api_url: 'INSERT SLACK API URL'
-
- route:
- receiver: 'slack-notifications'
-
- receivers:
- - name: 'slack-notifications'
- slack_configs:
- - channel: 'channel-name'
- send_resolved: true
- icon_url: https://avatars3.githubusercontent.com/u/3380462
- title: |-
- [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
- {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
- {{" "}}(
- {{- with .CommonLabels.Remove .GroupLabels.Names }}
- {{- range $index, $label := .SortedPairs -}}
- {{ if $index }}, {{ end }}
- {{- $label.Name }}="{{ $label.Value -}}"
- {{- end }}
- {{- end -}}
- )
- {{- end }}
- text: >-
- {{ range .Alerts -}}
- *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
- *Description:* {{ .Annotations.description }}
- *Details:*
- {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- {{ end }}
- {{ end }}
-```
-
-Of course, you have to change the email addresses and the auth_password with the one generated from Google previously.
-
-## Service Setup
-
-### Alert manager
-
-Create and open the Alert manager service file:
-
-```sh filename="create service" copy
-sudo tee /etc/systemd/system/alertmanager.service > /dev/null << EOF
-[Unit]
- Description=AlertManager Server Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=alertmanager
- Group=alertmanager
- Type=simple
- ExecStart=/usr/local/bin/alertmanager \
- --config.file /etc/alertmanager/alertmanager.yml \
- --storage.path /var/lib/alertmanager \
- --web.external-url=http://localhost:9093 \
- --cluster.advertise-address='0.0.0.0:9093'
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start the alertManager service:
-
-**alertManager**:
-
-```sh filename="start service" copy
-sudo systemctl start alertmanager.service
-```
-
-And check that they are working fine:
-
-**alertManager**::
-
-```sh filename="status" copy
-sudo systemctl status alertmanager.service
-```
-
-If everything is working adequately, activate the services!
-
-**alertManager**:
-
-```sh filename="enable" copy
-sudo systemctl enable alertmanager.service
-```
-
-Amazing! We have now successfully added alert monitoring for our Tangle node!
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx b/pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx
deleted file mode 100644
index 916cb9ac..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx
+++ /dev/null
@@ -1,193 +0,0 @@
----
-title: Grafana Dashboard Setup
-description: Create visual dashboards for the metrics captured by Prometheus.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Grafana Setup
-
-The following is a guide outlining the steps to setup Grafana Dashboard to visualize metric data for a Tangle node. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to visualize metric data from a running Tangle node.
-
-- **Grafana** is the visual dashboard tool that we access from the outside (through SSH tunnel to keep the node secure).
-
-## What are Grafana Dashboards?
-
-A dashboard is a set of one or more panels organized and arranged into one or more rows. Grafana ships with a variety of panels making it easy to
-construct the right queries, and customize the visualization so that you can create the perfect dashboard for your need. Each panel can interact
-with data from any configured Grafana data source. To learn more about Grafana Dashboards, please
-visit the official docs site [here](https://grafana.com/docs/grafana/latest/dashboards/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Grafana).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine. As well as,
- the user has already configured Prometheus on this machine.
-
-
-**1. Download Grafana**
-
-
-
-
- ```sh filename="brew" copy
- brew update
- brew install grafana
- ```
-
-
-
-
- ```sh filename="linux" copy
- sudo apt-get install -y apt-transport-https
- sudo apt-get install -y software-properties-common wget
- wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -
- ```
-
- For other linux distrubutions please visit official release page [here](https://grafana.com/grafana/download?edition=oss&platform=linux).
-
-
-
-
-**2. Add Grafana repository to APT sources:**
-
-
- This guide assumes the user is installing and configuring Grafana for a linux machine. For Macos instructions
- please visit the offical docs [here](https://grafana.com/docs/grafana/v9.0/setup-grafana/installation/mac/).
-
-
-```sh filename="add-apt" copy
-sudo add-apt-repository "deb https://packages.grafana.com/oss/deb stable main"
-```
-
-**3. Refresh your APT cache to update your package lists:**
-
-```sh filename="apt update" copy
-sudo apt update
-```
-
-**4. Next, make sure Grafana will be installed from the Grafana repository:**
-
-```sh filename="apt-cache" copy
-apt-cache policy grafana
-```
-
-The output of the previous command tells you the version of Grafana that you are about to install, and where you will retrieve the package from. Verify that the installation candidate at the top of the list will come from the official Grafana repository at `https://packages.grafana.com/oss/deb`.
-
-```sh filename="output"
-Output of apt-cache policy grafana
-grafana:
- Installed: (none)
- Candidate: 6.3.3
- Version table:
- 6.3.3 500
- 500 https://packages.grafana.com/oss/deb stable/main amd64 Packages
-...
-```
-
-**5. You can now proceed with the installation:**
-
-```sh filename="install grafana" copy
-sudo apt install grafana
-```
-
-**6. Install the Alert manager plugin for Grafana:**
-
-```sh filename="grafana-cli" copy
-sudo grafana-cli plugins install camptocamp-prometheus-alertmanager-datasource
-```
-
-## Service Setup
-
-### Grafana
-
-The Grafana’s service is automatically created during extraction of the deb package, you do not need to create it manually.
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-**Start the Grafana service:**
-
-```sh filename="start service" copy
-sudo systemctl start grafana-server
-```
-
-And check that they are working fine, one by one:
-
-```sh filename="status" copy
-systemctl status grafana-server
-```
-
-If everything is working adequately, activate the services!
-
-```sh filename="enable" copy
-sudo systemctl enable grafana-server
-```
-
-## Run Grafana dashboard
-
-Now we are going to setup the dashboard to visiualize the metrics we are capturing.
-
-From the browser on your local machine, navigate to `http://localhost:3000/login`. You should be greeted with
-a login screen. You can login with the default credentials, `admin/admin`. Be sure to update your password afterwards.
-
-
- This guide assumes the user has configured Prometheus, AlertManager, and Loki as a data source.
-
-
-**Next, we need to add Prometheus as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select Prometheus
-5. Input the URL field with http://localhost:9090
-6. Click Save & Test
-
-**Next, we need to add AlertManager as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select AlertManager
-5. Input the URL field with http://localhost:9093
-6. Click Save & Test
-
-**Next, we need to add Loki as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select Loki
-5. Input the URL field with http://localhost:3100
-6. Click Save & Test
-
-We have our data sources connected, now its time to import the dashboard we want to use. You may
-create your own or import others, but the purposes of this guide we will use the Polkadot Essentials dashboard created
-by bLD nodes!
-
-**To import a dashboard:**
-
-1. Select the + button
-2. Select **Import**
-3. Input the dashboard number, **13840**
-4. Select Prometheus and AlertManager as data sources from the dropdown menu
-5. Click Load
-
-**In the dashboard selection, make sure you select:**
-
-- **Chain Metrics**: substrate
-- **Chain Instance Host**: localhost:9615 to point the chain data scrapper
-- **Chain Process Name**: the name of your node binary
-
-Congratulations!! You have now configured Grafana to visualize the metrics we are capturing. You now
-have monitoring setup for your node!
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/loki.mdx b/pages/docs/ecosystem-roles/validator/monitoring/loki.mdx
deleted file mode 100644
index 31d92fa6..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/loki.mdx
+++ /dev/null
@@ -1,334 +0,0 @@
----
-title: Loki Log Management
-description: A service dedidated to aggregate and query system logs.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Loki Log Management
-
-The following is a guide outlining the steps to setup Loki for log management of a Tangle node. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to scrape metrics from the running Tangle node.
-
-- **Loki** provides log aggregation system and metrics.
-- **Promtail** is the agent responsible for gathering logs, and sending them to Loki.
-
-Let's first start by downloading the latest releases of the above mentioned modules (Loki, Promtail download pages).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine.
-
-
-**1. Download Loki**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-darwin-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-darwin-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-linux-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-linux-arm64.zip"
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/grafana/loki/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-windows-amd64.exe.zip"
- ```
-
-
-
-
-**2. Download Promtail**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-darwin-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-darwin-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-linux-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-linux-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-windows-amd64.exe.zip"
- ```
-
-
-
-
-**3. Extract the Downloaded Files:**
-
-```sh filename="unzip" copy
-unzip "loki-linux-amd64.zip" &&
-unzip "promtail-linux-amd64.zip"
-```
-
-**4. Copy the Extracted Files into `/usr/local/bin`:**
-
-```sh filename="cp" copy
-sudo cp loki-linux-amd64 /usr/local/bin/ &&
-sudo cp promtail-linux-amd64 /usr/local/bin/
-```
-
-**5. Create Dedicated Users:**
-
-Now we want to create dedicated users for each of the modules we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin loki &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin promtail
-```
-
-**6. Create Directories for `loki`, and `promtail`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /etc/loki &&
-sudo mkdir /etc/promtail
-```
-
-**7. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-```sh filename="chown" copy
-sudo chown loki:loki /usr/local/bin/loki-linux-amd64 &&
-sudo chown promtail:promtail /usr/local/bin/promtail-linux-amd64
-```
-
-**9. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./loki-linux-amd64* &&
-rm -rf ./promtail-linux-amd64*
-```
-
-The next series of steps will be configuring each service.
-
-## Configuration
-
-If you are interested to see how we configure the Tangle Network nodes for monitoring check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Loki
-
-Loki's configuration details what ports to listen to, how to store the logs, and other configuration options.
-There are many other config options for Loki, and you can read more about Loki configuration at: https://grafana.com/docs/loki/latest/configuration/
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/loki/config.yml
-sudo nano /etc/loki/config.yml
-```
-
-```yaml filename="config.yaml" copy
-auth_enabled: false
-
-server:
- http_listen_port: 3100
- grpc_listen_port: 9096
-
-ingester:
- lifecycler:
- address: 127.0.0.1
- ring:
- kvstore:
- store: inmemory
- replication_factor: 1
- final_sleep: 0s
- chunk_idle_period: 5m
- chunk_retain_period: 30s
- max_transfer_retries: 0
-
-schema_config:
- configs:
- - from: 2020-10-24
- store: boltdb-shipper
- object_store: filesystem
- schema: v11
- index:
- prefix: index_
- period: 168h
-
-
-storage_config:
- boltdb:
- directory: /data/loki/index
-
- filesystem:
- directory: /data/loki/chunks
-
-limits_config:
- enforce_metric_name: false
- reject_old_samples: true
- reject_old_samples_max_age: 168h
-
-chunk_store_config:
- max_look_back_period: 0s
-
-table_manager:
- retention_deletes_enabled: false
- retention_period: 0
-```
-
-### Promtail
-
-The Promtail configuration details what logs to send to Loki. In the below configuration we are indicating
-to send the logs to Loki from the `/var/log/dkg` directory. This directory can be changed based on what logs you
-want to pick up. There are many other config options for Promtail, and you can read more about Promtail configuration at: https://grafana.com/docs/loki/latest/clients/promtail/configuration/
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/promtail/config.yml
-sudo nano /etc/promtail/config.yml
-```
-
-```yaml filename="config.yaml" copy
-server:
- http_listen_port: 9080
- grpc_listen_port: 0
-
-positions:
- filename: /data/loki/positions.yaml
-
-clients:
- - url: http://localhost:3100/loki/api/v1/push
-
-scrape_configs:
-- job_name: system
- static_configs:
- - targets:
- - localhost
- labels:
- job: varlogs
- __path__: /var/log/dkg/*log
-```
-
-## Service Setup
-
-### Loki
-
-Create and open the Loki service file:
-
-```sh filename="loki.service" copy
-sudo tee /etc/systemd/system/loki.service > /dev/null << EOF
-[Unit]
- Description=Loki Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=loki
- Group=loki
- Type=simple
- ExecStart=/usr/local/bin/loki-linux-amd64 -config.file /etc/loki/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### Promtail
-
-Create and open the Promtail service file:
-
-```sh filename="promtail.service" copy
-sudo tee /etc/systemd/system/promtail.service > /dev/null << EOF
-[Unit]
- Description=Promtail Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=promtail
- Group=promtail
- Type=simple
- ExecStart=/usr/local/bin/promtail-linux-amd64 -config.file /etc/promtail/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-Great! You have now configured all the services needed to run Loki.
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start each service:
-
-```sh filename="start service" copy
-sudo systemctl start loki.service &&
-sudo systemctl start promtail.service
-```
-
-And check that they are working fine, one by one:
-
-**loki**:
-
-```sh filename="status" copy
-systemctl status loki.service
-```
-
-**promtail**:
-
-```sh filename="status" copy
-systemctl status promtail.service
-```
-
-If everything is working adequately, activate the services!
-
-```sh filename="enable" copy
-sudo systemctl enable loki.service &&
-sudo systemctl enable promtail.service
-```
-
-Amazing! You have now successfully configured Loki for log management. Check out the Grafana
-documentation to create a Loki log dashboard!
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx b/pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx
deleted file mode 100644
index bbcb3f74..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx
+++ /dev/null
@@ -1,435 +0,0 @@
----
-title: Prometheus Setup
-description: Setup Prometheus for scraping node metrics and more.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Prometheus Setup
-
-The following is a guide outlining the steps to setup Prometheus to monitor a Tangle node. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/). It is important to note that
-this guide's purpose is to help you get started with monitoring your Tangle node, not to advise on how to setup a node securely. Please
-take additional security and privacy measures into consideration.
-
-In this guide we will configure the following modules to scrape metrics from the running Tangle node.
-
-- **Prometheus** is the central module; it pulls metrics from different sources to provide them to the Grafana dashboard and Alert Manager.
-- **Node exporter** provides hardware metrics of the dashboard.
-- **Process exporter** provides processes metrics for the dashboard (optional).
-
-## What is Prometheus?
-
-Prometheus is an open-source systems monitoring and alerting toolkit originally built at SoundCloud. Since its inception in 2012,
-many companies and organizations have adopted Prometheus, and the project has a very active developer and user community.
-It is now a standalone open source project and maintained independently of any company. To learn more about Prometheus, please
-visit the official docs site [here](https://prometheus.io/docs/introduction/overview/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Prometheus, Process exporter, and Node exporter).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine.
-
-
-**1. Download Prometheus**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.linux-arm64.tar.gz
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/prometheus/prometheus/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.windows-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.windows-arm64.tar.gz
- ```
-
-
-
-
-**2. Download Node Exporter**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.linux-arm64.tar.gz
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/prometheus/node_exporter/releases).
-
-
-
-
-**3. Download Process Exporter**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-arm64.tar.gz
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/ncabatoff/process-exporter/releases).
-
-
-
-
-**4. Extract the Downloaded Files:**
-
-Run the following command:
-
-```sh filename="tar" copy
-tar xvf prometheus-*.tar.gz &&
-tar xvf node_exporter-*.tar.gz &&
-tar xvf process-exporter-*.tar.gz
-```
-
-**5. Copy the Extracted Files into `/usr/local/bin`:**
-
-
- **Note:** The example below makes use of the `linux-amd64` installations, please update to make use of the target system you have installed.
-
-
-We are first going to copy the `prometheus` binary:
-
-```sh filename="cp" copy
-sudo cp ./prometheus-*.linux-amd64/prometheus /usr/local/bin/
-```
-
-Next, we are going to copy over the `prometheus` console libraries:
-
-```sh filename="cp" copy
-sudo cp -r ./prometheus-*.linux-amd64/consoles /etc/prometheus &&
-sudo cp -r ./prometheus-*.linux-amd64/console_libraries /etc/prometheus
-```
-
-We are going to do the same with `node-exporter` and `process-exporter`:
-
-```sh filename="cp" copy
-sudo cp ./node_exporter-*.linux-amd64/node_exporter /usr/local/bin/ &&
-sudo cp ./process-exporter-*.linux-amd64/process-exporter /usr/local/bin/
-```
-
-**6. Create Dedicated Users:**
-
-Now we want to create dedicated users for each of the modules we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin prometheus &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin node_exporter &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin process-exporter
-```
-
-**7. Create Directories for `Prometheus`, and `Process exporter`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /var/lib/prometheus &&
-sudo mkdir /etc/process-exporter
-```
-
-**8. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-**prometheus**:
-
-```sh filename="chown" copy
-sudo chown prometheus:prometheus /etc/prometheus/ -R &&
-sudo chown prometheus:prometheus /var/lib/prometheus/ -R &&
-sudo chown prometheus:prometheus /usr/local/bin/prometheus
-```
-
-**node_exporter**:
-
-```sh filename="chwon" copy
-sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
-```
-
-**process-exporter**:
-
-```sh filename="chown" copy
-sudo chown process-exporter:process-exporter /etc/process-exporter -R &&
-sudo chown process-exporter:process-exporter /usr/local/bin/process-exporter
-```
-
-**9. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./prometheus* &&
-rm -rf ./node_exporter* &&
-rm -rf ./process-exporter*
-```
-
-Great! You have now installed and setup your environment. The next series of steps will be configuring each service.
-
-## Configuration
-
-If you are interested to see how we configure the Tangle Network nodes for monitoring check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Prometheus
-
-Let’s edit the Prometheus config file and add all the modules in it:
-
-```sh filename="nano" copy
-sudo nano /etc/prometheus/prometheus.yml
-```
-
-Add the following code to the file and save:
-
-```yaml filename="promtheus.yml" copy
-global:
- scrape_interval: 15s
- evaluation_interval: 15s
-
-rule_files:
- - 'rules.yml'
-
-alerting:
- alertmanagers:
- - static_configs:
- - targets:
- - localhost:9093
-
-scrape_configs:
- - job_name: "prometheus"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9090"]
- - job_name: "substrate_node"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9615"]
- - job_name: "node_exporter"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9100"]
- - job_name: "process-exporter"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9256"]
-```
-
-- **scrape_interval** defines how often Prometheus scrapes targets, while evaluation_interval controls how often the software will evaluate rules.
-- **rule_files** set the location of Alert manager rules we will add next.
-- **alerting** contains the alert manager target.
-- **scrape_configs** contain the services Prometheus will monitor.
-
-You can notice the first scrap where Prometheus monitors itself.
-
-### Process exporter
-
-Process exporter needs a config file to be told which processes they should take into account:
-
-```sh filename="nano" copy
-sudo touch /etc/process-exporter/config.yml
-sudo nano /etc/process-exporter/config.yml
-```
-
-Add the following code to the file and save:
-
-```sh filename="config.yml" copy
-process_names:
- - name: "{{.Comm}}"
- cmdline:
- - '.+'
-```
-
-## Service Setup
-
-### Prometheus
-
-Create and open the Prometheus service file:
-
-```sh filename="promtheus.service" copy
-sudo tee /etc/systemd/system/prometheus.service > /dev/null << EOF
-[Unit]
- Description=Prometheus Monitoring
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=prometheus
- Group=prometheus
- Type=simple
- ExecStart=/usr/local/bin/prometheus \
- --config.file /etc/prometheus/prometheus.yml \
- --storage.tsdb.path /var/lib/prometheus/ \
- --web.console.templates=/etc/prometheus/consoles \
- --web.console.libraries=/etc/prometheus/console_libraries
- ExecReload=/bin/kill -HUP $MAINPID
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-### Node exporter
-
-Create and open the Node exporter service file:
-
-```sh filename="node_exporter.service" copy
-sudo tee /etc/systemd/system/node_exporter.service > /dev/null << EOF
-[Unit]
- Description=Node Exporter
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=node_exporter
- Group=node_exporter
- Type=simple
- ExecStart=/usr/local/bin/node_exporter
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-### Process exporter
-
-Create and open the Process exporter service file:
-
-```sh filename="process-exporter.service" copy
-sudo tee /etc/systemd/system/process-exporter.service > /dev/null << EOF
-[Unit]
- Description=Process Exporter
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=process-exporter
- Group=process-exporter
- Type=simple
- ExecStart=/usr/local/bin/process-exporter \
- --config.path /etc/process-exporter/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="deamon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start each service:
-
-**prometheus**:
-
-```sh filename="start serive" copy
-sudo systemctl start prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="start serive" copy
-sudo systemctl start node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="start serive" copy
-sudo systemctl start process-exporter.service
-```
-
-And check that they are working fine:
-
-**prometheus**:
-
-```sh filename="status" copy
-systemctl status prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="status" copy
-systemctl status node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="status" copy
-systemctl status process-exporter.service
-```
-
-If everything is working adequately, activate the services!
-
-**prometheus**:
-
-```sh filename="enable" copy
-sudo systemctl enable prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="enable" copy
-sudo systemctl enable node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="enable" copy
-sudo systemctl enable process-exporter.service
-```
-
-Amazing! We have now completely setup our Prometheus monitoring and are scraping metrics from our
-running Tangle node.
-
-You can view those metrics on the Prometheus dashboard by going to `http://localhost:9090/metrics` !
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx b/pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx
deleted file mode 100644
index a39eae5b..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: Quickstart
-description: Creating monitoring stack for Tangle node.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Monitoring Tangle Node
-
-The following is a guide outlining the steps to setup monitoring for an Tangle node. If you do not have Tangle node setup yet, please
-review the **How to run an Tangle node** setup guide [here](https://docs.webb.tools/v1/node-operators/run-tangle-node). It is important to note that
-this guide's purpose is to help you get started with monitoring your Tangle node, not to advise on how to setup a node securely. Please
-take additional security and privacy measures into consideration.
-
-Here is how our final configuration will look like at the end of this guide.
-
-- **Prometheus** is the central module; it pulls metrics from different sources to provide them to the Grafana dashboard and Alert Manager.
-- **Grafana** is the visual dashboard tool that we access from the outside (through SSH tunnel to keep the node secure).
-- **Alert Manager** listens to Prometheus metrics and pushes an alert as soon as a threshold is crossed (CPU % usage for example).
-- **Tangle Node** natively provides metrics for monitoring.
-- **Process exporter** provides processes metrics for the dashboard (optional).
-- **Loki** provides log aggregation system and metrics.
-- **Promtail** is the agent responsible for gathering logs, and sending them to Loki.
-
-
- Running the monitoring stack requires that you are already running the tangle network node with at least the following ports exports:
- - Prometheus : `https://localhost:9615`
-
-
-## Docker usage
-
-The quickest way to setup monitoring for your node is to use our provided `docker-compose` file. The docker image starts all the above monitoring
-tools with the exception of `Node exporter`. `node-exporter` is ommitted since some metrics are not available when running inside a docker container.
-
-Follow the instructions [here](/prometheus) to start the prometheus node exporter.
-
-### Prerequisites
-
-Before starting the monitoring stack, ensure the configs are setup correctly,
-
-- (Optional) Set the `__SLACK_WEBHOOK_URL__` in `alertmanager.yml` to receive slack alerts
-- Ensure the promtail mount path matches your log directory
-
-Note : All containers require connection to the localhost, this behaviour is different in Linux/Windows/Mac, the configs within the `docker-compose` and yml
-files assume a linux environment. Refer [this](https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach) to make necessary adjustments for your environment.
-
-### Usage
-
-**To start the monitoring stack, run:**
-
-```sh filename="compose up" copy
-cd monitoring
-docker compose up -d
-```
-
-You can then navigate to `http://localhost:3000` to access the Grafana dashboard!
-
-![Tangle Dashboard](../../../../../components/images/tangle-metrics.png)
diff --git a/pages/docs/ecosystem-roles/validator/quickstart.mdx b/pages/docs/ecosystem-roles/validator/quickstart.mdx
deleted file mode 100644
index 0fc80041..00000000
--- a/pages/docs/ecosystem-roles/validator/quickstart.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: Node Operator Quickstart
-description: Participate in the Webb ecosystem by deploying a Tangle node, to validate, serve data or more.
----
-
-import { QuickDeployArea, DeployArea, SupportArea, MonitoringArea } from "../../../../components/TangleQuickstart"
-import { RepoArea } from "../../../../components/RepoArea";
-import FullWebbCTA from "../../../../components/FullWebbCTA";
-
-# Node Operator Quickstart
-
-Becoming a node operator on the Tangle Network requires some technical skills, trust, and support from the community. Below
-is a collection of quick links for quick setups!
-
-**If you're looking to understand how to become a Validator in Substrate systems like Tangle, see the [Polkadot Docs](https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot) as well.**
-
-## Quick Setup
-
-
-
-## Advanced Setup
-
-
-
-## Monitoring
-
-Monitoring and troubleshooting your Tangle node is essential, and we provide setup instructions to make it incredibly easy to get started!
-
-
-
-## Support Channels
-
-Run into weird issues? Or have questions about the Tangle Network? Join the Webb community and become connected to the entire Webb ecosystem.
-
-
-
-## Repositories
-
-Interested in what we are building at Webb? Clone the below repositories, and start contributing to a private cross-chain future!
-
-
-
-
diff --git a/pages/docs/ecosystem-roles/validator/required-keys.mdx b/pages/docs/ecosystem-roles/validator/required-keys.mdx
deleted file mode 100644
index 8abb1604..00000000
--- a/pages/docs/ecosystem-roles/validator/required-keys.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-title: Required Keys
-description: Describes the keys necessary to start and run a Tangle node.
----
-
-import Callout from "../../../../components/Callout";
-
-
- This guide assumes you have a validator already running, refer [Running With Docker](./deploy-with-docker/validator-node.mdx) or [Running with systemd](./systemd/validator-node.mdx) to ensure your node is setup correctly
-
-
-# Required Keys
-
-In order to participate in the distributed key generation protocol, block production, and block finalization, you will be required to set up a few keys. These keys
-include:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-To generate each of the above keys we will make use of [subkey](https://docs.substrate.io/reference/command-line-tools/subkey/). You will need to install
-subkey before running the command.
-
-
- Keep in mind the below commands are using `/tangle-data` base-path, please specify your preferred base-path during execution.
-
-
-**Once installed, to generate the DKG key you can run the following:**
-
-```sh filename="DKG Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Ecdsa \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type wdkg
-```
-
-**To generate the Aura key you can run the following:**
-
-```sh filename="Aura Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Sr25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type aura
-```
-
-**To generate the Account key you can run the following:**
-
-```sh filename="Account Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Sr25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type acco
-```
-
-**To generate the Imonline key you can run the following:**
-
-```sh filename="Imonline Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Sr25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type imon
-```
-
-### Synchronize Chain Data
-
-You can begin syncing your node by running the following command:
-
-```sh filename="Syncing node" copy
-./target/release/tangle-parachain
-```
-
-Once your node has fully syncronized with the Relay Chain you may proceed to setup the
-necessary accounts to operate a node.
-
-## Bond funds
-
-To start collating, you need to have x TNT tokens for Tangle Network. It is highly recommended that you make your controller
-and stash accounts be two separate accounts. For this, you will create two accounts and make sure each of them have at least
-enough funds to pay the fees for making transactions. Keep most of your funds in the stash account since it is meant to be
-the custodian of your staking funds.
-
-Make sure not to bond all your TNT balance since you will be unable to pay transaction fees from your bonded balance.
-
-It is now time to set up our validator. We will do the following:
-
-- Bond the TNT of the Stash account. These TNT tokens will be put at stake for the security of the network and can be slashed.
-- Select the Controller. This is the account that will decide when to start or stop validating.
-
-First, go to the Staking section. Click on "Account Actions", and then the "+ Stash" button. It should look something
-similar to the below image.
-
-![bond](../../../../components/images/bond.png)
-
-Once everything is filled in properly, click Bond and sign the transaction with your Stash account.
-
-## Session Keys
-
-Operators need to set their `Author` session keys. Run the following command to author session keys.
-**Note:** You may need to change `http://localhost:9933` to your correct address.
-
-```sh filename="Generate session key" copy
-curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "author_rotateKeys", "params":[]}' http://localhost:9933
-```
-
-Result will look like this, copy the key:
-
-```
-{"jsonrpc":"2.0","result":"0x400e3cef43bdessab331e4g03115c4bcecws3cxff608fa3b8sh6b07y369386570","id":1}
-```
-
-### Set session keys
-
-1. Go to the Polkadot.js portal: `Developer > Extrinsic`.
-2. Select your account and extrinsic type: session / setKeys.
-3. Enter the session keys and set proof to `0x00`.
-4. Submit the transaction.
-
-### Setting identity
-
-Operators need to set their identity.
-
-1. Go to the Polkadot.js portal: `Accounts`
-2. Open the 3 dots next to your address: `Set on-chain Identity`
-3. Enter all fields you want to set.
-4. Send the transaction.
-
-### Request judgment
-
-1. Go to the Polkadot.js portal: `Developer > Extrinsic`
-2. Select your account and extrinsic type: `identity / requestJudgment`
-3. Send the transaction.
-
-### Production blocks
-
-Once your is active, you will see your name inside Network tab every time you produce a block!
diff --git a/pages/docs/ecosystem-roles/validator/requirements.mdx b/pages/docs/ecosystem-roles/validator/requirements.mdx
deleted file mode 100644
index 0650ca60..00000000
--- a/pages/docs/ecosystem-roles/validator/requirements.mdx
+++ /dev/null
@@ -1,189 +0,0 @@
----
-title: Requirements
-description: An overview of Webb Tangle node requirements.
----
-
-import { Tabs, Tab } from "../../../../components/Tabs";
-
-# Requirements
-
-The current Tangle testnet is a standalone network, meaning that it is not connected to the Polkadot or Kusama relay chain.
-Since the Tangle is not a parachain, the size of nodes are quite a small build as it only contains code to run the standalone Tangle network and not syncing
-the relay chain or communicate between the two. As such, the build is smaller, and does not require the same minumum spec requirements as a parachain node.
-
-The following specifications are the ideal or recommended, but nodes can be run with less. Testnet nodes have also been run using AWS t3.Large instances.
-
-| Component | Requirements |
-| --------- | ------------------------------------------------------------------------------------------------------ |
-| CPU | Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz |
-| Storage | An NVMe solid state drive of 500 GB (As it should be reasonably sized to deal with blockchain growth). |
-| Memory | 32GB ECC |
-| Firewall | P2P port must be open to incoming traffic:
- Source: Any
- Destination: 30333, 30334 TCP |
-
-## Running Ports
-
-As stated before, the standalone nodes will listen on multiple ports. The default Substrate ports are used in the standalone,
-while the relay chain will listen on the next higher port.
-
-The only ports that need to be open for incoming traffic are those designated for P2P.
-
-**Default Ports for a Tangle Full-Node:**
-
-| Description | Port |
-| ----------- | ----------- |
-| P2P | 30333 (TCP) |
-| RPC | 9933 |
-| WS | 9944 |
-| Prometheus | 9615 |
-
-## Dependencies
-
-In order to build a Tangle node from source your machine must have specific dependecies installed. This guide
-outlines those requirements.
-
-This guide uses [https://rustup.rs](https://rustup.rs) installer and the `rustup` tool to manage the Rust toolchain. Rust is required to
-compile a Tangle node.
-
-First install and configure `rustup`:
-
-```sh filename="Install Rust" copy
-# Install
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-
-# Configure
-source ~/.cargo/env
-```
-
-Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target:
-
-```sh filename="Configure Rust" copy
-rustup default nightly
-rustup update
-rustup update nightly
-rustup target add wasm32-unknown-unknown --toolchain nightly
-```
-
-Great! Now your Rust environment is ready! 🚀🚀
-
-### Substrate Dependencies
-
-
-
-
- Debian version:
- ```sh filename=" Debian" copy
- sudo apt install --assume-yes git clang curl libssl-dev llvm libudev-dev make protobuf-compiler
- ```
- Arch version:
- ```sh filename="Arch" copy
- pacman -Syu --needed --noconfirm curl git clang make protobuf
- ```
- Fedora version:
- ```sh filename="Fedora" copy
- sudo dnf update
- sudo dnf install clang curl git openssl-devel make protobuf-compiler
- ```
- Opensuse version:
- ```sh filename="Opensuse" copy
- sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel make protobuf
- ```
-
- Remember that different distributions might use different package managers and bundle packages in different ways.
- For example, depending on your installation selections, Ubuntu Desktop and Ubuntu Server might have different packages
- and different requirements. However, the packages listed in the command-line examples are applicable for many common Linux
- distributions, including Debian, Linux Mint, MX Linux, and Elementary OS.
-
-
-
-
- Assumes user has Homebrew already installed.
-
- ```sh filename="Brew" copy
- brew update
- brew install openssl gmp protobuf cmake
- ```
-
-
-
-
- For Windows users please refer to the official Substrate documentation:
- [Windows](https://docs.substrate.io/install/windows/)
-
-
-
-
-### Build from Source 💻
-
-Once the development environment is set up, you can build the Tangle node from source.
-
-```sh filename="Clone repo" copy
-git clone https://github.com/webb-tools/tangle.git
-```
-
-```sh filename="Build" copy
-cargo build --release
-```
-
-> NOTE: You _must_ use the release builds! The optimizations here are required
-> as in debug mode, it is expected that nodes are not able to run fast enough to produce blocks.
-
-You will now have the `tangle-standalone` binary built in `target/release/` dir
-
-#### Feature Flags
-
-Some features of tangle node are setup behind feature flags, to enable these features you will have to build the binary with these flags enabled
-
-1. **txpool**
-
-This feature flag is useful to help trace and debug evm transactions on the chain, you should build node with this flag if you intend to use the node for any evm transaction following
-
-```sh filename="Build txpool" copy
-cargo build --release --features txpool
-```
-
-2. **relayer**
-
-This feature flag is used to start the embedded tx relayer with tangle node, you should build node with this flag if you intend to run a node with a relayer which can be used for transaction relaying or data querying
-
-```sh filename="Build relayer" copy
-cargo build --release --features relayer
-```
-
-3. **light-client**
-
-This feature flag is used to start the embedded light client with tangle node, you should build node with this flag if you intend to run a node with a light client relayer to sync EVM data on Tangle
-
-```sh filename="Build light" copy
-cargo build --release --features light-client
-```
-
-### Use Precompiled binary 💻
-
-Every release of tangle node includes a Precompiled binary, its currently limited to amd-64 architecture but we plan to
-support more soon. You can view all releases [here](https://github.com/webb-tools/tangle/releases).
-
-In the below commands, substiture `LATEST_RELEASE` with the version you want to use, the current latest version is `0.4.6`
-
-### Get tangle binary
-
-```sh filename="Get binary" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-linux-amd64
-```
-
-### Get tangle binary with txpool feature
-
-```sh filename="Get binary txpool" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-txpool-linux-amd64
-```
-
-### Get tangle binary with relayer feature
-
-```sh filename="Get binary relayer" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-relayer-linux-amd64
-```
-
-### Get tangle binary with light-client feature
-
-```sh filename="Get binary light" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-light-client-linux-amd64
-```
diff --git a/pages/docs/ecosystem-roles/validator/systemd/_meta.json b/pages/docs/ecosystem-roles/validator/systemd/_meta.json
deleted file mode 100644
index a3cff2b9..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "full-node": "Full Node",
- "validator-node": "Validator Node"
-}
diff --git a/pages/docs/ecosystem-roles/validator/systemd/full-node.mdx b/pages/docs/ecosystem-roles/validator/systemd/full-node.mdx
deleted file mode 100644
index e6db89c0..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/full-node.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Running with Systemd
-description: Run a Tangle full node using systemd.
----
-
-# Running with Systemd
-
-You can run your full node as a systemd process so that it will automatically restart on server reboots
-or crashes (and helps to avoid getting slashed!).
-
-Before following this guide you should have already set up your machines environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Hardware](/docs/tangle-network/node/hardware) and [Software](/docs/tangle-network/node/node-software) page.
-
-## System service setup
-
-Run the following commands to create the service configuration file:
-
-```sh filename="mv" copy
-# Move the tangle-standalone binary to the bin directory (assumes you are in repo root directory)
-sudo mv ./target/release/tangle-standalone /usr/bin/
-```
-
-Add the following contents to the service configuration file. Make sure to replace the **USERNAME** with the username you created in the previous step, add your own node name, and update
-any paths or ports to your own preference.
-
-**Note:** The below configuration assumes you are targeting the Tangle Network chainspec.
-
-**Full Node**
-
-```sh filename="full.service" copy
-sudo tee /etc/systemd/system/full.service > /dev/null << EOF
-[Unit]
-Description=Tangle Full Node
-After=network-online.target
-StartLimitIntervalSec=0
-
-[Service]
-User=
-Restart=always
-RestartSec=3
-ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/full-node \
- --name \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --rpc-cors all \
- --port 9946 \
- --no-mdns \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-**Full Node with evm trace**
-
-**Note:** To run with evm trace, you should use a binary built with `txpool` flag, refer [requirements](../requirements.mdx) page for more details.
-
-```sh filename="full.service" copy
-sudo tee /etc/systemd/system/full.service > /dev/null << EOF
-[Unit]
-Description=Tangle Full Node
-After=network-online.target
-StartLimitIntervalSec=0
-
-[Service]
-User=
-Restart=always
-RestartSec=3
-ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/full-node \
- --name \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --rpc-cors all \
- --port 9946 \
- --no-mdns --ethapi trace,debug,txpool
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### Enable the services
-
-Double check that the config has been written to `/etc/systemd/system/full.service` correctly.
-If so, enable the service so it runs on startup, and then try to start it now:
-
-```sh filename="enable service" copy
-sudo systemctl daemon-reload
-sudo systemctl enable full
-sudo systemctl start full
-```
-
-Check the status of the service:
-
-```sh filename="status" copy
-sudo systemctl status full
-```
-
-You should see the node connecting to the network and syncing the latest blocks.
-If you need to tail the latest output, you can use:
-
-```sh filename="logs" copy
-sudo journalctl -u full.service -f
-```
-
-Congratulations! You have officially setup a Tangle Network node using Systemd. If you are interested
-in learning how to setup monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx b/pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx
deleted file mode 100644
index c8f26935..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: Quickstart
-description: Run a Tangle Validator node using systemd.
----
-
-# Tangle Validator Quickstart
-
-**Caution:** The following guide is only meant as a quickstart for anyone looking to run a tangle node with minimal
-config, this guide uses automated keys and it is not recommended to run a validator using this setup long term, refer to [advanced](/docs/ecosystem-roles/validator/systemd/validator-node/) guide
-for a more secure long term setup.
-
-Before following this guide you should have already set up your machines environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Hardware](/docs/tangle-network/node/validator/hardware) page.
-
-## Standalone Testnet
-
-### 1. Fetch the tangle binary
-
-Use the latest release version in the url in place of ``, you can visit [releases](https://github.com/webb-tools/tangle/releases) page to view the latest info
-
-```
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-linux-amd64
-```
-
-For example, at the time of writing this document, the latest release is v0.4.7 and the link would be as follows
-
-```
-wget https://github.com/webb-tools/tangle/releases/download/v0.4.7/tangle-standalone-linux-amd64
-```
-
-### 2. Start the node binary
-
-To start the binary you can run the following command (ensure you are in the same folder where tangle-standalone is downloaded)
-
-Make sure to change the following params before executing the command
-
-1. `` : This is the path where your chain DB will live
-2. `` : This is a unique node name for your node, use a unique name here to help identity your node to other validators and telemetry data
-
-```
-./tangle-standalone-linux-amd64 \
- --base-path \
- --name \
- --chain tangle-testnet \
- --port 9944 \
- --validator \
- --auto-insert-keys \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-```
-
-If the node is running correctly, you should see an output similar to below:
-
-```
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-**Note** : Since the `--auto-insert-keys` flag was used the logs will print out the keys automatically generated for you,
-make sure to note down and keep this safely, in case you need to migrate or restart your node, these keys are essential.
-
-Congratulations! You have officially setup an Tangle Network node. The quickstart is only meant as a quickstart for anyone looking to run a tangle node with minimal
-config, this guide uses automated keys and it is not recommended to run a validator using this setup long term, refer to [advanced](/docs/ecosystem-roles/validator/systemd/validator-node/) guide
-for a more secure long term setup.. If you are interested
-in learning how to setup monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx b/pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx
deleted file mode 100644
index 78d572be..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: Running with Systemd
-description: Run a Tangle Validator node using systemd.
----
-
-# Running with Systemd
-
-You can run your validator node as a Systemd process so that it will automatically restart on server reboots
-or crashes (and helps to avoid getting slashed!).
-
-Before following this guide you should have already set up your machines environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Hardware](/docs/tangle-network/node/validator/requirements) page.
-
-## Standalone Testnet
-
-### Generate and store keys
-
-We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys](/docs/ecosystem-roles/validator/required-keys/) section.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-Let's now insert our required secret keys, we will not pass the SURI in the command, instead it will be interactive, where you
-should paste your SURI when the command asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Sr25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Sr25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type aura
-```
-
-**Im-online Keys** - **these keys are optional**
-
-```sh filename="Imonline" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Sr25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Ecdsa \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Ed25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type gran
-```
-
-To ensure you have successfully generated the keys correctly run:
-
-```sh filename="ls" copy
-ls ~/data/validator//keystore
-# You should see a some file(s) there, these are the keys.
-```
-
-## System service setup
-
-Run the following commands to create the service configuration file:
-
-```sh filename="mv" copy
-# Move the tangle-standalone binary to the bin directory (assumes you are in repo root directory)
-sudo mv ./target/release/tangle-standalone /usr/bin/
-```
-
-Add the following contents to the service configuration file. Make sure to replace the **USERNAME** with the username you created in the previous step, add your own node name, and update any paths or ports to your own preference.
-
-**Note:** The below configuration assumes you are targeting the Tangle Network chainspec.
-
-**Caution:** Ensure you insert the keys using the instructions at [generate keys](#generate-and-store-keys),
-if you want the node to auto generate the keys, add the `--auto-insert-keys` flag.
-
-**Validator Node**
-
-```sh filename="validator.service" copy
-sudo tee /etc/systemd/system/validator.service > /dev/null << EOF
-[Unit]
-Description=Tangle Validator Node
-After=network-online.target
-StartLimitIntervalSec=0
-
-[Service]
-User=
-Restart=always
-RestartSec=3
-ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/validator/ \
- --name \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --port 30333 \
- --validator \
- --no-mdns \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### Enable the services
-
-Double check that the config has been written to `/etc/systemd/system/validator.service` correctly.
-If so, enable the service so it runs on startup, and then try to start it now:
-
-```sh filename="enable service" copy
-sudo systemctl daemon-reload
-sudo systemctl enable validator
-sudo systemctl start validator
-```
-
-Check the status of the service:
-
-```sh filename="status" copy
-sudo systemctl status validator
-```
-
-You should see the node connecting to the network and syncing the latest blocks.
-If you need to tail the latest output, you can use:
-
-```sh filename="logs" copy
-sudo journalctl -u validator.service -f
-```
-
-If the node is running correctly, you should see an output similar to below:
-
-```sh filename="output"
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-### Network sync
-
-After a validator node is started, it will start syncing with the current chain state. Depending on the size of the chain when you do this, this step may take anywhere from a few minutes to a few hours.
-
-Example of node sync :
-
-```sh filename="output after synced" copy
-2021-06-17 03:07:39 🔍 Discovered new external address for our node: /ip4/10.26.16.1/tcp/30333/ws/p2p/12D3KooWLtXFWf1oGrnxMGmPKPW54xWCHAXHbFh4Eap6KXmxoi9u
-2021-06-17 03:07:40 ⚙️ Syncing 218.8 bps, target=#5553764 (17 peers), best: #24034 (0x08af…dcf5), finalized #23552 (0xd4f0…2642), ⬇ 173.5kiB/s ⬆ 12.7kiB/s
-2021-06-17 03:07:45 ⚙️ Syncing 214.8 bps, target=#5553765 (20 peers), best: #25108 (0xb272…e800), finalized #25088 (0x94e6…8a9f), ⬇ 134.3kiB/s ⬆ 7.4kiB/s
-2021-06-17 03:07:50 ⚙️ Syncing 214.8 bps, target=#5553766 (21 peers), best: #26182 (0xe7a5…01a2), finalized #26112 (0xcc29…b1a9), ⬇ 5.0kiB/s ⬆ 1.1kiB/s
-2021-06-17 03:07:55 ⚙️ Syncing 138.4 bps, target=#5553767 (21 peers), best: #26874 (0xcf4b…6553), finalized #26624 (0x9dd9…27f8), ⬇ 18.9kiB/s ⬆ 2.0kiB/s
-2021-06-17 03:08:00 ⚙️ Syncing 37.0 bps, target=#5553768 (22 peers), best: #27059 (0x5b73…6fc9), finalized #26624 (0x9dd9…27f8), ⬇ 14.3kiB/s ⬆ 4.4kiB/s
-```
-
-### Bond TNT and setup validator Account
-
-After your node is synced, you are ready to setup keys and onboard as a validator, make sure to complete the steps
-at [required keys](../required-keys.mdx) to start validating.
-
----
-
-Congratulations! You have officially setup an Tangle Network node using Systemd. If you are interested
-in learning how to setup monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/ecosystem-roles/validator/troubleshooting.mdx b/pages/docs/ecosystem-roles/validator/troubleshooting.mdx
deleted file mode 100644
index 5ebbeeac..00000000
--- a/pages/docs/ecosystem-roles/validator/troubleshooting.mdx
+++ /dev/null
@@ -1,108 +0,0 @@
----
-title: Troubleshooting
-description: Provides a series of suggestive fixes that are common issues when starting a Tangle node.
----
-
-# Logs
-
-If you would like to run the node with verbose logs you may add the following arguments during initial setup. You may change the target to include `debug | error | info| trace | warn`.
-
-```
--ldkg=debug \
--ldkg_metadata=debug \
--lruntime::offchain=debug \
--ldkg_proposal_handler=debug \
--ldkg_proposals=debug
-```
-
-# Troubleshooting
-
-## P2P Ports Not Open
-
-If you don't see an Imported message (without the [Relaychain] tag), you need to check the P2P port configuration. P2P port must be open to incoming traffic.
-
-## In Sync
-
-Both chains must be in sync at all times, and you should see either Imported or Idle messages and have connected peers.
-
-## Genesis Mismatching
-
-If you notice similar log messages as below:
-
-```
-DATE [Relaychain] Bootnode with peer id `ID` is on a different
-chain (our genesis: 0x3f5... theirs: 0x45j...)
-```
-
-This typically means that you are running an older version and will need to upgrade.
-
-## Troubleshooting for Apple Silicon users
-
-Install Homebrew if you have not already. You can check if you have it installed with the following command:
-
-```sh filename="brew" copy
-brew help
-```
-
-If you do not have it installed open the Terminal application and execute the following commands:
-
-```sh filename="install brew" copy
-# Install Homebrew if necessary https://brew.sh/
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
-
-# Make sure Homebrew is up-to-date, install openssl
-brew update
-brew install openssl
-```
-
-❗ **Note:** Native ARM Homebrew installations are only going to be supported at `/opt/homebrew`. After Homebrew installs, make sure to add `/opt/homebrew/bin` to your PATH.
-
-```sh filename="add PATH" copy
-echo 'export PATH=/opt/homebrew/bin:$PATH' >> ~/.bash_profile
-```
-
-An example `bash_profile` for reference may look like the following:
-
-```sh filename="export PATH" copy
-export PATH=/opt/homebrew/bin:$PATH
-export PATH=/opt/homebrew/opt/llvm/bin:$PATH
-export CC=/opt/homebrew/opt/llvm/bin/clang
-export AR=/opt/homebrew/opt/llvm/bin/llvm-ar
-export LDFLAGS=-L/opt/homebrew/opt/llvm/lib
-export CPPFLAGS=-I/opt/homebrew/opt/llvm/include
-export RUSTFLAGS='-L /opt/homebrew/lib'
-```
-
-In order to build **dkg-substrate** in `--release` mode using `aarch64-apple-darwin` Rust toolchain you need to set the following environment variables:
-
-```sh filename="export" copy
-echo 'export RUSTFLAGS="-L /opt/homebrew/lib"' >> ~/.bash_profile
-```
-
-Ensure `gmp` dependency is installed correctly.
-
-```sh filename="install gmp" copy
-brew install gmp
-```
-
-If you are still receiving an issue with `gmp`, you may need to adjust your path to the `gmp` lib. Below is a suggestive fix, but paths are machine / environment specific.
-
-Run:
-
-```sh filename="clean" copy
-cargo clean
-```
-
-Then:
-
-```sh filename="export" copy
-export LIBRARY_PATH=$LIBRARY_PATH:$(brew --prefix)/lib:$(brew --prefix)/opt/gmp/lib
-```
-
-This should be added to your bash_profile as well.
-
-Ensure `protobuf` dependency is installed correctly.
-
-```sh filename="install protobuf" copy
-brew install protobuf
-```
diff --git a/pages/docs/ecosystem-roles/validator/validation.mdx b/pages/docs/ecosystem-roles/validator/validation.mdx
deleted file mode 100644
index 992f4571..00000000
--- a/pages/docs/ecosystem-roles/validator/validation.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Validation
-
-In a blockchain context, validating usually refers to the process performed by nodes (often called validators) in the network to ensure that transactions and blocks meet the necessary rules and protocols of the network. This can involve verifying that transactions are correctly signed, that they don't double-spend coins, and that newly created blocks are formatted correctly and include valid transactions. By validating data, transactions, or blocks, we ensure that the systems or networks in question operate as intended and maintain their integrity. In Proof-of-Stake systems, Validators are often incentivized and rewarded through portions of new tokens generated by inflation or otherwise.
-
-## Stepping into Responsibility
-
-Embarking on the journey to becoming a blockchain validator comes with considerable responsibility. As a validator, you are entrusted not only with your own stake but also the stake of those who nominate you. Any errors or breaches can lead to penalties known as slashing, impacting both your token balance and your standing within the network. However, being a validator can be immensely rewarding, offering you the opportunity to actively contribute to the security of a decentralized network and grow your digital assets.
-
-## Proceed with Caution
-
-We strongly advise that you possess substantial system administration experience before choosing to run your own validator. The role goes beyond merely running a blockchain binary; it requires the ability to address and resolve technical issues and anomalies independently. Running a validator is as much about knowledge as it is about problem-solving skills.
-
-## Security: Your Priority
-
-Security is paramount when running a successful validator. You should thoroughly familiarize yourself with the secure validator guidelines to understand the considerations when setting up your infrastructure. As you grow and evolve as a validator, these guidelines can serve as a foundation upon which you build your modifications and customizations.
-
-## Your Support Network
-
-Remember, you are not alone in this journey. We encourage you to connect with the [Webb community](https://webb.tools/community). These communities are teeming with experienced team members and fellow validators who are more than willing to answer questions, provide insights, and share valuable experiences. Additionally, you will want to make community members aware of your validator services, so they can nominate their stake to you.
-
-Embarking on the validator journey is both challenging and rewarding. With careful preparation, a strong understanding of the associated responsibilities and risks, and the support of the community, you can make significant contributions to the Webb ecosystem.
diff --git a/pages/docs/ecosystem-roles/validator/validator-rewards.mdx b/pages/docs/ecosystem-roles/validator/validator-rewards.mdx
deleted file mode 100644
index 3168e931..00000000
--- a/pages/docs/ecosystem-roles/validator/validator-rewards.mdx
+++ /dev/null
@@ -1,125 +0,0 @@
----
-title: Validator Rewards
-description: A brief overview of Tangle Network rewards and their payout scheme.
----
-
-# Validator Rewards
-
-Running a [validator](validation.mdx) node on the Tangle Network allows you to connect to the network, sync with a bootnode, obtain local access to RPC endpoints, and also author blocks. The network rewards successful validators (users running validator nodes and actively producing blocks) by paying a set amount of network tokens as rewards. Validators are chosen using an algorithm [AURA](https://docs.substrate.io/reference/glossary/#authority-round-aura) that works to give every validator in the active set, a chance at authoring a block.
-
-## How Rewards are Calculated
-
-## Era Points
-
-For every era (a period of time approximately 6 hours in length in Tangle), validators are paid proportionally to the amount of _era points_ they have collected. Era
-points are reward points earned for payable actions like:
-
-- producing a non-uncle block in the Chain.
-- producing a reference to a previously unreferenced uncle block.
-- producing a referenced uncle block.
-
-An uncle block is a block that is valid in every regard, but which failed to become
-canonical. This can happen when two or more validators are block producers in a single slot, and the
-block produced by one validator reaches the next block producer before the others. We call the
-lagging blocks uncle blocks.
-
-Payments occur at the end of every era.
-
-Era points create a probabilistic component for staking rewards.
-
-If the _mean_ of staking rewards is the average rewards per era, then the _variance_ is the
-variability from the average staking rewards. The exact TNT value of each era point is not known in
-advance since it depends on the total number of points earned by all validators in a given era. This
-is designed this way so that the total payout per era depends on Tangle's inflation model, and not on the number of payable
-actions (f.e., authoring a new block) executed.
-
-In this case, analyzing the _expected value_ of staking rewards will paint a better picture as the
-weight of era points of validators and para-validators in the reward average are taken into
-consideration.
-
-#### High-level breakdown of reward variance
-
-This should only serve as a high-level overview of the probabilistic nature for staking rewards.
-
-Let:
-
-- `pe` = para-validator era points,
-- `ne` = non-para-validator era points,
-- `EV` = expected value of staking rewards,
-
-Then, `EV(pe)` has more influence on the `EV` than `EV(ne)`.
-
-Since `EV(pe)` has a more weighted probability on the `EV`, the increase in variance against the
-`EV` becomes apparent between the different validator pools (aka. validators in the active set and
-the ones chosen to para-validate).
-
-Also, let:
-
-- `v` = the variance of staking rewards,
-- `p` = number of para-validators,
-- `w` = number validators in the active set,
-- `e` = era,
-
-Then, `v` ↑ if `w` ↑, as this reduces `p` : `w`, with respect to `e`.
-
-Increased `v` is expected, and initially keeping `p` ↓ using the same para-validator set for
-all parachains ensures availability and approval voting. In addition, despite `v` ↑ on an `e` to `e`
-basis, over time, the amount of rewards each validator receives will equal out based on the
-continuous selection of para-validators.
-
-## Payout Scheme
-
-No matter how much total stake is behind a validator, all validators split the block authoring
-payout essentially equally. The payout of a specific validator, however, may differ based on
-era points, as described above. Although there is a probabilistic component to
-receiving era points, and they may be impacted slightly depending on factors such as network
-connectivity, well-behaving validators should generally average out to having similar era point
-totals over a large number of eras.
-
-Validators may also receive "tips" from senders as an incentive to include transactions in their
-produced blocks. Validators will receive 100% of these tips directly.
-
-For simplicity, the examples below will assume all validators have the same amount of era points,
-and received no tips.
-
-```
-Validator Set Size (v): 4
-Validator 1 Stake (v1): 18 tokens
-Validator 2 Stake (v2): 9 tokens
-Validator 3 Stake (v3): 8 tokens
-Validator 4 Stake (v4): 7 tokens
-Payout (p): 8 TNT
-
-Payout for each validator (v1 - v4):
-p / v = 8 / 4 = 2 tokens
-```
-
-Note that this is different than most other Proof-of-Stake systems such as Cosmos. As long as a
-validator is in the validator set, it will receive the same block reward as every other validator.
-Validator `v1`, who had 18 tokens staked, received the same reward (2 tokens) in this era as `v4`
-who had only 7 tokens staked.
-
-## Slashing
-
-Although rewards are paid equally, slashes are relative to a validator's stake. Therefore, if you do
-have enough TNT to run multiple validators, it is in your best interest to do so. A slash of 30%
-will, of course, be more TNT for a validator with 18 TNT staked than one with 9 TNT staked.
-
-Running multiple validators does not absolve you of the consequences of misbehavior. Polkadot
-punishes attacks that appear coordinated more severely than individual attacks. You should not, for
-example, run multiple validators hosted on the same infrastructure. A proper multi-validator
-configuration would ensure that they do not fail simultaneously.
-
-Nominators have the incentive to nominate the lowest-staked validator, as this will result in the
-lowest risk and highest reward. This is due to the fact that while their vulnerability to slashing
-remains the same (since it is percentage-based), their rewards are higher since they will be a
-higher proportion of the total stake allocated to that validator.
-
-To clarify this, let us imagine two validators, `v1` and `v2`. Assume both are in the active set,
-have commission set to 0%, and are well-behaved. The only difference is that `v1` has 90 TNT
-nominating it and `v2` only has 10. If you nominate `v1`, it now has `90 + 10 = 100` TNT, and you
-will get 10% of the staking rewards for the next era. If you nominate `v2`, it now has
-`10 + 10 = 20` TNT nominating it, and you will get 50% of the staking rewards for the next era. In
-actuality, it would be quite rare to see such a large difference between the stake of validators,
-but the same principle holds even for smaller differences. If there is a 10% slash of either
-validator, then you will lose 1 TNT in each case.
diff --git a/pages/docs/tangle-network/_meta.json b/pages/docs/tangle-network/_meta.json
deleted file mode 100644
index 9da11bf7..00000000
--- a/pages/docs/tangle-network/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "overview": "Overview",
- "build": "Build",
- "node": "Node",
- "learn": "Learn",
- "governance": "Govern"
-}
diff --git a/pages/docs/tangle-network/build/_meta.json b/pages/docs/tangle-network/build/_meta.json
deleted file mode 100644
index 8d616567..00000000
--- a/pages/docs/tangle-network/build/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "deploy-using-hardhat": "Deploy on Tangle with Hardhat",
- "network-information-configuration": "Network Configuration",
- "json-rpc-endpoints": "RPC Endpoints",
- "pallets": "Pallets",
- "precompile-addresses": "Precompiles"
-}
diff --git a/pages/docs/tangle-network/build/deploy-using-hardhat.mdx b/pages/docs/tangle-network/build/deploy-using-hardhat.mdx
deleted file mode 100644
index 702b71fb..00000000
--- a/pages/docs/tangle-network/build/deploy-using-hardhat.mdx
+++ /dev/null
@@ -1,116 +0,0 @@
-# Deploy on Tangle using Hardhat
-
-## Pre-requisites and Assumptions
-
-This guide to deploying a smart contract on a Substrate-based blockchain network with EVM compatibility (similar to Moonbeam) using Hardhat assumes a basic understanding of Blockchain, Smart Contracts, Solidity, Hardhat and Substrate: You should be familiar with Ethereum as it forms the basis of any EVM-compatible blockchain.
-
-If any of the above assumptions do not hold true, we recommend taking the time to fill in the gaps in your knowledge and setup before proceeding. This will ensure that you can follow along with the guide effectively.
-
-## Setup
-
-1. **Install Node.js and npm**
- Make sure you have Node.js and npm installed. The recommended versions are Node v14.17.6 and npm v6.14.15.
-
-2. **Install Hardhat**
- Create a new directory and initialize an npm project. Then install Hardhat using npm:
-
-```bash
-mkdir
-cd
-npm init -y
-npm install --save-dev hardhat
-```
-
-3. **Create a new Hardhat project**
- Run the following command to create a new Hardhat project:
-
-```bash
-npx hardhat
-```
-
-Select "Create an empty hardhat.config.js" when prompted.
-
-## Configure Hardhat for Tangle Network
-
-1. **Install necessary plugins**
- Install `@nomiclabs/hardhat-ethers`, `@nomiclabs/hardhat-waffle`, `ethereum-waffle`, and `ethers` plugins:
-
-```bash
-npm install --save-dev @nomiclabs/hardhat-ethers @nomiclabs/hardhat-waffle ethereum-waffle ethers
-```
-
-2. **Update hardhat.config.js**
- Open hardhat.config.js and replace its content with the following configuration, updating the placeholder fields:
-
-```javascript
-require("@nomiclabs/hardhat-waffle");
-
-module.exports = {
- solidity: "0.8.0",
- networks: {
- tangle: {
- url: "",
- accounts: [`0x${}`],
- chainId: ,
- gasPrice: 10000000000,
- }
- }
-};
-```
-
-
-Tangle Chain Information
-You can view the latest details on networks, chainIDs, RPC URLs on the [Network Information and Configurations](/docs/tangle-network/getting-started/network-information-configuration/) page.
-
-
-Replace `` with the RPC URL of the Tangle Network. Replace `` with the private key of the account that will be used to deploy the contracts. `` should be replaced with the ChainId of the Tangle Network.
-
-## Deploy Contracts
-
-1. **Compile Contracts**
- Assuming your contracts are in the contracts folder, you can compile them by running:
-
-```bash
-npx hardhat compile
-```
-
-2. **Create a deployment script**
- Create a new directory named scripts in your project root, then create a file in this directory, say deploy.js, with the following content:
-
-```javascript
-async function main() {
- const [deployer] = await ethers.getSigners();
-
- console.log(
- "Deploying contracts with the account:",
- deployer.address
- );
-
- console.log("Account balance:", (await deployer.getBalance()).toString());
-
- const Contract = await ethers.getContractFactory("");
- const contract = await Contract.deploy();
-
- console.log("Contract address:", contract.address);
-}
-
-main()
- .then(() => process.exit(0))
- .catch((error) => {
- console.error(error);
- process.exit(1);
- });
-```
-
-Replace `` with the name of your contract and `` with the parameters required by your contract's constructor.
-
-3. **Run the deployment script**
- You can now deploy your contract to Tangle Network using Hardhat by running:
-
-```bash
-npx hardhat run --network tangle scripts/deploy.js
-```
-
-After running this command, Hardhat will execute the deployment script using the account and network configuration provided in `hardhat.config.js`.
-
-Make sure you update ``, ``, ``, ``, and `` with your actual values.
diff --git a/pages/docs/tangle-network/build/json-rpc-endpoints.mdx b/pages/docs/tangle-network/build/json-rpc-endpoints.mdx
deleted file mode 100644
index aac41dc5..00000000
--- a/pages/docs/tangle-network/build/json-rpc-endpoints.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
-# Substrate and Custom JSON-RPC Methods
-
-RPCs are exposed as a method on a specific module. This signifies that once available, you can invoke any RPC via `api.rpc..(...params[])`. This is also applicable for accessing Ethereum RPCs using the Polkadot.js API, in the format of `polkadotApi.rpc.eth.*`.
-
-Certain methods accessible via the Polkadot.js API interface are also available as JSON-RPC endpoints on Tangle Network nodes. This section offers some examples; you can request a list of exposed RPC endpoints by invoking `api.rpc.rpc.methods()` or the `rpc_methods` endpoint indicated below.
-
-## Supported Ethereum methods
-
-- **[eth_protocolVersion](https://eth.wiki/json-rpc/API#eth_protocolversion)** — returns `1` by default
-- **[eth_syncing](https://eth.wiki/json-rpc/API#eth_syncing)** — returns an object with data about the sync status or `false`
-- **[eth_hashrate](https://eth.wiki/json-rpc/API#eth_hashrate)** — returns `"0x0"` by default
-- **[eth_coinbase](https://eth.wiki/json-rpc/API#eth_coinbase)** — returns the latest block author. Not necessarily a finalized block
-- **[eth_mining](https://eth.wiki/json-rpc/API#eth_mining)** — returns `false` by default
-- **[eth_chainId](https://eth.wiki/json-rpc/API#eth_chainid)** — returns the chain ID used for signing at the current block
-- **[eth_gasPrice](https://eth.wiki/json-rpc/API#eth_gasprice)** — returns the base fee per unit of gas used. This is currently the minimum gas price for each network
-- **[eth_accounts](https://eth.wiki/json-rpc/API#eth_accounts)** — returns a list of addresses owned by the client
-- **[eth_blockNumber](https://eth.wiki/json-rpc/API#eth_blocknumber)** — returns the highest available block number
-- **[eth_getBalance](https://eth.wiki/json-rpc/API#eth_getbalance)** — returns the balance of the given address
-- **[eth_getStorageAt](https://eth.wiki/json-rpc/API#eth_getstorageat)** — returns content of the storage at a given address
-- **[eth_getBlockByHash](https://eth.wiki/json-rpc/API#eth_getblockbyhash)** — returns information about the block of the given hash including `baseFeePerGas` on post-London blocks
-- **[eth_getBlockByNumber](https://eth.wiki/json-rpc/API#eth_getblockbynumber)** — returns information about the block specified by block number including `baseFeePerGas` on post-London blocks
-- **[eth_getTransactionCount](https://eth.wiki/json-rpc/API#eth_gettransactioncount)** — returns the number of transactions sent from the given address (nonce)
-- **[eth_getBlockTransactionCountByHash](https://eth.wiki/json-rpc/API#eth_getblocktransactioncountbyhash)** — returns the number of transactions in a block with a given block hash
-- **[eth_getBlockTransactionCountByNumber](https://eth.wiki/json-rpc/API#eth_getblocktransactioncountbynumber)** — returns the number of transactions in a block with a given block number
-- **[eth_getUncleCountByBlockHash](https://eth.wiki/json-rpc/API#eth_getunclecountbyblockhash)** — returns `"0x0"` by default
-- **[eth_getUncleCountByBlockNumber](https://eth.wiki/json-rpc/API#eth_getunclecountbyblocknumber)** — returns `"0x0"` by default
-- **[eth_getCode](https://eth.wiki/json-rpc/API#eth_getcode)** — returns the code at given address at given block number
-- **[eth_sendTransaction](https://eth.wiki/json-rpc/API#eth_sendtransaction)** — creates new message call transaction or a contract creation, if the data field contains code. Returns the transaction hash, or the zero hash if the transaction is not yet available
-- **[eth_sendRawTransaction](https://eth.wiki/json-rpc/API#eth_sendrawtransaction)** — creates new message call transaction or a contract creation for signed transactions. Returns the transaction hash, or the zero hash if the transaction is not yet available
-- **[eth_call](https://eth.wiki/json-rpc/API#eth_call)** — executes a new message call immediately without creating a transaction on the block chain, returning the value of the executed call
-- **[eth_estimateGas](https://eth.wiki/json-rpc/API#eth_estimategas)** — returns an estimate amount of how much gas is necessary for a given transaction to succeed. You can optionally specify a `gasPrice` or `maxFeePerGas` and `maxPriorityFeePerGas`
-- **[eth_feeHistory](https://docs.alchemy.com/alchemy/apis/ethereum/eth-feehistory)** — returns `baseFeePerGas`, `gasUsedRatio`, `oldestBlock`, and `reward` for a specified range of up to 1024 blocks
-- **[eth_getTransactionByHash](https://eth.wiki/json-rpc/API#eth_gettransactionbyhash)** — returns the information about a transaction with a given hash. EIP-1559 transactions have `maxPriorityFeePerGas` and `maxFeePerGas` fields
-- **[eth_getTransactionByBlockHashAndIndex](https://eth.wiki/json-rpc/API#eth_gettransactionbyblockhashandindex)** — returns information about a transaction at a given block hash, and a given index position. EIP-1559 transactions have `maxPriorityFeePerGas` and `maxFeePerGas` fields
-- **[eth_getTransactionByBlockNumberAndIndex](https://eth.wiki/json-rpc/API#eth_gettransactionbyblocknumberandindex)** — returns information about a transaction at a given block number, and a given index position. EIP-1559 transactions have `maxPriorityFeePerGas` and `maxFeePerGas` fields
-- **[eth_getTransactionReceipt](https://eth.wiki/json-rpc/API#eth_gettransactionreceipt)** — returns the transaction receipt of a given transaction hash. After London support was added in runtime 1200, a new field named `effectiveGasPrice` has been added to the receipt, specifying the gas price of the transaction
-- **[eth_getUncleByBlockHashAndIndex](https://eth.wiki/json-rpc/API#eth_getunclebyblockhashandindex)** — returns `null` by default
-- **[eth_getUncleByBlockNumberAndIndex](https://eth.wiki/json-rpc/API#eth_getunclebyblocknumberandindex)** — returns `null` by default
-- **[eth_getLogs](https://eth.wiki/json-rpc/API#eth_getlogs)** — returns an array of all logs matching a given filter object
-- **[eth_getWork](https://eth.wiki/json-rpc/API#eth_getwork)** — returns `["0x0","0x0","0x0"]` by default
-
-### Unsupported Ethereum Methods
-
-- **[eth_submitWork](https://eth.wiki/json-rpc/API#eth_submitwork)** —not supported.
-- **[eth_submitHashRate](https://eth.wiki/json-rpc/API#eth_submithashrate)** - not supported.
-
-More information will be added to this page.
-
-### Polkadot.js API Utility Functions
-
-The Polkadot.js API also incorporates numerous utility libraries for computing frequently used cryptographic primitives and hash functions. You can view the full list at https://www.npmjs.com/package/@polkadot/util-crypto/v/0.32.19.
diff --git a/pages/docs/tangle-network/build/network-information-configuration.mdx b/pages/docs/tangle-network/build/network-information-configuration.mdx
deleted file mode 100644
index af0f6c4a..00000000
--- a/pages/docs/tangle-network/build/network-information-configuration.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: Tangle Network - Getting Started Guide for Developers
----
-
-import { UseCasesArea, ParticipateArea, TestNetworkArea } from "../../../../components/UseCasesTangle";
-import FullWebbCTA from "../../../../components/FullWebbCTA";
-
-# Getting Started with Tangle Network
-
-## Networks in the Tangle Ecosystem
-
-Before you dive into development, it's crucial to familiarize yourself with the different networks within the Tangle ecosystem.
-
-| Network | Network Type | Native Asset Symbol | Native Asset Decimals |
-| -------------- | ------------ | ------------------- | --------------------- |
-| Tangle Network | MainNet | TNT | 18 |
-| Tangle Testnet | TestNet | tTNT | 18 |
-
-## Network Configurations
-
-Developer tools may require specific configurations for the Tangle Network. Use the following details:
-
-| Variable | Value |
-| --------------- | ---------------------------------- |
-| Chain ID | 4006 |
-| Public RPC URLs | `https://testnet-rpc.tangle.tools` |
-| Public WSS URLs | `wss://testnet-rpc.tangle.tools` |
-
-## Types for Chain Integration
-
-To find the latest types for Tangle Network, [visit the lookup.tsx file in our Github repo.](https://github.com/webb-tools/tangle/blob/main/types/src/interfaces/lookup.ts) It automatically produces a full file of the types for the network on changes.
-
-## Block Explorers
-
-We will offer two types of block explorers to accommodate the Ethereum API and Substrate API. Transactions based on the Ethereum Virtual Machine (EVM) can be accessed through the Ethereum API. Conversely, the Substrate API is your go-to for native Substrate operations such as governance and staking. While the Substrate API does provide some details about EVM-based transactions, the information it presents is somewhat limited.
-
-| Block Explorer | Type | URL |
-| -------------- | --------- | ---------------------------------------------------------------------------------- |
-| BlockScout | EVM | [link](https://https://explorer.tangle.tools/) |
-| PolkadotJS | Substrate | [link](https://polkadot.js.org/apps/?rpc=wss://testnet-rpc.tangle.tools#/explorer) |
-
-## Interfaces
-
-### Polkadot Apps Interface
-
-
-
-## Funding Your Accounts
-
-Before starting development on our TestNets, fund your account with tTNT tokens, strictly intended for testing purposes and devoid of real-world value. You can obtain this at [Webb Faucet](https://faucet.webb.tools/.)
-
-| Network | Source For Tokens |
-| -------------- | -------------------------- |
-| Tangle Testnet | https://faucet.webb.tools/ |
-| Tangle Mainnet | To be announced |
-
-## Development Tools
-
-As an Ethereum-compatible, Substrate-based chain, Tangle Network allows for the use of both Substrate and Ethereum tools.
-
-| Network | Tool | Type |
-| --------- | -------------------------------------------------------------------------------- | --------------- |
-| Substrate | [Substrate Python Inteface](https://github.com/polkascan/py-substrate-interface) | Library |
-| Substrate | [Polkadot JS API](https://polkadot.js.org/docs/api/) | Library |
-| EVM | [Web3JS](https://web3js.readthedocs.io/) | Library |
-| EVM | [EthersJS](https://docs.ethers.io/) | Library |
-| EVM | [Hardhat](https://hardhat.org/) | Dev Environment |
-
-## Participating in the Tangle Network
-
-Joining the Tangle Network community is easy. You can run a node, develop a Dapp, or contribute to our open-source codebase. For more information, visit our [Node Operation Guide](/docs/ecosystem-roles/role-overview/), and [Contribution Guide](/docs/contribute).
-
-## Looking Ahead: Mainnet Launches
-
-Our official Mainnet launch date is forthcoming, so stay tuned! Post-launch, we'll continue to improve the network, working with our community to ensure that Tangle Network remains secure, robust, and user-friendly.
-
-### Recommended Testnet Actions
-
-Users can participate by engaging in the following activities, you can share your contributions to [our Discord](https://webb.tools/community) in the `#contributions` channel.
-
-| Task | Instructions |
-| ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Creative Contribution | - |
-| Use Case Concepts | User Submission. Write an article discussing a use case for the technologies Tangle uses! |
-| Publish a video or tutorial on Tangle/Webb | Publish a high-quality video discussing the network or its privacy technologies. |
-| Author a Protocol Extension | Create and submit an extension to the protocols used in Tangle. See our GitHub |
-| Tweet a Semaphore identity w/ Webb | Share your [Semaphore identity](https://semaphore.appliedzkp.org/docs/guides/identities) and tag [@webbprotocol](https://twitter.com/webbprotocol) |
-| DKG Authority (get voted in) | See our DKG documentation |
-| Tweet about Webb | Tag [@webbprotocol](https://twitter.com/webbprotocol) in a high quality tweet about the network. |
-| Deposit → Transfer → Withdraw | Perform a transaction flow on the network or the Hubble Bridge. |
-| Tweet a hash of your address + a secret number | Make sure to tag @Webbprotocol. |
-| Vote in Governance | See the Polkadot Governance Docs for [how to participate in Substrate governance.](https://wiki.polkadot.network/docs/maintain-guides-democracy) |
-| Propose and Pass in Governance | See the Polkadot Governance Docs for [how to participate in Substrate governance.](https://wiki.polkadot.network/docs/maintain-guides-democracy) |
-| Tweet an ETH address or ENS domain | Tag [@webbprotocol](https://twitter.com/webbprotocol) in a high quality tweet about the network. |
-| Bug Bounty with PR Fix Submission | See our Docs on Contributing a Bug Bounty, and provide a fix for the bug via Github. |
-| Run a validator | See our Docs on running a Validator Node |
-| Scripts to make Transactions | Engineer and publish transaction scripts for the network |
-| Bug Report | See the @Bug Reporting Guidance |
-
-Stay updated by following our [Blog](https://blog.webb.tools /) and joining our [Community](https://webb.tools/community).
diff --git a/pages/docs/tangle-network/build/pallets/_meta.json b/pages/docs/tangle-network/build/pallets/_meta.json
deleted file mode 100644
index 23cce300..00000000
--- a/pages/docs/tangle-network/build/pallets/_meta.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "pallet-overview": "Overview",
- "democracy": "Democracy Pallet",
- "collectives": "Collectives Pallet",
- "identity": "Identity Pallet",
- "crosschain-pallets": "Cross-chain Pallets",
- "balances-and-accounts": "Balances Pallet",
- "consensus-mechanism": "nPoS and the Staking Pallet",
- "frontier": "Frontier (EVM) Pallets",
- "scheduler": "Scheduler Pallet",
- "interoperability": "Interoperability"
-}
diff --git a/pages/docs/tangle-network/build/pallets/balances-and-accounts.mdx b/pages/docs/tangle-network/build/pallets/balances-and-accounts.mdx
deleted file mode 100644
index ae3956a1..00000000
--- a/pages/docs/tangle-network/build/pallets/balances-and-accounts.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: "Guide to the Balances Pallet in Substrate Chains"
----
-
-# Guide to the Balances Pallet in Substrate Chains
-
-The Balances pallet is a core component of many Substrate-based blockchains, managing the token balances within the system. This guide will dive deeper into the core functionalities of the Balances pallet and how it can be utilized in a Substrate-based blockchain.
-
-## Overview
-
-The Balances pallet handles the creation and management of fungible assets, primarily the native tokens of a blockchain. It allows for token transfers, reserves tokens for specific purposes, and enforces rules related to the minimum balance.
-
-## Key Concepts
-
-### Accounts
-
-Each entity on the blockchain is represented by an account, which has a unique identifier in the form of a public key. In Substrate's unified account system, each account can hold multiple types of assets and interact with the network in various roles.
-
-### Balances
-
-Tokens within an account are categorized into two types: free and reserved. Free balance is what can be spent by the account holder, while reserved balance is tied to a specific purpose and cannot be spent in the usual manner. There is also a concept of a locked balance, which is owned but temporarily unusable, common in staking scenarios.
-
-### Existential Deposit
-
-This is a concept unique to Substrate, where a minimum balance is set to prevent the creation of dust accounts. Any account whose free balance falls below this value will be removed.
-
-## Transfers
-
-Transfers are a fundamental operation in any blockchain, and Substrate provides several means to transfer tokens. This includes standard transfers, keeping alive transfers that prevent the sender's account from being deleted, and best effort transfers that reduce the transferred amount if necessary to meet the keeping alive guarantee.
-
-## Pallet API and Usage
-
-The Balances pallet provides a versatile API that allows interaction from within other runtime modules. Some common functions include:
-
-- `transfer`: Transfers some free balance from one account to another.
-- `reserve`: Moves some free balance to reserved balance.
-- `unreserve`: Unreserve some funds, returning any amount that was unable to be unreserved.
-- `slash`: Deducts up to a value from the combined balance of `who`, preferring to deduct from the free balance. This function cannot fail.
-
-Please refer to the [official API documentation](https://substrate.dev/rustdocs/latest/pallet_balances/index.html) to learn more about the API provided by the Balances
diff --git a/pages/docs/tangle-network/build/pallets/collectives.mdx b/pages/docs/tangle-network/build/pallets/collectives.mdx
deleted file mode 100644
index e4ae91b3..00000000
--- a/pages/docs/tangle-network/build/pallets/collectives.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
----
-title: Tangle Network Collectives Pallet Documentation
-description: Documentation on the Collectives pallet, a fundamental component of the Tangle Network. Learn how to create, manage, and operate decentralized organizations.
----
-
-# Collectives Pallet in Tangle Network
-
-The Collectives Pallet is a key module in the Tangle Network that provides the functionality to create and manage decentralized organizations or collectives. It allows for collective decision making by a set of members where proposals can be made, voted upon, and enacted.
-
-## Overview of the Collectives Pallet
-
-The Collectives Pallet operates on the concept of a "collective". A collective is a group of members that collectively make decisions. Each collective has a predefined set of members, and decisions within the collective are made based on the voting of these members. The collective manages proposals, where each proposal consists of a specific action to be executed when approved.
-
-## Use Cases
-
-The Collectives Pallet enables a variety of applications in the realm of decentralized governance. It can be used to form decentralized autonomous organizations (DAOs), to establish a multisig wallet, to manage treasury funds, and more.
-
-The use of the Collectives Pallet ensures a democratic, decentralized decision-making process, which is a key principle of blockchain systems.
-
-Remember, the Collectives Pallet is a significant part of the Tangle Network that enables the formation and operation of decentralized collectives. From proposal creation to voting and decision making, it fosters a collaborative approach in managing and governing blockchain applications.
-
-## Creating a Collective
-
-A new collective can be created by calling the `create_collective` function:
-
-```rust
-let members = vec![alice, bob, charlie];
-let collective = Collectives::create_collective(members, vote_threshold);
-```
-
-In this case, `member`s is a list of account IDs that will be the members of the collective, and `vote_threshold` is the minimum number of member votes needed for a proposal to be approved.
-
-## Making a Proposal
-
-A member of a collective can make a proposal by calling the `propose` function:
-
-```rust
-let proposal = Call::some_module::some_function(args).into();
-let result = Collectives::propose(sender, members_needed, proposal);
-```
-
-Here, `sender` is the account ID of the member making the proposal, `members_needed` is the minimum number of votes needed for the proposal to be approved, and `proposal` is the proposed action to be executed when the proposal is approved.
-
-## Voting on a Proposal
-
-Members of a collective can vote on a proposal by calling the `vote` function:
-
-```rust
-let result = Collectives::vote(sender, proposal_index, approve);
-```
-
-Here, `sender` is the account ID of the member voting, `proposal_index` is the index of the proposal to vote on, and `approve` is a boolean indicating whether the member approves or disapproves the proposal.
-
-## Closing a Proposal
-
-A `proposal` can be closed, meaning it can be finalized and enacted (if approved), by calling the `close` function:
-
-```rust
-let result = Collectives::close(sender, proposal_index);
-```
-
-Here, `sender` is the account ID of the member closing the proposal, and `proposal_index` is the index of the proposal to close.
-
-If the proposal received enough approvals (based on `vote_threshold` during the creation of the collective and `members_needed` during the proposal), then the proposed action will be dispatched for execution.
-
-## Removing a Member
-
-A member can be removed from the collective by calling the `remove_member` function:
-
-```rust
-let result = Collectives::remove_member(sender, removee);
-```
-
-Here, `sender` is the account ID of the member making the request, and `removee` is the account ID of the member to be removed.
diff --git a/pages/docs/tangle-network/build/pallets/consensus-mechanism.mdx b/pages/docs/tangle-network/build/pallets/consensus-mechanism.mdx
deleted file mode 100644
index 7b45948d..00000000
--- a/pages/docs/tangle-network/build/pallets/consensus-mechanism.mdx
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: "Staking Pallet and Nominated Proof of Stake (nPoS) Consensus"
----
-
-# Guide to the Nominated Proof of Stake (nPoS) Consensus
-
-Tangle Network and many Substrate-based chains use a consensus mechanism called Nominated Proof of Stake (nPoS). This guide will dive into the specifics of nPoS, explaining the key concepts and how they are implemented in Substrate.
-
-## Overview
-
-Nominated Proof of Stake (nPoS) is a variant of Proof of Stake (PoS) consensus that introduces the concept of nominators. Validators are responsible for block production, while nominators back one or more validators with their own stake. This process increases the security of the network.
-
-nPoS is designed to be highly inclusive, maximizing the number of validators and reducing the risk of centralization. The mechanism also seeks to ensure that validators are competent and behave appropriately.
-
-## Key Concepts
-
-### Validators and Nominators
-
-In nPoS, validators are nodes responsible for maintaining the network and producing new blocks. They are elected based on the total stake backing them, including their own and their nominators' stake.
-
-Nominators, on the other hand, are stakeholders who nominate one or more validators to be elected. They lock up their own tokens in favor of these validators and share in the rewards (and risks) of backing them.
-
-### Staking and Slashing
-
-Staking is the process of locking up tokens in a contract to participate in the network as a validator or nominator. The more tokens staked by a validator (their own plus nominations), the higher the chances they have to be selected for block production.
-
-However, the nPoS consensus includes a mechanism called slashing, where validators can lose (i.e., get "slashed") a portion of their stake for misbehavior, such as being offline, failing to validate correctly, or acting maliciously. Nominators who back a slashed validator can also be slashed.
-
-## The Staking Pallet
-
-The mechanics of nPoS are primarily implemented in Substrate's Staking pallet. This module manages the staking, nominating, rewarding, and slashing processes.
-
-Key functions of the Staking pallet include:
-
-- `bond`: To participate in the network, tokens must be bonded, i.e., locked in the staking system.
-- `unbond`: Tokens can be unbonded, i.e., scheduled for release from the staking system.
-- `validate`: A bonded account can declare its intention to be a validator.
-- `nominate`: A bonded account can nominate one or more validators to be elected.
-- `payout_stakers`: Pay the last reward to the stakers of a validator.
-
-Please refer to the [official API documentation](https://substrate.dev/rustdocs/latest/pallet_staking/index.html) to learn more about the API provided by the Staking pallet.
diff --git a/pages/docs/tangle-network/build/pallets/crosschain-pallets.mdx b/pages/docs/tangle-network/build/pallets/crosschain-pallets.mdx
deleted file mode 100644
index fa339d0f..00000000
--- a/pages/docs/tangle-network/build/pallets/crosschain-pallets.mdx
+++ /dev/null
@@ -1,96 +0,0 @@
-# DKG Governance
-
-The DKG governance is split between different Substrate pallets that each handle a different aspect of the system.
-
-## Pallets
-
-The DKG runtime is uses the following pallets which are central to how the protocol functions.
-
-### pallet-dkg-metadata
-
-The metadata pallet tracks information about the DKG state. This includes the active and next authority
-sets and their authority set IDs, the active and next DKG public keys, thresholds, historical refreshes,
-and more. It’s main purpose is to provide on-chain information about the DKG and the next DKG for clients
-who are participating in the protocol.
-
-The pallet houses a few sub-protocols, namely:
-
-- The refresh protocol
-- The misbehaviour and reputation protocol
-
-One main importance of this pallet is to deterministically identify the best authorities for the next
-DKG’s authority set in order to signal participation to clients in the membership set. This ensures that
-all offchain clients see the same state of the world.
-
-- [Docs](https://webb-tools.github.io/dkg-substrate/pallet_dkg_metadata/index.html)
-- [Source](https://github.com/webb-tools/dkg-substrate/tree/master/pallets/dkg-metadata)
-
-### pallet-dkg-proposals
-
-This pallet maintains the valid proposers and the first layer of the governance system: voting on proposals
-to be signed by the DKG. The valid proposers is superset of the current DKG authorities. Active DKG
-authorities are continuously rotated into the proposer set.
-
-This pallet maintains a queue for pending proposals which the DKG authorities vote on and if the vote threshold
-is met, the proposal is passed on to be handled by a type that implements the `ProposalHandlerTrait`. The
-proposals meant to be processed by this pallet are primarily `AnchorUpdateProposals` but can be extended to
-support any type of proposal that is meant to be submitted by the valid proposers.
-
-- [Docs](https://webb-tools.github.io/dkg-substrate/pallet_dkg_proposals/index.html)
-- [Source](https://github.com/webb-tools/dkg-substrate/tree/master/pallets/dkg-proposals)
-
-### pallet-dkg-proposal-handler
-
-This pallet implements the `ProposalHandlerTrait` and accepts proposals through this handler system. In the
-current incarnation, the **pallet-dkg-proposals** passes successfully passed unsigned proposals to this pallet
-for queuing for eventual signing by the DKG. All unsigned proposals handled here are added to a queue which each
-DKG client continues to poll from using a runtime API.
-
-Off-chain, unsigned proposals move through the DKG’s threshold signature protocol and eventually, if successful,
-get re-submitted on-chain as signed proposals. The unsigned proposal records are removed and the signed proposals
-are stored in the pallet’s storage for inspection by any observing system, such as an oracle or relayer network.
-
-This pallet represents the second stage in the governance protocol. That is, after the first layer of the governance
-system decides on which proposals to sign, this pallet helps expose those proposals and enable submission of them
-after successful threshold-signing.
-
-- [Docs](https://webb-tools.github.io/dkg-substrate/pallet_dkg_proposal_handler/index.html)
-- [Source](https://github.com/webb-tools/dkg-substrate/tree/master/pallets/dkg-proposal-handler)
-
-## Client
-
-The DKG client (or gadget) is the main service that interfaces with the pallet system and overall governance protocol.
-It is responsible for listening to the chain and participating (if selected) in the DKG protocol.
-
-### dkg-gadget
-
-The DKG gadget is an offchain service that executes the DKG protocols and stores data in off-chain storage for the
-on-chain system to fetch and post back on-chain. It also listens to changes in the proposal handler and metadata
-pallets in order to properly:
-
-- Rotate keys
-- Sign unsigned proposals
-- Set and clear offchain storage
-- Report misbehaviours.
-
-We are always executing a DKG signing protocol for the current authority set **and the DKG key generation protocol
-for next authority set if none has completed**.
-
-- [Docs](https://webb-tools.github.io/dkg-substrate/dkg_gadget/index.html)
-- [Source](https://github.com/webb-tools/dkg-substrate/tree/master/dkg-gadget)
-
-### **Note on Offchain workers**
-
-The DKG makes use of offchain workers to store data ready for on-chain submission.
-
-If running a live chain as a validator, please add your sr25519 account keys to the node's local keystore
-either by using the `author_insertKey` RPC or using the `key` subcommand
-
-```
-dkg-standalone-node key insert --key-type acco --scheme sr25519 --suri
-```
-
-> Key Type is `acco` Scheme is sr25519
-
-**Note** For the standalone node the account being added to the keystore should be the Stash account used in staking
-not the Controller account
diff --git a/pages/docs/tangle-network/build/pallets/democracy.mdx b/pages/docs/tangle-network/build/pallets/democracy.mdx
deleted file mode 100644
index 3e39f494..00000000
--- a/pages/docs/tangle-network/build/pallets/democracy.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Introduction to the Democracy Pallet
-
-The Democracy Pallet is one of the fundamental components in the governance of many Substrate-based chains, such as the Tangle Network. This pallet introduces a mechanism for community members to propose and vote on potential changes to the runtime code. It offers a democratic system in which stakeholders of the network can directly influence its direction.
-
-At a high level, the Democracy Pallet provides an interface for the following actions:
-
-1. **Proposing Referenda**: Any token holder can propose a change to the runtime. A fixed deposit is required to propose a referendum, which prevents spam proposals. If the proposal garners sufficient support during a public voting period, it becomes a referendum.
-
-2. **Voting on Referenda**: Once a proposal becomes a referendum, token holders can vote for or against the referendum. The Democracy Pallet features adaptive quorum biasing, which adjusts the passing threshold based on the turnout. This means that a super-majority is required for proposals with low turnout, and as turnout increases, a simple majority is sufficient for the proposal to pass.
-
-3. **Delegating Votes**: If a token holder does not have the knowledge or time to vote on referenda, they can delegate their voting power to a knowledgeable party. Delegation can be removed at any time.
-
-4. **Proxy Voting**: This feature allows a third party to vote on behalf of the original token holder, promoting participation from individuals who may not have direct access to vote themselves.
-
-5. **Emergency Proposals (Fast Track)**: In critical situations, council members can fast track a proposal to skip the public support phase and turn it directly into a referendum.
-
-6. **Veto (by council)**: The council has the ability to veto a proposed referendum. This power can only be used sparingly due to the "cool-off" period after each veto, during which the council cannot veto the same proposal.
-
-Technically, the Democracy Pallet achieves its functionality by integrating with other Substrate Pallets. For instance, it uses the Balances Pallet for handling proposal deposits, the Collective Pallet for council operations, and the Scheduler Pallet for enacting approved proposals after a predefined enactment delay.
-
-In essence, the Democracy Pallet facilitates on-chain governance in a democratic and transparent manner, allowing stakeholders to propose, discuss, and implement changes to the network, and providing a robust and responsive governance system.
diff --git a/pages/docs/tangle-network/build/pallets/frontier.mdx b/pages/docs/tangle-network/build/pallets/frontier.mdx
deleted file mode 100644
index 6d5ca106..00000000
--- a/pages/docs/tangle-network/build/pallets/frontier.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "Guide to the Frontier EVM Compatibility Pallet"
----
-
-# Guide to the Frontier EVM Compatibility Pallet
-
-The Substrate ecosystem is designed to be highly flexible and customizable, accommodating a wide variety of blockchain models. One of its key features is its capacity for Ethereum compatibility through the Frontier project. This guide explores the Frontier pallet and its role in providing Ethereum Virtual Machine (EVM) compatibility in Substrate chains.
-
-## Overview
-
-Frontier is a set of modules (also known as pallets in Substrate terminology) that allows Substrate-based blockchains to host an EVM environment, providing compatibility with Ethereum. This means that Substrate chains using Frontier can run Ethereum smart contracts and can be interacted with using familiar Ethereum tools, such as MetaMask and Truffle.
-
-The Frontier project is community-driven and is constantly evolving to track changes and improvements in both the Substrate and Ethereum ecosystems.
-
-## Key Concepts
-
-### Ethereum Compatibility
-
-Frontier facilitates Ethereum compatibility by hosting an EVM within the Substrate environment. This allows developers to write smart contracts in Solidity (or any other Ethereum-compatible language) and deploy them on a Substrate-based chain. Developers can also interact with these smart contracts using Ethereum's JSON-RPC API, making it easier for developers with Ethereum experience to work with Substrate.
-
-### RPC Layer
-
-The RPC layer provided by the Frontier project enables Substrate nodes to speak Ethereum's JSON-RPC. This includes support for methods such as `eth_sendTransaction`, `eth_call`, and `eth_getBalance`, among others. As a result, tools that interact with Ethereum nodes can be used with a Substrate node running the Frontier pallets.
-
-### EVM Pallet
-
-The EVM pallet is the core component that provides EVM compatibility. It allows the execution of Ethereum smart contracts and supports Ethereum's transaction model. It also includes an Ethereum-to-Substrate mapping of accounts and gas price management.
diff --git a/pages/docs/tangle-network/build/pallets/identity.mdx b/pages/docs/tangle-network/build/pallets/identity.mdx
deleted file mode 100644
index de3474db..00000000
--- a/pages/docs/tangle-network/build/pallets/identity.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-# Introduction to the Identity Pallet
-
-The Identity Pallet is a key component in Substrate-based blockchains, fostering the concept of on-chain identity in an innovative way. It is devised to provide a standardized method for users to handle and authenticate identities on the blockchain.
-
-Here's a glimpse at the core capabilities the Identity Pallet provides:
-
-1. **Identity Registration**: Users have the ability to register an on-chain identity, composed of multiple fields representing various facets of their identity such as legal name, email address, website, Twitter handle, and more.
-
-2. **Identity Verification**: Other network participants can function as attestors to verify individual fields of an identity. This verification process enhances the overall trustworthiness and credibility of an identity on the network.
-
-3. **Identity Management**: Users have complete control over managing their registered identities. This includes the ability to add, update, or delete fields from their identity.
-
-4. **Sub-identities (Subs)**: Users can establish sub-identities under their primary identity, each with its own unique set of attributes. This proves advantageous when users want to perform distinct roles within the network (e.g., acting as a validator or a nominator).
-
-5. **Identity Slashing**: Any false claims or attempts to manipulate the system can lead to the slashing of the identity deposit.
-
-From a technical perspective, the Identity Pallet revolves around two main constituents: the identity itself and the registrars. The identity encompasses all the information that delineates an individual or an organization. Registrars, contrastingly, are trusted entities appointed by the network's governance to authenticate the information related to an identity.
-
-In a nutshell, the Identity Pallet offers a pathway for users to create and manage verifiable on-chain identities, imparting an element of trust and security to Substrate-based networks.
diff --git a/pages/docs/tangle-network/build/pallets/interoperability.mdx b/pages/docs/tangle-network/build/pallets/interoperability.mdx
deleted file mode 100644
index 50e18dd2..00000000
--- a/pages/docs/tangle-network/build/pallets/interoperability.mdx
+++ /dev/null
@@ -1,4 +0,0 @@
-# Interoperability
-
-Tangle Network is designed to connect to other blockchains through bridges, and is specially designed to govern and host connections to Webb's bespoke Hubble Bridge, a private and interoperable transfer bridge.
-See Hubble at https://app.webb.tools or the [Docs for Hubble Bridge](/docs/projects/hubble-bridge/overview)
diff --git a/pages/docs/tangle-network/build/pallets/pallet-overview.mdx b/pages/docs/tangle-network/build/pallets/pallet-overview.mdx
deleted file mode 100644
index d6bff406..00000000
--- a/pages/docs/tangle-network/build/pallets/pallet-overview.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
-# Introduction to Pallets
-
-In the Substrate ecosystem, a pallet refers to a distinct, reusable piece of code which contributes a specific piece of functionality to a runtime. Think of pallets as modules that are utilized to construct Substrate-based blockchains. The Tangle Network, like any Substrate-based chain, employs a variety of these pallets to accomplish the network's overall functionalities.
-
-The repository of each pallet consists of a set of related functionalities, which collectively contribute to the overall operation of the Substrate runtime. From managing balances and transaction fees to handling governance and staking processes, pallets essentially serve as the backbone of the Substrate infrastructure.
-
-The flexibility and modularity of Substrate pallets contribute significantly to the customization and upgradeability of the Tangle Network. They provide for easy runtime upgrades without needing to fork the entire network, ensuring a seamless evolution of the network's capabilities.
-
-Amongst the Substrate standard pallets, Tangle Network also incorporates several custom pallets specifically tailored to meet its unique requirements. These custom pallets encapsulate Tangle Network's unique features and functions that extend beyond the conventional Substrate offerings.
-
-Just as with precompiles, these custom pallets can be interacted with through familiar interfaces, this time using the Substrate API. This ensures that developers can make the most out of the rich functionalities offered by the Tangle Network without having to navigate through the complex underlying logic of the pallets themselves.
-
-In essence, pallets form the building blocks of the Tangle Network, contributing to its robustness, customization, and scalability. The beauty of this modular architecture lies in its adaptability and expandability, allowing the Tangle Network to continuously grow and adapt to meet the changing needs of its community.
diff --git a/pages/docs/tangle-network/build/pallets/precompile-overview.mdx b/pages/docs/tangle-network/build/pallets/precompile-overview.mdx
deleted file mode 100644
index 130edfd3..00000000
--- a/pages/docs/tangle-network/build/pallets/precompile-overview.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
-# Overview of Precompiles and Pallets
-
-## Introduction to Precompiles
-
-On Tangle Network, a precompiled contract refers to native Substrate code that possesses an Ethereum-like address and can be engaged through the Ethereum API, as with any other smart contract. These precompiles enable you to directly interact with the Substrate runtime, a functionality that is usually inaccessible from the Ethereum aspect of Tangle Network.
-
-The Substrate code that oversees the implementation of precompiles is located within the EVM pallet. This EVM pallet comprises the standard precompiles existing on Ethereum along with some other precompiles that aren't unique to Ethereum. It further offers the capacity to form and execute custom precompiles through the versatile Precompiles trait. A range of custom Tangle Network-specific precompiles have been developed and can be found within the Tangle Network codebase.
-
-The Ethereum precompiled contracts encompass complex functions that require substantial computational resources, including hashing and encryption. On Tangle Network, the custom precompiled contracts allow access to Substrate-based features such as staking, governance, XCM-related operations, and more.
-
-These Tangle Network-specific precompiles can be accessed through familiar and user-friendly Solidity interfaces utilizing the Ethereum API, which ultimately interact with the underlying Substrate interface.
-
-### Ethereum Mainnet Precompiles
-
-See the repositories for these at the [Parity Github.](https://github.com/paritytech/frontier/tree/master/frame/evm/precompile)
-
-| Contract | Address |
-| ---------------------- | ------------------------------------------ |
-| ECRECOVER | 0x0000000000000000000000000000000000000001 |
-| SHA256 | 0x0000000000000000000000000000000000000002 |
-| RIPEMD160 | 0x0000000000000000000000000000000000000003 |
-| Identity | 0x0000000000000000000000000000000000000004 |
-| Modular Exponentiation | 0x0000000000000000000000000000000000000005 |
-| BN128Add | 0x0000000000000000000000000000000000000006 |
-| BN128Mul | 0x0000000000000000000000000000000000000007 |
-| BN128Pairing | 0x0000000000000000000000000000000000000008 |
-| Blake2 | 0x0000000000000000000000000000000000000009 |
-| SHA3FIPS256 | 0x0000000000000000000000000000000000000400 |
-| Dispatch | 0x0000000000000000000000000000000000000401 |
-| ECRecoverPublicKey | 0x0000000000000000000000000000000000000402 |
-
-### General Precompiles
-
-| Contract | Address |
-| ------------------- | ------------------------------------------ |
-| Democracy | 0x0000000000000000000000000000000000000803 |
-| Batch | 0x0000000000000000000000000000000000000808 |
-| Call Permit | 0x000000000000000000000000000000000000080a |
-| Preimage | 0x0000000000000000000000000000000000000813 |
-| Precompile Registry | 0x0000000000000000000000000000000000000815 |
-| Pallet Staking | 0x0000000000000000000000000000000000000800 |
-
-## Introduction to Pallets
-
-In the Substrate ecosystem, a pallet refers to a distinct, reusable piece of code which contributes a specific piece of functionality to a runtime. Think of pallets as modules that are utilized to construct Substrate-based blockchains. The Tangle Network, like any Substrate-based chain, employs a variety of these pallets to accomplish the network's overall functionalities.
-
-The repository of each pallet consists of a set of related functionalities, which collectively contribute to the overall operation of the Substrate runtime. From managing balances and transaction fees to handling governance and staking processes, pallets essentially serve as the backbone of the Substrate infrastructure.
-
-The flexibility and modularity of Substrate pallets contribute significantly to the customization and upgradeability of the Tangle Network. They provide for easy runtime upgrades without needing to fork the entire network, ensuring a seamless evolution of the network's capabilities.
-
-Amongst the Substrate standard pallets, Tangle Network also incorporates several custom pallets specifically tailored to meet its unique requirements. These custom pallets encapsulate Tangle Network's unique features and functions that extend beyond the conventional Substrate offerings.
-
-Just as with precompiles, these custom pallets can be interacted with through familiar interfaces, this time using the Substrate API. This ensures that developers can make the most out of the rich functionalities offered by the Tangle Network without having to navigate through the complex underlying logic of the pallets themselves.
-
-In essence, pallets form the building blocks of the Tangle Network, contributing to its robustness, customization, and scalability. The beauty of this modular architecture lies in its adaptability and expandability, allowing the Tangle Network to continuously grow and adapt to meet the changing needs of its community.
diff --git a/pages/docs/tangle-network/build/pallets/scheduler.mdx b/pages/docs/tangle-network/build/pallets/scheduler.mdx
deleted file mode 100644
index 65f43c47..00000000
--- a/pages/docs/tangle-network/build/pallets/scheduler.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Tangle Network Scheduler Pallet Documentation
-description: Documentation on the Scheduler pallet, one of the critical components of the Tangle Network. Understand how to create, manage, and execute scheduled tasks.
----
-
-# Scheduler Pallet in Tangle Network
-
-The Scheduler Pallet is a key component of the Tangle Network, responsible for managing tasks that need to be executed at specific points in time. This pallet allows you to schedule calls to be dispatched at a future block number, thus providing the network with the ability to execute deferred or recurring tasks.
-
-### Introduction to the Scheduler Pallet
-
-The Scheduler Pallet uses the concept of "agenda" to manage tasks. An agenda is simply a list of tasks that are scheduled for execution in a specific block. When a block is finalized, all tasks within the agenda of that block are executed. Each task is associated with a priority, and tasks within an agenda are executed in decreasing order of their priority.
-
-### Scheduling a Task
-
-You can schedule a task by calling the `schedule` function. This function requires the following parameters:
-
-- `when`: The block number when the task should be executed.
-- `priority`: The priority of the task. Tasks with higher priority are executed before tasks with lower priority within the same block.
-- `call`: The call to be dispatched when the task is executed.
-
-The `schedule` function returns a `TaskAddress` which can be used to cancel or reschedule the task.
-
-```rust
-let task_address = Scheduler::schedule(when, priority, call);
-```
-
-### Canceling a Task
-
-A scheduled task can be canceled before it's executed by calling the cancel function with the TaskAddress returned by the schedule function:
-
-```rust
-let result = Scheduler::cancel(task_address);
-```
-
-### Checking if a Task is Scheduled
-
-You can check if a specific task is scheduled by calling the is_scheduled function:
-
-```rust
-let is_scheduled = Scheduler::is_scheduled(task_address);
-```
-
-## Rescheduling a Task
-
-A scheduled task can be rescheduled to a different block by calling the reschedule function:
-
-```rust
-let result = Scheduler::reschedule(task_address, new_when);
-```
-
-### Execution of Tasks
-
-The execution of tasks is handled automatically by the Tangle Network. When a block is finalized, the Scheduler Pallet checks if there are any tasks in the agenda of that block. If there are, it executes them in the order of their priority. If a task fails, it's removed from the agenda and won't be retried in future blocks.
-
-### Example Use Case: Recurring Tasks
-
-The Scheduler Pallet can be used to create recurring tasks. For instance, if you want to update the exchange rate of a token every 10 blocks, you can schedule a task that updates the exchange rate and then schedules itself to be executed again after 10 blocks.
-
-```rust
-fn update_exchange_rate() {
- // Update the exchange rate...
-
- // Schedule this function to be called again after 10 blocks.
- let when = >::block_number() + 10.into();
- let priority = 0;
- let call = Call::update_exchange_rate().into();
-
- let _ = Scheduler::schedule(when, priority, call);
-}
-```
-
-The Scheduler Pallet in the Tangle Network is a powerful tool that allows the network to manage and execute tasks at specific points in time. From delayed execution to recurring tasks, it opens up a whole new range of possibilities for designing and implementing blockchain applications.
diff --git a/pages/docs/tangle-network/build/pallets/treasury.mdx b/pages/docs/tangle-network/build/pallets/treasury.mdx
deleted file mode 100644
index c93e177a..00000000
--- a/pages/docs/tangle-network/build/pallets/treasury.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
-# Introduction to the Treasury Pallet
-
-The Treasury Pallet is a crucial component in the Substrate framework, playing a central role in managing the common wealth of a blockchain community. It's a designated pot of funds that can be utilized to back proposals deemed beneficial to the network, thereby stimulating the overall growth and development of the ecosystem.
-
-Here are some of the key functions the Treasury Pallet encompasses:
-
-1. **Collecting Funds**: The Treasury gathers funds from various sources like transaction fees, slashing, staking inefficiencies, and any other activities that lead to the burning of tokens.
-
-2. **Funding Proposals**: Community members can propose initiatives for the betterment of the ecosystem. These could include development work, promotional activities, or other community projects. If the proposal wins the approval of the council, the treasury will fund the project.
-
-3. **Burn Mechanism**: To maintain a balance and ensure that the treasury doesn’t accumulate more funds than necessary, a fraction of the treasury's funds can be burnt in each spending period.
-
-4. **Treasury Tips**: The treasury system also provides tips for individuals who provide significant services to the community, such as identifying bugs in the software, or flagging malicious behavior.
-
-From a technical standpoint, the Treasury Pallet operates via a simple, yet efficient mechanism. Proposals are submitted along with a bond, which is forfeited if the proposal is rejected, and reimbursed if accepted. The proposal is then reviewed by the council and voted upon. If the proposal passes, the treasury will disburse the funds, and the project can commence.
-
-In essence, the Treasury Pallet is a resourceful tool fostering community-driven growth and enabling sustainable funding for value-adding initiatives within a Substrate-based blockchain network.
diff --git a/pages/docs/tangle-network/build/precompile-addresses.mdx b/pages/docs/tangle-network/build/precompile-addresses.mdx
deleted file mode 100644
index 131bb34c..00000000
--- a/pages/docs/tangle-network/build/precompile-addresses.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
-# Precompiles on Tangle Network
-
-## Introduction to Precompiles
-
-On Tangle Network, a precompiled contract refers to native Substrate code that possesses an Ethereum-like address and can be engaged through the Ethereum API, as with any other smart contract. These precompiles enable you to directly interact with the Substrate runtime, a functionality that is usually inaccessible from the Ethereum aspect of Tangle Network.
-
-The Substrate code that oversees the implementation of precompiles is located within the EVM pallet. This EVM pallet comprises the standard precompiles existing on Ethereum along with some other precompiles that aren't unique to Ethereum. It further offers the capacity to form and execute custom precompiles through the versatile Precompiles trait. A range of custom Tangle Network-specific precompiles have been developed and can be found within the Tangle Network codebase.
-
-The Ethereum precompiled contracts encompass complex functions that require substantial computational resources, including hashing and encryption. On Tangle Network, the custom precompiled contracts allow access to Substrate-based features such as staking, governance, XCM-related operations, and more.
-
-These Tangle Network-specific precompiles can be accessed through familiar and user-friendly Solidity interfaces utilizing the Ethereum API, which ultimately interact with the underlying Substrate interface.
-
----
-
-# Precompile Addresses
-
-This page lists the existing precompiles used in Tangle Network, descriptions of their functions, and the contract addresses of the functionality.
-
-## Ethereum Precompiles
-
-| Precompile | Description | Address |
-| ---------------------- | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------- |
-| ECRECOVER | Recovers the public key associated with the given signature, a critical operation in verifying wallet signatures. | `0x0000000000000000000000000000000000000001` |
-| SHA256 | Computes the SHA256 cryptographic hash function, widely used for data integrity verification. | `0x0000000000000000000000000000000000000002` |
-| RIPEMD160 | Calculates the RIPEMD-160 hash, which is used in various security applications and protocols. | `0x0000000000000000000000000000000000000003` |
-| Identity | A simple data copy operation. | `0x0000000000000000000000000000000000000004` |
-| Modular Exponentiation | Performs modular exponentiation, a key operation in many cryptographic functions. | `0x0000000000000000000000000000000000000005` |
-| BN128Add | Performs point addition on a BN128 elliptic curve. | `0x0000000000000000000000000000000000000006` |
-| BN128Mul | Performs point multiplication on a BN128 elliptic curve. | `0x0000000000000000000000000000000000000007` |
-| BN128Pairing | Checks the pairing on a BN128 elliptic curve. | `0x0000000000000000000000000000000000000008` |
-| Blake2 | Computes the Blake2 cryptographic hash function. | `0x0000000000000000000000000000000000000009` |
-| SHA3FIPS256 | Computes the SHA3 (FIPS 202 compliant) hash function. | `0x0000000000000000000000000000000000000400` |
-| Dispatch | Handles dispatching and managing contract calls and interactions. | `0x0000000000000000000000000000000000000401` |
-| ECRecoverPublicKey | Recovers the public key from an elliptic curve signature. | `0x0000000000000000000000000000000000000402` |
-
-## EVM-to-Substrate Precompiles
-
-| Precompile | Description | Address |
-| ------------------- | --------------------------------------------------------------------------- | -------------------------------------------- |
-| DemocracyInterface | An interface for interacting with on-chain governance. | `0x0000000000000000000000000000000000000803` |
-| Batch | Allows for multiple calls to be made within a single transaction. | `0x0000000000000000000000000000000000000808` |
-| CallPermit | Facilitates authorized calls to certain functions. | `0x000000000000000000000000000000000000080a` |
-| Preimage | Used for managing preimages, which are proposals before they become public. | `0x0000000000000000000000000000000000000813` |
-| Precompile-Registry | Manages the registration of new precompiles. | `0x0000000000000000000000000000000000000815` |
-| Pallet-staking | Handles staking-related operations. | `0x0000000000000000000000000000000000000800` |
-
-## Future Precompiles
-
-The following are under consideration for inclusion, and are drawn from the open-source development done in the Substrate ecosystem.
-
-| Precompile | Description | Address |
-| ------------------- | ------------------------------------------------------------------------------------------------------ | ------- |
-| Collective | Performs democracy functions through any of the collectives on Moonbeam, such as the Treasury Council. | TBD |
-| Conviction Voting | Allows voting on referenda, setting up voting delegations, and more. | TBD |
-| Native Token ERC-20 | Provides an ERC-20 representation of the native token on Moonbeam. | TBD |
-| Proxy | Allows adding and removing proxy accounts from Substrate's Proxy Pallet. | TBD |
-| Randomness | Provides VRF randomness on Moonbeam. | TBD |
-| Referenda | Allows viewing and submitting proposals on-chain to be put forth for referenda. | TBD |
-| Staking Functions | Allows developers to access staking features using the Ethereum API in a precompiled contract. | TBD |
-| X-Tokens | Allows sending XC-20s to other chains using the X-Tokens Pallet. | TBD |
-| XCM Transactor | Allows performing remote XCM execution from Moonbeam to other chains in the ecosystem. | TBD |
-| XCM Utilities | Provides various XCM related utility functions to smart contact developers. | TBD |
-| DappsStaking | Functions related to managing dApp staking. | TBD |
-| Sr25519 | Handles operations related to the Sr25519 signature scheme. | TBD |
-| SubstrateEcdsa | Manages operations related to the Substrate Ecdsa. | TBD |
-| XCM | Handles cross-chain message (XCM) operations. | TBD |
-| XVM | Handles operations related to the XVM. | TBD |
-| assets-erc20 | Manages ERC20 asset operations. | TBD |
-
-## Further resources
-
-**EVM Precompiles Repo**
-See the repositories for these at the [Parity Github.](https://github.com/paritytech/frontier/tree/master/frame/evm/precompile)
diff --git a/pages/docs/tangle-network/governance/_meta.json b/pages/docs/tangle-network/governance/_meta.json
deleted file mode 100644
index 37887bcf..00000000
--- a/pages/docs/tangle-network/governance/_meta.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
- "overview": "Overview of On-chain Governance",
- "democracy-voting": "Voting in Democracy",
- "governance-interfaces": "Governance Interfaces",
- "governance-parameters": "On-chain Governance Parameters",
- "proposal-creation": "Create a Proposal",
- "how-to-vote-on-tangle": "How to Vote",
- "governance-procedures": "Other Procedures"
-}
diff --git a/pages/docs/tangle-network/governance/democracy-voting.mdx b/pages/docs/tangle-network/governance/democracy-voting.mdx
deleted file mode 100644
index 26851a89..00000000
--- a/pages/docs/tangle-network/governance/democracy-voting.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# Voting in Democracy Referenda
-
-Substrate-based blockchains often have built-in on-chain governance mechanisms, which include voting on referenda. Here's a step-by-step guide on how to vote in democracy referenda on a Substrate blockchain:
-
-Note: This guide assumes you have already set up a Substrate-based wallet and have some tokens in your account.
-
-1. **Access the Polkadot/Substrate User Interface (UI):**
- Visit the [Substrate UI](https://polkadot.js.org/apps/). This web interface is used to interact with the Tangle network and other Substrate chains, during our testnet phase you can use [Webb's alpha interface](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Ftangle-standalone-archive.webb.tools#/accounts)
-
-2. **Connect to the correct network:**
- Ensure you're connected to the Tangle Network, if not, at the top-left of the page, you will see a drop-down menu. Here you can select the Tangle network.
-
-3. **Access the Democracy module:**
- On the left sidebar, under the Governance tab, click on Democracy. This is the on-chain voting system where all the current referenda are listed.
-
-4. **Choose a Referendum:**
- You will see a list of active referenda each represented by a number. Click on a specific referendum to see more details.
-
-5. **Review the Referendum Details:**
- Each referendum has a description and specific details. Review these carefully to understand what you are voting for or against.
-
-6. **Cast Your Vote:**
- Once you've decided how to vote, click on the "Vote" button. You'll be asked to choose between 'Aye' (yes) and 'Nay' (no), and you'll have the option to adjust your vote's "conviction," which multiplies your vote's power at the cost of locking your tokens for a longer period.
-
-7. **Sign and Submit the Transaction:**
- After clicking the "Vote" button, you will need to sign the transaction using your account. Enter your password and click on "Sign and Submit". Your vote will be recorded on the blockchain once the transaction is included in a block.
-
-8. **Wait for the Voting Period to End:**
- Each referendum has a voting period. When this period ends, votes are tallied, and the decision is enacted based on the majority vote.
-
-Remember that **voting in a referendum will lock your tokens until the end of the enactment period (if the proposal passes) or until the end of the voting period (if the proposal does not pass).** The length of these periods can vary, refer to [our parameters.](/docs/tangle-network/governance/governance-parameters/)
diff --git a/pages/docs/tangle-network/governance/governance-interfaces.mdx b/pages/docs/tangle-network/governance/governance-interfaces.mdx
deleted file mode 100644
index 15762f85..00000000
--- a/pages/docs/tangle-network/governance/governance-interfaces.mdx
+++ /dev/null
@@ -1,6 +0,0 @@
-# Governance Interfaces
-
-At this time, two interfaces are planned that you can utilize to vote, discuss, propose and run for office:
-
-1. Polkadot JS ('Apps') (See Testnet Apps at https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Ftangle-standalone-archive.webb.tools#/)
-2. [Commonwealth](https://commonwealth.im/webb)
diff --git a/pages/docs/tangle-network/governance/governance-parameters.mdx b/pages/docs/tangle-network/governance/governance-parameters.mdx
deleted file mode 100644
index 1a3b6db1..00000000
--- a/pages/docs/tangle-network/governance/governance-parameters.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Governance Parameters
-
-The following durations control windows of action for several governance processes on Tangle Network. These values will likely change as we approach mainnet.
-
-| Parameter | Duration (minutes) | Duration (days) |
-| ----------------------- | -----------------: | --------------: |
-| `LaunchPeriod` | 40320 | 28 |
-| `VotingPeriod` | 40320 | 28 |
-| `FastTrackVotingPeriod` | 4320 | 3 |
-| `EnactmentPeriod` | 43200 | 30 |
-| `CooloffPeriod` | 40320 | 28 |
-
-**Descriptions**
-`LaunchPeriod`: Represents the duration of the launch period in real-world time.
-`VotingPeriod`: Represents the duration of the voting period in real-world time.
-`FastTrackVotingPeriod`: Represents the duration of the fast-track voting period in real-world time.
-`EnactmentPeriod`: Represents the duration of the enactment period in real-world time.
-`CooloffPeriod`: Represents the duration of the cool-off period in real-world time.
-`MinimumDeposit`: This parameter defines the minimum balance (measured in some unspecified unit) that must be deposited in some context. The value is 100 times the base UNIT.
-`MaxProposals`: This parameter limits the maximum number of proposals that can be active at any given time to 100.
diff --git a/pages/docs/tangle-network/governance/governance-procedures.mdx b/pages/docs/tangle-network/governance/governance-procedures.mdx
deleted file mode 100644
index 737fcf60..00000000
--- a/pages/docs/tangle-network/governance/governance-procedures.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
-# Procedures in On-chain Governance
-
-## Launching a Simple-Majority Referenda
-
-A Simple Majority proposal is a proposal that necessitates a majority, or more than 51% of votes, to pass, rather than the 'Super Majority' (2/3 of voters) which is the default requirement. This method ensures a predisposition towards approval, preventing the potential override of the general will of many smaller stakeholders by a single party that might vote 6x against a proposal. This strategy aligns power with the broader community, rather than just a few individuals.
-
-### Process to Execute a Simple Majority Proposal
-
-The process is slightly intricate at the moment, requiring multiple stages and council approval. The objective is to automate this via modifications to the existing treasury pallet. However, until that is achieved, here's the process.
-
-1. **Creating a Treasury Proposal**
-
- Visit the Polkadotjs app, switch to the network and navigate to the treasury tab found in the governance dropdown menu. Here, you'll see the ongoing treasury proposals.
-
- Click 'Submit Proposal' located on the right side of the interface and enter the proposal's details. You'll be required to bond a percentage of the total requested tokens using the 'Submit with account'. Usually, a beneficiary account should have been set up and added to your address book. The standard practice is to use a minimum of 2/3 multi-sig accounts for grants.
-
- Your treasury proposal will enter the proposal queue after correct submission. It will receive a number - this is the `proposalID`, important for the next step.
-
-2. **Creating a 'Preimage' and Accompanying 'Hash'**
-
- Navigate to the Governance dropdown menu, select Democracy, and create a preimage of the proposal, which is the formal key-value description of the on-chain proposal. Choose treasury from the options list, which reveals a few extra options. Choose `approveProposal(proposalID)` and enter the `proposalID` from step 1.
-
- Copy the resultant `preimage hash`, submit, and sign this stage with your account.
-
-3. **Council Proposes Simple Majority as a Motion**
-
- This stage requires a council member, either directly involved as the proposing party, or indirectly as an intermediary to help a community member submit a Simple Majority proposal as a Council motion.
-
- Navigate to the Developer dropdown menu and select the Extrinsics option. The proposing council member must switch to their Council member account in the first box.
-
- From the next dropdown menu, select Council and propose(`threshold`, `proposal, `lengthbound`) from the subsequent options. For `threshold`, choose 8, meaning the Simple Majority motion needs a minimum of 8 out of the 13 Council members to pass and execute Treasury proposal as a simple majority referendum.
-
- In the next box, under proposal: Proposal, select democracy and then next to that externalProposeMajority (proposalHash).
-
- Paste the preimage hash (the proposal hash) received in the last stage into the box below proposalHash: Hash.
-
- For the final box, `lengthBound: Compact` enter 42.
-
- Now hit Submit Transaction.
-
-4. **Council Approves Simple Majority Motion**
-
- At least 8/13 council members need to vote Aye to approve this motion and set the treasury proposal on the path to becoming a simple majority referendum.
-
- Once 8 members have voted Aye, the motion can be closed, either by the original council member or any other council member, including the last person to vote Aye. The motion exists for 13 days. If there are insufficient votes Aye/Nay, it won't execute. If it does pass, the proposal will progress towards becoming a simple majority referendum, appearing in the Governance dropdown menu as an external proposal.
diff --git a/pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx b/pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx
deleted file mode 100644
index bfb3e30f..00000000
--- a/pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# Voting in Democracy Referenda
-
-Substrate-based blockchains often have built-in on-chain governance mechanisms, which include voting on referenda. Here's a step-by-step guide on how to vote in democracy referenda on a Substrate blockchain:
-
-Note: This guide assumes you have already set up a Substrate-based wallet and have some tokens in your account.
-
-1. **Access the Polkadot/Substrate User Interface (UI):**
- Visit the [Substrate UI](https://polkadot.js.org/apps/?rpc=wss://testnet-rpc.tangle.tools#/explorer). This web interface is used to interact with the Tangle network and other Substrate chains, during our testnet phase you can use [Webb's alpha interface](https://polkadot.js.org/apps/?rpc=wss://testnet-rpc.tangle.tools#/explorer)
-
-2. **Connect to the correct network:**
- Ensure you're connected to the Tangle Network, if not, at the top-left of the page, you will see a drop-down menu. Here you can select the Tangle network.
-
-3. **Access the Democracy module:**
- On the left sidebar, under the Governance tab, click on Democracy. This is the on-chain voting system where all the current referenda are listed.
-
-4. **Choose a Referendum:**
- You will see a list of active referenda each represented by a number. Click on a specific referendum to see more details.
-
-5. **Review the Referendum Details:**
- Each referendum has a description and specific details. Review these carefully to understand what you are voting for or against.
-
-6. **Cast Your Vote:**
- Once you've decided how to vote, click on the "Vote" button. You'll be asked to choose between 'Aye' (yes) and 'Nay' (no), and you'll have the option to adjust your vote's "conviction," which multiplies your vote's power at the cost of locking your tokens for a longer period.
-
-7. **Sign and Submit the Transaction:**
- After clicking the "Vote" button, you will need to sign the transaction using your account. Enter your password and click on "Sign and Submit". Your vote will be recorded on the blockchain once the transaction is included in a block.
-
-8. **Wait for the Voting Period to End:**
- Each referendum has a voting period. When this period ends, votes are tallied, and the decision is enacted based on the majority vote.
-
-Remember that **voting in a referendum will lock your tokens until the end of the enactment period (if the proposal passes) or until the end of the voting period (if the proposal does not pass).** The length of these periods can vary, refer to [our parameters.](../governance/governance-parameters/)
diff --git a/pages/docs/tangle-network/governance/overview.mdx b/pages/docs/tangle-network/governance/overview.mdx
deleted file mode 100644
index 2b6bc6a0..00000000
--- a/pages/docs/tangle-network/governance/overview.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
-# Overview of On-chain Governance
-
-In many contemporary on-chain governed blockchain networks, the concept of shared power and decision-making is fundamental. These decentralized ecosystems operate under a community governance model, allowing every token holder to have a say in network upgrades and the evolution of its protocol.
-
-The network's governance structure typically encompasses several key roles: the council and the general token holder population. Each of these roles contributes to the decision-making process, bringing balance to the governance model.
-
-**The Council**, elected by token holders, typically consists of a smaller group of participants who are committed to the sustainability and future of the network. Council members are responsible for proposing referenda, vetoing dangerous or malicious proposals, and representing passive token holders.
-
-**Token holders**, as members of the network, are part of its governance system. They vote on referenda using their tokens, propose changes to the network, and elect the council members.
-
-A unique feature of such ecosystems is their adaptive quorum biasing, which adjusts the passing threshold of proposals based on turnout. This ensures a fair representation and participation in the system.
-
-Furthermore, an integral part of these blockchain networks is their ability to upgrade and evolve over time without necessitating hard forks. Changes to the network protocol, like runtime upgrades, can be proposed, voted on, and enacted in a decentralized manner without interrupting network services.
-
-In essence, these on-chain governed blockchain networks exemplify how decision-making power can be shared amongst all participants. The ethos of transparency, collective intelligence, and broad participation in these systems pave the way for future blockchain technologies.
-
-# Tangle Network Governance Guide
-
-The governance system of Tangle Network is divided into two parts, the public referenda and the council. The public referenda allows any TNT token holder to propose and vote, given they provide a bond.
-
-## Public Referenda
-
-Proposals can be made by any token holder. Others can agree with the proposal by seconding it and providing tokens equivalent to the original bond. The most seconded proposal during every launch period is moved to the public referenda table for active voting. Voters can lock their tokens for a longer duration to amplify their vote.
-
-Detailed information on the governance system can be found [here](/docs/tangle-network/governance/overview/).
-
-## Important Parameters for Democracy Module
-
-Here are some important parameters when voting using the Democracy module:
-
-- Launch Period: Frequency of new public referenda launches.
-- Voting Period: Frequency of referenda vote tallying.
-- Emergency Voting Period: The minimum voting period for a fast-tracked emergency referendum.
-- Minimum Deposit: The minimum amount needed as a deposit for a public referendum proposal.
-- Enactment Period: The minimum time period for locking funds and the period between a proposal being approved and enacted.
-- Cool off Period: The time period when a proposal can't be re-submitted after being vetoed.
-
-These parameters may change based on governance. Refer to Network Parameters or the Node Runtime for the most authoritative reference.
-
-## Proposing an Action
-
-To propose an action, you need to bond some tokens. The Polkadot Apps' "Democracy" tab allows you to submit a new proposal by entering the preimage hash of the proposal. After submitting your proposal, it appears in the "proposals" column. At this point, it is only visible and can be seconded. The preimage must be submitted to make the proposal actionable.
-
-## Seconding a Proposal
-
-Seconding a proposal means you agree with the proposal and back it with a deposit equal to the original one. The most seconded proposal is tabled as a referendum to be voted on every launch period.
-
-## Voting on a Proposal
-
-To vote on a referendum, navigate to the "Democracy" tab. Any active referendum will appear in the "referenda" column. Click "Vote" to cast a vote for the referendum. Locking your tokens for a longer duration can weigh your vote more strongly. The lock duration doesn't compound upon consecutive votes.
-
-## Delegate a Vote
-
-If you're unable to keep up with and vote on upcoming referenda, you can delegate your vote to another account. The delegated tokens add to the vote the delegatee has made.
-
-## Undelegate a Vote
-
-You can remove your delegation at any point in the future. Your tokens will be locked for a duration in accordance with the conviction you set at the beginning of the delegation.
-
-## Proxies
-
-Proxies vote on behalf of a stash account. Setting a proxy involves submitting a "setProxy" transaction from the "democracy" pallet. You can also remove a proxy or resign as a proxy using the "removeProxy" and "resignProxy" transactions respectively.
diff --git a/pages/docs/tangle-network/governance/proposal-creation.mdx b/pages/docs/tangle-network/governance/proposal-creation.mdx
deleted file mode 100644
index bb57b5df..00000000
--- a/pages/docs/tangle-network/governance/proposal-creation.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Proposing an Action on the Tangle Network
-
-Proposing an referenda on the Tangle Network requires you to bond some TNT tokens. To ensure you have enough tokens to make the minimum deposit, you can check the parameter in the chain state.
-
-On Tangle Apps, you can use the "Democracy" tab to make a new proposal. To submit a proposal, you'll need to submit what's called the preimage hash. The preimage hash is the hash of the proposal to be enacted. You can easily get the preimage hash by clicking on the "Submit preimage" button and configuring the action you're proposing.
-
-Copy this preimage hash and save it for the next step. There's no need to click "Submit Preimage" at this point, though you could. We'll go over that in the next section.
-
-Now you'll click on the "Submit proposal" button, enter the preimage hash into the "preimage hash" field, and enter at least the minimum deposit into the "locked balance" field. Click on the blue "Submit proposal" button, confirm the transaction, and you should see your proposal appear in the "proposals" column on the page.
-
-Your proposal is now visible to anyone who accesses the Tangle Network, and others can second it or submit a preimage. However, it's hard to tell exactly what this proposal does since it shows the hash of the action. Other TNT holders won't be able to make a judgement about whether they second it or not until someone submits the actual preimage for this proposal. In the next step, you will submit the preimage.
-
-# Submitting a Preimage
-
-The act of making a proposal is separate from submitting the preimage for the proposal since the storage cost of submitting a large preimage could be quite high. Allowing the preimage submission to come as a separate transaction means another account could submit the preimage for you if you don't have the funds to do so. It also means that you don't have to pay so many funds right away as you can prove the preimage hash out-of-band.
-
-However, before the proposal passes, you'll need to submit the preimage, or else the proposal can't be enacted. The guide will now show you how to do this.
-
-Click on the blue "Submit preimage" button and configure it to be the same as what you did before to acquire the preimage hash. This time, instead of copying the hash to another tab, follow through and click "Submit preimage" and confirm the transaction.
-
-## Submit Preimage
-
-Once the transaction is included, you should see the UI update with the information for your already submitted proposal.
-
-# Seconding a Proposal
-
-Seconding a proposal means that you're agreeing with the proposal and backing it with an equal amount of deposit as was originally locked. By seconding a proposal, you'll move it higher up the rank of proposals. The most seconded proposal - in value, not the number of supporters - will be tabled as a referendum to be voted on every launch period.
-
-To second a proposal, navigate to the proposal you want to second and click on the "Second" button.
-
-You'll be prompted with the full details of the proposal (if the preimage has been submitted!), and you can then broadcast the transaction by clicking the blue "Second" button.
-
-Once successful, you'll see your second appear in the dropdown in the proposal details.
diff --git a/pages/docs/tangle-network/learn/_meta.json b/pages/docs/tangle-network/learn/_meta.json
deleted file mode 100644
index 08f2b574..00000000
--- a/pages/docs/tangle-network/learn/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "webb-protocol": "Webb Protocol on Tangle",
- "incentives": "Incentives, Staking and Slashing",
- "understanding-dkg-tangle": "Distributed Key Generation (DKG)"
-}
diff --git a/pages/docs/tangle-network/learn/incentives.mdx b/pages/docs/tangle-network/learn/incentives.mdx
deleted file mode 100644
index 68dcd187..00000000
--- a/pages/docs/tangle-network/learn/incentives.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# Tangle Network Incentives
-
-Tangle Network is built on, Substrate, a powerful framework developed by Parity Technologies for creating blockchains. It comes with many built-in functionalities, allowing developers to customize their blockchain with an array of features. Among these features, staking and slashing play crucial roles in ensuring the security and robustness of Substrate-based networks.
-
-## Staking
-
-Staking is a system used by proof-of-stake (PoS) blockchains to secure the network, incentivize behavior, and distribute rewards. In Substrate, a number of nodes, known as validators, are selected to produce blocks and confirm transactions. Users can participate as:
-
-- **Validators**: Validators are responsible for proposing new blocks and validating proposed blocks. They need to be highly secure and continuously online. Validators stake their own tokens and can receive tokens from nominators.
-
-- **Nominators**: Nominators support validators by staking tokens on them. They share in the rewards and risks of validators, without needing to run a node themselves.
-
-Validators and nominators are rewarded with new tokens generated by the network, proportional to the amount of tokens they have staked.
-
-## Slashing
-
-To ensure validators behave properly, Substrate uses a mechanism known as slashing. If a validator misbehaves, a portion (or all) of their staked tokens are confiscated ("slashed") by the network. There are several types of misbehavior that can lead to slashing:
-
-- **Equivocation**: Equivocation occurs when a validator signs two or more conflicting blocks at the same height in the chain. It is considered a severe misbehavior as it could cause a fork in the network.
-
-- **Offline Validators**: Validators are expected to be online and participating in consensus. Validators who are offline for an extended period can be slashed.
-
-- **Malicious Behavior**: Any malicious actions against the network, like trying to manipulate consensus or carry out an attack, can result in slashing.
-
-## Misbehavior Reporting
-
-Substrate also includes mechanisms for reporting misbehavior. Any network participant can submit proof of validator misbehavior to the chain. If the proof is valid, the validator is slashed, and the reporter is usually rewarded with a portion of the slashed amount.
-
-## Malicious Signing
-
-A validator signing arbitrary or malicious messages is another significant misbehavior. This can include signing a message that would lead to equivocation or attempting to manipulate consensus. The penalty for malicious signing is typically severe, often resulting in the slashing of the validator's entire stake.
diff --git a/pages/docs/tangle-network/learn/understanding-dkg-tangle.mdx b/pages/docs/tangle-network/learn/understanding-dkg-tangle.mdx
deleted file mode 100644
index 49933d3c..00000000
--- a/pages/docs/tangle-network/learn/understanding-dkg-tangle.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
-# Understanding DKG in Tangle Network
-
-import DKGRotationImages from '/components/images/DKGRotation'
-import DKGReputationImages from '/components/images/DKGReputation'
-
-## Authority Selection
-
-The authority selection system for DKG authorities uses a simple reputation mechanism for selecting the best set
-of authorities to participate in the key generation and signing protocols. The integer thresholds for keygen authorities
-and signing authorities are set and governed on-chain and directly correspond to the number of DKG clients that will
-participate in either protocols.
-
-For a keygen threshold of **_n_** and a signing threshold of **_t_**, we take the top-**_n_** authorities on chain by
-reputation. Out of these **_n_** keygen authorities, **_t+1_** of them are selected for signing. The keygen set remains
-fixed over the course of the session, whereas the signing set can change amidst misbehaviours in signing protocol participation.
-
-### Reputation Calculation
-
-The `pallet-dkg-metadata` is responsible for calculating and maintaining the reputation scores of all active validators. Reputation serves as a metric to assess and evaluate the authorities, taking into account the number of valid keygen operations performed by the authority and any reported misbehaviors.
-
-The misbehaviour reporting process is based on an honest-threshold assumption. If DKG authorities misbehave off-chain, any observing authority can submit a report against the offending authority. Once a threshold number of reports are submitted, the offending authority will experience a loss of reputation. The reputation map is used by each DKG authority to ensure that every authority can generate a deterministic signing set for threshold signing protocols. Initially, the signing set consists of the top `t` DKG authorities according to their reputation.
-
-The formula for updating reputation based on misbehaviour is as follows:
-
-```
-AUTHORITY_REPUTATION = DECAY_PERCENTAGE * AUTHORITY_REPUTATION
-```
-
-## Jailing authorities
-
-For each instance of misconduct that is verified and reported to an authority, the implicated authority will be temporarily suspended or "jailed" for a specific number of sessions. During this suspension, the authority will not be included in the selection process for the authority selection set. This serves as a punitive measure against malicious behaviour. The authority may be reinstated after a predetermined period of time has passed.
-
-
-
-## Key rotation
-
-The DKG required network participants to rotate their shared private signing keys in an effort to keep the
-network secure. On a new session, the new authorities (from validators) are selected and the
-next authorities are selected.
-
-1. These next authorities run **_keygen_** protocol discussed above and output a new group keypair on-chain,
- denoted `next_dkg_public_key`.
-2. The current authorities (having already run this process in the step before) see this event and if it is time
- to refresh, they begin to sign the `next_dkg_public_key` with their key, the `dkg_public_key`.
-3. The signature from the active keypair of the next keypair is posted on-chain.
-4. Once this signature is posted, anyone can propagate it.
- - Any relayer.
- - Any user who wants to update the governor of their contract.
-
-### Key rotation flow
-
-The on-chain keys are rotated every session. This is done so that the DKG validators and network validators are
-aligned and new validators can leave and join as desired. At the end of the session's target period, the Tangle
-runtime triggers the process to generate a new key. A new distributed key generation protocol executes with the
-next on-chain authorities. These authorities then work together to generate a new key. The active (current) authorities
-then sign the newly generated key with a threshold signature and post it on-chain to complete a successful key rotation.
-
-
-
-## Misbehaviour Reporting & Reputation
-
-Misbehaviour reporting follows an oracle-based approach. The DKG protocol we utilize has identifiable aborts, meaning it is possible to identify the party misbehaving during the protocol’s execution. While every honest party sees this misbehaviour, it is tricky to identify on-chain, as it would require running the protocol itself on-chain and verifying the misbehaviour proof.
-
-Instead of pushing proofs of misbehaviour on-chain, we utilize a threshold voting-based approach. For a threshold **t** and offender **o**, if **t** parties report the same misbehaviour containing the type of misbehaviour, the round of misbehaviour, and the offending authority, then the party’s reputation reduces according to the function:
-
-```
-reputation(o) = α * reputation(o)
-```
-
-When a good action occurs, such as successfully rotating keys or signing a proposal and submitting it on-chain, the reputation increases according to the function:
-
-```
-reputation(o) = α * reputation(o) + 1,000,000,000
-```
diff --git a/pages/docs/tangle-network/learn/webb-protocol.mdx b/pages/docs/tangle-network/learn/webb-protocol.mdx
deleted file mode 100644
index 8b2a1e38..00000000
--- a/pages/docs/tangle-network/learn/webb-protocol.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: "Tangle Network and Webb Protocol"
----
-
-# Guide to Tangle Network and Webb Protocol
-
-Tangle Network and Webb Protocol join forces to enhance privacy, interoperability, and scalability for decentralized applications (dApps). This guide provides a detailed overview of Webb Protocol, its core components, and its role in the Tangle Network ecosystem.
-
-## Overview
-
-Webb Protocol aims to resolve privacy and scalability issues that have become ubiquitous in the world of decentralized applications. It achieves this by offering a privacy-focused, interoperable, and scalable framework that interacts with various blockchains.
-
-Tangle Network, a Substrate-based platform fully compatible with Ethereum, hosts and supports the governance of this transformative protocol to offer advanced privacy options and scalability to developers and users.
-
-## Core Components of Webb Protocol
-
-Webb Protocol introduces two pivotal components - the Anchor System and the Distributed Key Generation (DKG) protocol.
-
-### Anchor System
-
-The Anchor System is a smart contract-based, graph-like framework that maintains an interconnected network of blockchains, referred to as anchors. Each anchor features an on-chain Merkle tree and an edge list to manage and update linked metadata, enabling cross-chain applications with potential privacy features.
-
-The system guarantees Liveness and Safety— ensuring the continuous update of all anchors and validating all anchor updates. Oracle and Relayer Networks in a partially-synchronous environment connect the various blockchains and preserve user privacy by delegating proof submission through an overlay network.
-
-### Distributed Key Generation (DKG) protocol
-
-The DKG protocol is the security foundation of Webb Protocol. It validates updates in the Anchor System using a shared key pair. This protocol employs a cryptographic multi-party mechanism to generate a shared public and private key for threshold signatures and threshold encryptions. It encompasses two significant protocols— the threshold-key-generation protocol, and the threshold-signature-scheme or the distributed signing protocol. This secure framework handles signed messages and ensures the absence of malicious proposals.
-
-## Operations within a Proof of Stake (PoS) Blockchain
-
-Webb Protocol operates within a PoS blockchain protocol. It adapts to changing validator set sizes and maintains security with the execution of the Key Rotation Protocol and Misbehaviour Protocol during each session. The system uses standardized messages to facilitate seamless communication within the system.
-
-## Unique Features
-
-Webb Protocol employs a hybrid threshold and light-client validation protocol. It allows for threshold-signed messages to be deemed valid while enforcing message signing only when they can be verified as true against a light client.
diff --git a/pages/docs/tangle-network/node/_meta.json b/pages/docs/tangle-network/node/_meta.json
deleted file mode 100644
index 0732c517..00000000
--- a/pages/docs/tangle-network/node/_meta.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "quickstart": "Quickstart",
- "hardware": "Hardware",
- "node-software": "Tangle Software",
- "systemd": "Deploy with systemd",
- "docker-node": "Deploy with Docker",
- "monitoring": "Node Monitoring",
- "validator": "Validate",
- "flags": "Flags",
- "troubleshooting": "Troubleshooting",
- "quicknode": "Launch a Quick Node"
-}
diff --git a/pages/docs/tangle-network/node/docker-node.mdx b/pages/docs/tangle-network/node/docker-node.mdx
deleted file mode 100644
index 52744cd4..00000000
--- a/pages/docs/tangle-network/node/docker-node.mdx
+++ /dev/null
@@ -1,295 +0,0 @@
----
-title: Deploying with Docker
-description: Deploy a Tangle node with only a few steps using Docker.
----
-
-import Callout from "../../../../components/Callout";
-import { Tabs } from 'nextra/components';
-
-# Deploying a Tangle Network Node with Docker
-
-A Tangle Network node can be spun up quickly using Docker. **This guide covers both Full Node and Validator Node deployment.** . For more information on installing Docker,
-please visit the official Docker [docs](https://docs.docker.com/get-docker/). Make sure that your system meets the requirements which can read [here](https://docs.webb.tools/docs/ecosystem-roles/validator/requirements/).
-
-## Setup the Docker Environment
-
-The quickest and easiest way to get started is to make use of our published Docker Tangle image. In doing so, users simply pull down the image from ghcr.io,
-set their keys, fetch the applicable chainspec and run the start command to get up and running.
-
-### 1. Pull the Tangle Docker image:
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing, it will use the latest and maybe unstable version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### 2. Create a local directory to store the chain data:
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### 3. Fetch applicable chainspec(s):
-
-To join the Tangle Test network, we need to fetch the appropriate chainspec for the Tangle network.
-Download the latest chainspec for standalone testnet:
-
-```sh filename="get chainspec" copy
-# Fetches chainspec for Tangle network
-wget https://raw.githubusercontent.com/webb-tools/tangle/main/chainspecs/testnet/tangle-standalone.json
-```
-
-Please make a reference where you have stored this `json` file as we will need it in the next steps.
-
-### 4. Select and Start your Node Type
-
-
-
-**4. Start Tangle full node:**
-
-**Note:** Full nodes do not participate in block production or consensus so no required keys are necessary.
-
-To start the node run the following command:
-
-```sh filename="docker run" copy
-docker run --rm -it -v /var/lib/tangle/:/data ghcr.io/webb-tools/tangle/tangle-standalone:main \
- --chain tangle-testnet \
- --name="YOUR-NODE-NAME" \
- --base-path /data \
- --rpc-cors all \
- --port 9946 \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-```
-
-
- For an overview of the above flags, please refer to the [CLI Usage](/docs/ecosystem-roles/validator/api-reference/cli/) page of our documentation.
-
-
-Once Docker pulls the necessary images, your Tangle node will start, displaying lots of information,
-such as the chain specification, node name, role, genesis state, and more.
-
-If you followed the installation instructions for Tangle, once synced, you will be connected to peers and see
-blocks being produced on the Tangle network! Note that in this case you need to also sync to the Polkadot/Kusama
-relay chain, which might take a few days.
-
-
-
-
-### Generate and store keys:
-
-We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys](/docs/ecosystem-roles/validator/required-keys) section.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-Let's now insert our required secret keys, we will not pass the SURI in the command, instead it will be interactive, where you
-should paste your SURI when the command asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type aura
-```
-
-**Im-online Keys** - **these keys are optional (required if you are running as a validator)**
-
-```sh filename="Imonline" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
- --key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
- --key-type gran
-```
-
-To ensure you have successfully generated the keys correctly run:
-
-```sh filename="ls" copy
-ls ~/webb/tangle/chains/*/keystore
-# You should see a some file(s) there, these are the keys.
-```
-
-**Caution:** Ensure you insert the keys using the instructions at [generate keys](#generate-and-store-keys),
-if you want the node to auto generate the keys, add the `--auto-insert-keys` flag.
-
-### 5. Start Tangle Validator node:
-
-To start the node run the following command:
-
-```sh filename="docker run" copy
-docker run --platform linux/amd64 --network="host" -v "/var/lib/data" --entrypoint ./tangle-standalone \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
---base-path=/data \
---chain tangle-testnet \
---name="YOUR-NODE-NAME" \
---execution wasm \
---wasm-execution compiled \
---trie-cache-size 0 \
---validator \
---telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-```
-
-
- For an overview of the above flags, please refer to the [CLI Usage](/docs/ecosystem-roles/validator/api-reference/cli/) page of our documentation.
-
-
-Once Docker pulls the necessary images, your Tangle node will start, displaying lots of information,
-such as the chain specification, node name, role, genesis state, and more.
-
-If you followed the installation instructions for Tangle, once synced, you will be connected to peers and see
-blocks being produced on the Tangle network!
-
-```sh filename="logs"
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-### Run via Docker Compose
-
-The docker-compose file will spin up a container running Tangle standalone node, but you have to set the following environment variables. Remember to customize your the values depending on your environment and then copy paste this to CLI.
-
-```sh filename="set variables" copy
-RELEASE_VERSION=main
-CHAINSPEC_PATH=/tmp/chainspec/
-```
-
-After that run:
-
-```sh filename="compose up" copy
-docker compose up -d
-```
-
-
-
-
-### Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release [page](https://github.com/webb-tools/tangle/pkgs/container/tangle%2Ftangle-standalone)
-
-3. Pull the latest version of Tangle binary by doing `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:{VERSION_CODE}`.
- Example, if the latest version of Tangle is v0.1.2, then the command would be `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:v0.1.12`
-
-4. Restart the tangle container and you should have the updated version of the client.
-
-### Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop `CONTAINER_ID`
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directly, you can run the following command to purge your parachain node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
-
-## Logs
-
-If you'd like to run the node with verbose logs, you may add the following arguments during initial setup. Adjust the target for the desired logging level (debug | error | info| trace | warn):
-
-```bash
--ldkg=debug \
--ldkg_metadata=debug \
--lruntime::offchain=debug \
--ldkg_proposal_handler=debug \
--ldkg_proposals=debug
-```
-
-## Begin Validating
-
-Now that your node is setup, [continue onto our Validator guides to understand token bonding and more.](./validator/requirements.mdx)
-
-## Support and Questions
-
-Visit our [Discord's validator channel](https://webb.tools/community) for community assistance.
diff --git a/pages/docs/tangle-network/node/flags.mdx b/pages/docs/tangle-network/node/flags.mdx
deleted file mode 100644
index 3d73482e..00000000
--- a/pages/docs/tangle-network/node/flags.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: Flags
-description: Describes the flags necessary to run and customize a Tangle node.
----
-
-import Callout from '/components/Callout';
-
-# Flags and Subcommands
-
-Setting up a Tangle Network node involves various flags to configure its operation. This guide elucidates the commonly used flags and provides instructions on how to view the complete list.
-
-
-Always refer to the official Substrate documentation for exhaustive details on [flags](https://docs.substrate.io/reference/command-line-tools/node-template/), [subcommands](https://docs.substrate.io/reference/command-line-tools/node-template/#subcommands), and [CLI tools and commands.](https://docs.substrate.io/reference/command-line-tools/) for Substrate-based nodes, including Tangle.
-
-
-### Networking:
-
-- `--port`: Define the TCP port for peer-to-peer protocols.
-- `--rpc-port`: Unified port for both HTTP and WS connections.
-- `--in-peers`: Limit on accepted incoming connections (Default: 25).
-- `--out-peers`: Limit on maintained outgoing connections (Default: 25).
-
-### Execution:
-
-- `--execution`: Choose the execution strategy for all contexts based on the runtime compilation:
-
- - `native`: Use only the native build.
- - `wasm`: Use only the Wasm build.
- - `both`: Use both native and Wasm builds.
- - `nativeelsewasm`: Use native; if it fails, use Wasm.
-
-- `--wasm-execution`: Method for executing Wasm runtime code:
- - `compiled`: Uses the Wasmtime compiled runtime (default).
- - `interpreted-i-know-what-i-do`: Uses the wasmi interpreter.
-
-### State & Database:
-
-- `--state-pruning`: Define the state pruning mode:
-
- - `archive`: Retain the full state of all blocks.
- - ``: Retain state only for a specified number of blocks.
-
-- `--trie-cache-size`: Set the internal state cache size.
-- `--db-cache`: Limit the database cache's memory usage. Recommended: 50% of server RAM.
-
-### File Paths & Chain Spec:
-
-- `--base-path`: Path where chain data resides.
-- `--chain`: Chain specification to use; can be a file path.
-
-### Telemetry & Naming:
-
-- `--name`: Assign a name to the node for telemetry.
-- `--telemetry-url`: URL for the telemetry server. Can specify multiple URLs.
-
-### Ethereum Compatibility (Frontier):
-
-- `--eth-log-block-cache`: Limit for the LRU cache size for block data (Default: 300,000,000).
-- `--eth-statuses-cache`: Limit for the LRU cache size for transaction statuses (Default: 300,000,000).
-
-### Syncing:
-
-- `--sync`: Configure the blockchain syncing mode:
- - `full`: Download and validate the full blockchain history.
- - `fast`: Download blocks without execution and get the latest state with proofs.
- - `fast-unsafe`: As 'fast', but without downloading state proofs.
- - `warp`: Download only the latest state and proof.
-
-## Accessing All Flags
-
-To see a full list of flags:
-
-### Using Docker:
-
-Confirm the path and image with your image name:
-
-```
-docker run --network="host" -v "/var/lib/data:/data"
--u $(id -u ${USER}):$(id -g ${USER})
-ghcr.io/webb-tools/tangle/tangle-standalone:main --help
-```
-
-### Using Systemd:
-
-If you used the binary directly:
-
-`./tangle-standalone-YOUR-VERSION-HERE> --help`
-
-If you compiled the binary:
-
-`./target/release/tangle-standalone-YOUR-VERSION-HERE> --help`
-
-
-Currently, your release may be one the following.
-Refer to [the Releases page on our Github for more information](https://github.com/webb-tools/tangle/releases):
-- tangle-standalone-light-client-linux-amd64
-- tangle-standalone-linux-amd64
-- tangle-standalone-relayer-linux-amd64
-- tangle-standalone-txpool-linux-amd64
-
diff --git a/pages/docs/tangle-network/node/hardware.mdx b/pages/docs/tangle-network/node/hardware.mdx
deleted file mode 100644
index 9e4dd0f7..00000000
--- a/pages/docs/tangle-network/node/hardware.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Hardware Requirements
-description: An overview of Tangle Network hardware requirements.
----
-
-import { Tabs, Tab } from "/components/Tabs";
-import Callout from "/components/Callout";
-
-# Hardware
-
-The current Tangle testnet is a standalone network, meaning that it is not connected to the Polkadot or Kusama relay chain.
-Since the Tangle is not a parachain, the size of nodes are quite a small build as it only contains code to run the standalone Tangle network and not syncing
-the relay chain or communicate between the two. As such, the build is smaller, and does not require the same minumum spec requirements as a parachain node.
-
-The following specifications are the ideal or recommended, but nodes can be run with less. Testnet nodes have also been run using AWS t3.Large instances.
-
-| Component | Requirements |
-| --------- | ------------------------------------------------------------------------------------------------------ |
-| CPU | Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz |
-| Storage | An NVMe solid state drive of 500 GB (As it should be reasonably sized to deal with blockchain growth). |
-| Memory | 32GB ECC |
-| Firewall | P2P port must be open to incoming traffic:
- Source: Any
- Destination: 30333, 30334 TCP |
-
-### Running Ports
-
-As stated before, the standalone nodes will listen on multiple ports. The default Substrate ports are used in the standalone chain.
-
-The only ports that need to be open for incoming traffic are those designated for P2P.
-
-**Default Ports for a Tangle Full-Node:**
-
-| Description | Port |
-| ----------- | ----------- |
-| P2P | 30333 (TCP) |
-| RPC | 9933 |
-| WS | 9944 |
-| Prometheus | 9615 |
diff --git a/pages/docs/tangle-network/node/monitoring/_meta.json b/pages/docs/tangle-network/node/monitoring/_meta.json
deleted file mode 100644
index e8491a85..00000000
--- a/pages/docs/tangle-network/node/monitoring/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "quickstart": "Quickstart",
- "prometheus": "Prometheus",
- "alert-manager": "AlertManager",
- "grafana": "Grafana Dashboard",
- "loki": "Loki Log Manager"
-}
diff --git a/pages/docs/tangle-network/node/monitoring/alert-manager.mdx b/pages/docs/tangle-network/node/monitoring/alert-manager.mdx
deleted file mode 100644
index a6b4664a..00000000
--- a/pages/docs/tangle-network/node/monitoring/alert-manager.mdx
+++ /dev/null
@@ -1,342 +0,0 @@
----
-title: Alert Manager Setup
-description: Create alerts to notify the team when issues arise.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Alert Manager Setup
-
-The following is a guide outlining the steps to setup AlertManager to send alerts when a Tangle node or DKG is being disrupted. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to send alerts from a running Tangle node.
-
-- **Alert Manager** listens to Prometheus metrics and pushes an alert as soon as a threshold is crossed (CPU % usage for example).
-
-## What is Alert Manager?
-
-The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of deduplicating, grouping,
-and routing them to the correct receiver integration such as email, PagerDuty, or OpsGenie. It also takes care of silencing and
-inhibition of alerts. To learn more about Alertmanager, please
-visit the official docs site [here](https://prometheus.io/docs/alerting/latest/alertmanager/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Alertmanager).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine. As well as,
- the user has already configured Prometheus on this machine.
-
-
-**1. Download Alertmanager**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.linux-arm64.tar.gz &&
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/prometheus/prometheus/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.windows-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.windows-arm64.tar.gz
- ```
-
-
-
-
-**2. Extract the Downloaded Files:**
-
-Run the following command:
-
-```sh filename="tar" copy
-tar xvf alertmanager-*.tar.gz
-```
-
-**3. Copy the Extracted Files into `/usr/local/bin`:**
-
-
- **Note:** The example below makes use of the `linux-amd64` installations, please update to make use of the target system you have installed.
-
-
-Copy the `alertmanager` binary and `amtool`:
-
-```sh filename="cp" copy
-sudo cp ./alertmanager-*.linux-amd64/alertmanager /usr/local/bin/ &&
-sudo cp ./alertmanager-*.linux-amd64/amtool /usr/local/bin/
-```
-
-**4. Create Dedicated Users:**
-
-Now we want to create dedicated users for the Alertmanager module we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin alertmanager
-```
-
-**5. Create Directories for `Alertmanager`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /etc/alertmanager &&
-sudo mkdir /var/lib/alertmanager
-```
-
-**6. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-**alertManager**:
-
-```sh filename="chown" copy
-sudo chown alertmanager:alertmanager /etc/alertmanager/ -R &&
-sudo chown alertmanager:alertmanager /var/lib/alertmanager/ -R &&
-sudo chown alertmanager:alertmanager /usr/local/bin/alertmanager &&
-sudo chown alertmanager:alertmanager /usr/local/bin/amtool
-```
-
-**7. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./alertmanager*
-```
-
-Great! You have now installed and setup your environment. The next series of steps will be configuring the service.
-
-## Configuration
-
-If you are interested to see how we configure the Tangle Network nodes for monitoring check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Prometheus
-
-The first thing we need to do is add `rules.yml` file to our Prometheus configuration:
-
-Let’s create the `rules.yml` file that will give the rules for Alert manager:
-
-```sh filename="nano" copy
-sudo touch /etc/prometheus/rules.yml
-sudo nano /etc/prometheus/rules.yml
-```
-
-We are going to create 2 basic rules that will trigger an alert in case the instance is down or the CPU usage crosses 80%.
-You can create all kinds of rules that can triggered, for an exhausted list of rules see our rules list [here](https://github.com/webb-tools/tangle/blob/main/monitoring/prometheus/rules.yml).
-
-Add the following lines and save the file:
-
-```sh filename="group" copy
-groups:
- - name: alert_rules
- rules:
- - alert: InstanceDown
- expr: up == 0
- for: 5m
- labels:
- severity: critical
- annotations:
- summary: "Instance $labels.instance down"
- description: "[{{ $labels.instance }}] of job [{{ $labels.job }}] has been down for more than 1 minute."
-
- - alert: HostHighCpuLoad
- expr: 100 - (avg by(instance)(rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
- for: 0m
- labels:
- severity: warning
- annotations:
- summary: Host high CPU load (instance bLd Kusama)
- description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
-```
-
-The criteria for triggering an alert are set in the `expr:` part. You can customize these triggers as you see fit.
-
-Then, check the rules file:
-
-```yaml filename="promtool rules" copy
-promtool check rules /etc/prometheus/rules.yml
-```
-
-And finally, check the Prometheus config file:
-
-```yaml filename="promtool check" copy
-promtool check config /etc/prometheus/prometheus.yml
-```
-
-### Gmail setup
-
-We can use a Gmail address to send the alert emails. For that, we will need to generate an app password from our Gmail account.
-
-Note: we recommend you here to use a dedicated email address for your alerts. Review Google's own guide for
-proper set up [here](https://support.google.com/mail/answer/185833?hl=en).
-
-### Slack notifications
-
-We can also utilize Slack notifications to send the alerts through. For that we need to a specific Slack channel to send the notifications to, and
-to install Incoming WebHooks Slack application.
-
-To do so, navigate to:
-
-1. Administration > Manage Apps.
-2. Search for "Incoming Webhooks"
-3. Install into your Slack workspace.
-
-### Alertmanager
-
-The Alert manager config file is used to set the external service that will be called when an alert is triggered. Here, we are going to use the Gmail and Slack notification created previously.
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/alertmanager/alertmanager.yml
-sudo nano /etc/alertmanager/alertmanager.yml
-```
-
-And add the Gmail configuration to it and save the file:
-
-```sh filename="Gmail config" copy
-global:
- resolve_timeout: 1m
-
-route:
- receiver: 'gmail-notifications'
-
-receivers:
-- name: 'gmail-notifications'
- email_configs:
- - to: 'EMAIL-ADDRESS'
- from: 'EMAIL-ADDRESS'
- smarthost: 'smtp.gmail.com:587'
- auth_username: 'EMAIL-ADDRESS'
- auth_identity: 'EMAIL-ADDRESS'
- auth_password: 'EMAIL-ADDRESS'
- send_resolved: true
-
-
-# ********************************************************************************************************************************************
-# Alert Manager for Slack Notifications *
-# ********************************************************************************************************************************************
-
- global:
- resolve_timeout: 1m
- slack_api_url: 'INSERT SLACK API URL'
-
- route:
- receiver: 'slack-notifications'
-
- receivers:
- - name: 'slack-notifications'
- slack_configs:
- - channel: 'channel-name'
- send_resolved: true
- icon_url: https://avatars3.githubusercontent.com/u/3380462
- title: |-
- [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
- {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
- {{" "}}(
- {{- with .CommonLabels.Remove .GroupLabels.Names }}
- {{- range $index, $label := .SortedPairs -}}
- {{ if $index }}, {{ end }}
- {{- $label.Name }}="{{ $label.Value -}}"
- {{- end }}
- {{- end -}}
- )
- {{- end }}
- text: >-
- {{ range .Alerts -}}
- *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
- *Description:* {{ .Annotations.description }}
- *Details:*
- {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- {{ end }}
- {{ end }}
-```
-
-Of course, you have to change the email addresses and the auth_password with the one generated from Google previously.
-
-## Service Setup
-
-### Alert manager
-
-Create and open the Alert manager service file:
-
-```sh filename="create service" copy
-sudo tee /etc/systemd/system/alertmanager.service > /dev/null << EOF
-[Unit]
- Description=AlertManager Server Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=alertmanager
- Group=alertmanager
- Type=simple
- ExecStart=/usr/local/bin/alertmanager \
- --config.file /etc/alertmanager/alertmanager.yml \
- --storage.path /var/lib/alertmanager \
- --web.external-url=http://localhost:9093 \
- --cluster.advertise-address='0.0.0.0:9093'
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start the alertManager service:
-
-**alertManager**:
-
-```sh filename="start service" copy
-sudo systemctl start alertmanager.service
-```
-
-And check that they are working fine:
-
-**alertManager**::
-
-```sh filename="status" copy
-sudo systemctl status alertmanager.service
-```
-
-If everything is working adequately, activate the services!
-
-**alertManager**:
-
-```sh filename="enable" copy
-sudo systemctl enable alertmanager.service
-```
-
-Amazing! We have now successfully added alert monitoring for our Tangle node!
diff --git a/pages/docs/tangle-network/node/monitoring/grafana.mdx b/pages/docs/tangle-network/node/monitoring/grafana.mdx
deleted file mode 100644
index 916cb9ac..00000000
--- a/pages/docs/tangle-network/node/monitoring/grafana.mdx
+++ /dev/null
@@ -1,193 +0,0 @@
----
-title: Grafana Dashboard Setup
-description: Create visual dashboards for the metrics captured by Prometheus.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Grafana Setup
-
-The following is a guide outlining the steps to setup Grafana Dashboard to visualize metric data for a Tangle node. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to visualize metric data from a running Tangle node.
-
-- **Grafana** is the visual dashboard tool that we access from the outside (through SSH tunnel to keep the node secure).
-
-## What are Grafana Dashboards?
-
-A dashboard is a set of one or more panels organized and arranged into one or more rows. Grafana ships with a variety of panels making it easy to
-construct the right queries, and customize the visualization so that you can create the perfect dashboard for your need. Each panel can interact
-with data from any configured Grafana data source. To learn more about Grafana Dashboards, please
-visit the official docs site [here](https://grafana.com/docs/grafana/latest/dashboards/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Grafana).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine. As well as,
- the user has already configured Prometheus on this machine.
-
-
-**1. Download Grafana**
-
-
-
-
- ```sh filename="brew" copy
- brew update
- brew install grafana
- ```
-
-
-
-
- ```sh filename="linux" copy
- sudo apt-get install -y apt-transport-https
- sudo apt-get install -y software-properties-common wget
- wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -
- ```
-
- For other linux distrubutions please visit official release page [here](https://grafana.com/grafana/download?edition=oss&platform=linux).
-
-
-
-
-**2. Add Grafana repository to APT sources:**
-
-
- This guide assumes the user is installing and configuring Grafana for a linux machine. For Macos instructions
- please visit the offical docs [here](https://grafana.com/docs/grafana/v9.0/setup-grafana/installation/mac/).
-
-
-```sh filename="add-apt" copy
-sudo add-apt-repository "deb https://packages.grafana.com/oss/deb stable main"
-```
-
-**3. Refresh your APT cache to update your package lists:**
-
-```sh filename="apt update" copy
-sudo apt update
-```
-
-**4. Next, make sure Grafana will be installed from the Grafana repository:**
-
-```sh filename="apt-cache" copy
-apt-cache policy grafana
-```
-
-The output of the previous command tells you the version of Grafana that you are about to install, and where you will retrieve the package from. Verify that the installation candidate at the top of the list will come from the official Grafana repository at `https://packages.grafana.com/oss/deb`.
-
-```sh filename="output"
-Output of apt-cache policy grafana
-grafana:
- Installed: (none)
- Candidate: 6.3.3
- Version table:
- 6.3.3 500
- 500 https://packages.grafana.com/oss/deb stable/main amd64 Packages
-...
-```
-
-**5. You can now proceed with the installation:**
-
-```sh filename="install grafana" copy
-sudo apt install grafana
-```
-
-**6. Install the Alert manager plugin for Grafana:**
-
-```sh filename="grafana-cli" copy
-sudo grafana-cli plugins install camptocamp-prometheus-alertmanager-datasource
-```
-
-## Service Setup
-
-### Grafana
-
-The Grafana’s service is automatically created during extraction of the deb package, you do not need to create it manually.
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-**Start the Grafana service:**
-
-```sh filename="start service" copy
-sudo systemctl start grafana-server
-```
-
-And check that they are working fine, one by one:
-
-```sh filename="status" copy
-systemctl status grafana-server
-```
-
-If everything is working adequately, activate the services!
-
-```sh filename="enable" copy
-sudo systemctl enable grafana-server
-```
-
-## Run Grafana dashboard
-
-Now we are going to setup the dashboard to visiualize the metrics we are capturing.
-
-From the browser on your local machine, navigate to `http://localhost:3000/login`. You should be greeted with
-a login screen. You can login with the default credentials, `admin/admin`. Be sure to update your password afterwards.
-
-
- This guide assumes the user has configured Prometheus, AlertManager, and Loki as a data source.
-
-
-**Next, we need to add Prometheus as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select Prometheus
-5. Input the URL field with http://localhost:9090
-6. Click Save & Test
-
-**Next, we need to add AlertManager as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select AlertManager
-5. Input the URL field with http://localhost:9093
-6. Click Save & Test
-
-**Next, we need to add Loki as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select Loki
-5. Input the URL field with http://localhost:3100
-6. Click Save & Test
-
-We have our data sources connected, now its time to import the dashboard we want to use. You may
-create your own or import others, but the purposes of this guide we will use the Polkadot Essentials dashboard created
-by bLD nodes!
-
-**To import a dashboard:**
-
-1. Select the + button
-2. Select **Import**
-3. Input the dashboard number, **13840**
-4. Select Prometheus and AlertManager as data sources from the dropdown menu
-5. Click Load
-
-**In the dashboard selection, make sure you select:**
-
-- **Chain Metrics**: substrate
-- **Chain Instance Host**: localhost:9615 to point the chain data scrapper
-- **Chain Process Name**: the name of your node binary
-
-Congratulations!! You have now configured Grafana to visualize the metrics we are capturing. You now
-have monitoring setup for your node!
diff --git a/pages/docs/tangle-network/node/monitoring/loki.mdx b/pages/docs/tangle-network/node/monitoring/loki.mdx
deleted file mode 100644
index 31d92fa6..00000000
--- a/pages/docs/tangle-network/node/monitoring/loki.mdx
+++ /dev/null
@@ -1,334 +0,0 @@
----
-title: Loki Log Management
-description: A service dedidated to aggregate and query system logs.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Loki Log Management
-
-The following is a guide outlining the steps to setup Loki for log management of a Tangle node. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to scrape metrics from the running Tangle node.
-
-- **Loki** provides log aggregation system and metrics.
-- **Promtail** is the agent responsible for gathering logs, and sending them to Loki.
-
-Let's first start by downloading the latest releases of the above mentioned modules (Loki, Promtail download pages).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine.
-
-
-**1. Download Loki**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-darwin-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-darwin-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-linux-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-linux-arm64.zip"
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/grafana/loki/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-windows-amd64.exe.zip"
- ```
-
-
-
-
-**2. Download Promtail**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-darwin-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-darwin-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-linux-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-linux-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-windows-amd64.exe.zip"
- ```
-
-
-
-
-**3. Extract the Downloaded Files:**
-
-```sh filename="unzip" copy
-unzip "loki-linux-amd64.zip" &&
-unzip "promtail-linux-amd64.zip"
-```
-
-**4. Copy the Extracted Files into `/usr/local/bin`:**
-
-```sh filename="cp" copy
-sudo cp loki-linux-amd64 /usr/local/bin/ &&
-sudo cp promtail-linux-amd64 /usr/local/bin/
-```
-
-**5. Create Dedicated Users:**
-
-Now we want to create dedicated users for each of the modules we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin loki &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin promtail
-```
-
-**6. Create Directories for `loki`, and `promtail`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /etc/loki &&
-sudo mkdir /etc/promtail
-```
-
-**7. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-```sh filename="chown" copy
-sudo chown loki:loki /usr/local/bin/loki-linux-amd64 &&
-sudo chown promtail:promtail /usr/local/bin/promtail-linux-amd64
-```
-
-**9. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./loki-linux-amd64* &&
-rm -rf ./promtail-linux-amd64*
-```
-
-The next series of steps will be configuring each service.
-
-## Configuration
-
-If you are interested to see how we configure the Tangle Network nodes for monitoring check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Loki
-
-Loki's configuration details what ports to listen to, how to store the logs, and other configuration options.
-There are many other config options for Loki, and you can read more about Loki configuration at: https://grafana.com/docs/loki/latest/configuration/
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/loki/config.yml
-sudo nano /etc/loki/config.yml
-```
-
-```yaml filename="config.yaml" copy
-auth_enabled: false
-
-server:
- http_listen_port: 3100
- grpc_listen_port: 9096
-
-ingester:
- lifecycler:
- address: 127.0.0.1
- ring:
- kvstore:
- store: inmemory
- replication_factor: 1
- final_sleep: 0s
- chunk_idle_period: 5m
- chunk_retain_period: 30s
- max_transfer_retries: 0
-
-schema_config:
- configs:
- - from: 2020-10-24
- store: boltdb-shipper
- object_store: filesystem
- schema: v11
- index:
- prefix: index_
- period: 168h
-
-
-storage_config:
- boltdb:
- directory: /data/loki/index
-
- filesystem:
- directory: /data/loki/chunks
-
-limits_config:
- enforce_metric_name: false
- reject_old_samples: true
- reject_old_samples_max_age: 168h
-
-chunk_store_config:
- max_look_back_period: 0s
-
-table_manager:
- retention_deletes_enabled: false
- retention_period: 0
-```
-
-### Promtail
-
-The Promtail configuration details what logs to send to Loki. In the below configuration we are indicating
-to send the logs to Loki from the `/var/log/dkg` directory. This directory can be changed based on what logs you
-want to pick up. There are many other config options for Promtail, and you can read more about Promtail configuration at: https://grafana.com/docs/loki/latest/clients/promtail/configuration/
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/promtail/config.yml
-sudo nano /etc/promtail/config.yml
-```
-
-```yaml filename="config.yaml" copy
-server:
- http_listen_port: 9080
- grpc_listen_port: 0
-
-positions:
- filename: /data/loki/positions.yaml
-
-clients:
- - url: http://localhost:3100/loki/api/v1/push
-
-scrape_configs:
-- job_name: system
- static_configs:
- - targets:
- - localhost
- labels:
- job: varlogs
- __path__: /var/log/dkg/*log
-```
-
-## Service Setup
-
-### Loki
-
-Create and open the Loki service file:
-
-```sh filename="loki.service" copy
-sudo tee /etc/systemd/system/loki.service > /dev/null << EOF
-[Unit]
- Description=Loki Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=loki
- Group=loki
- Type=simple
- ExecStart=/usr/local/bin/loki-linux-amd64 -config.file /etc/loki/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### Promtail
-
-Create and open the Promtail service file:
-
-```sh filename="promtail.service" copy
-sudo tee /etc/systemd/system/promtail.service > /dev/null << EOF
-[Unit]
- Description=Promtail Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=promtail
- Group=promtail
- Type=simple
- ExecStart=/usr/local/bin/promtail-linux-amd64 -config.file /etc/promtail/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-Great! You have now configured all the services needed to run Loki.
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start each service:
-
-```sh filename="start service" copy
-sudo systemctl start loki.service &&
-sudo systemctl start promtail.service
-```
-
-And check that they are working fine, one by one:
-
-**loki**:
-
-```sh filename="status" copy
-systemctl status loki.service
-```
-
-**promtail**:
-
-```sh filename="status" copy
-systemctl status promtail.service
-```
-
-If everything is working adequately, activate the services!
-
-```sh filename="enable" copy
-sudo systemctl enable loki.service &&
-sudo systemctl enable promtail.service
-```
-
-Amazing! You have now successfully configured Loki for log management. Check out the Grafana
-documentation to create a Loki log dashboard!
diff --git a/pages/docs/tangle-network/node/monitoring/prometheus.mdx b/pages/docs/tangle-network/node/monitoring/prometheus.mdx
deleted file mode 100644
index bbcb3f74..00000000
--- a/pages/docs/tangle-network/node/monitoring/prometheus.mdx
+++ /dev/null
@@ -1,435 +0,0 @@
----
-title: Prometheus Setup
-description: Setup Prometheus for scraping node metrics and more.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Prometheus Setup
-
-The following is a guide outlining the steps to setup Prometheus to monitor a Tangle node. If you do not have Tangle node setup yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/). It is important to note that
-this guide's purpose is to help you get started with monitoring your Tangle node, not to advise on how to setup a node securely. Please
-take additional security and privacy measures into consideration.
-
-In this guide we will configure the following modules to scrape metrics from the running Tangle node.
-
-- **Prometheus** is the central module; it pulls metrics from different sources to provide them to the Grafana dashboard and Alert Manager.
-- **Node exporter** provides hardware metrics of the dashboard.
-- **Process exporter** provides processes metrics for the dashboard (optional).
-
-## What is Prometheus?
-
-Prometheus is an open-source systems monitoring and alerting toolkit originally built at SoundCloud. Since its inception in 2012,
-many companies and organizations have adopted Prometheus, and the project has a very active developer and user community.
-It is now a standalone open source project and maintained independently of any company. To learn more about Prometheus, please
-visit the official docs site [here](https://prometheus.io/docs/introduction/overview/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Prometheus, Process exporter, and Node exporter).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine.
-
-
-**1. Download Prometheus**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.linux-arm64.tar.gz
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/prometheus/prometheus/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.windows-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.windows-arm64.tar.gz
- ```
-
-
-
-
-**2. Download Node Exporter**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/node_exporter/releases/download/v1.40.0/node_exporter-1.4.0.linux-arm64.tar.gz
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/prometheus/node_exporter/releases).
-
-
-
-
-**3. Download Process Exporter**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-arm64.tar.gz
- ```
-
- For other linux distrubutions please visit official release page [here](https://github.com/ncabatoff/process-exporter/releases).
-
-
-
-
-**4. Extract the Downloaded Files:**
-
-Run the following command:
-
-```sh filename="tar" copy
-tar xvf prometheus-*.tar.gz &&
-tar xvf node_exporter-*.tar.gz &&
-tar xvf process-exporter-*.tar.gz
-```
-
-**5. Copy the Extracted Files into `/usr/local/bin`:**
-
-
- **Note:** The example below makes use of the `linux-amd64` installations, please update to make use of the target system you have installed.
-
-
-We are first going to copy the `prometheus` binary:
-
-```sh filename="cp" copy
-sudo cp ./prometheus-*.linux-amd64/prometheus /usr/local/bin/
-```
-
-Next, we are going to copy over the `prometheus` console libraries:
-
-```sh filename="cp" copy
-sudo cp -r ./prometheus-*.linux-amd64/consoles /etc/prometheus &&
-sudo cp -r ./prometheus-*.linux-amd64/console_libraries /etc/prometheus
-```
-
-We are going to do the same with `node-exporter` and `process-exporter`:
-
-```sh filename="cp" copy
-sudo cp ./node_exporter-*.linux-amd64/node_exporter /usr/local/bin/ &&
-sudo cp ./process-exporter-*.linux-amd64/process-exporter /usr/local/bin/
-```
-
-**6. Create Dedicated Users:**
-
-Now we want to create dedicated users for each of the modules we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin prometheus &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin node_exporter &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin process-exporter
-```
-
-**7. Create Directories for `Prometheus`, and `Process exporter`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /var/lib/prometheus &&
-sudo mkdir /etc/process-exporter
-```
-
-**8. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-**prometheus**:
-
-```sh filename="chown" copy
-sudo chown prometheus:prometheus /etc/prometheus/ -R &&
-sudo chown prometheus:prometheus /var/lib/prometheus/ -R &&
-sudo chown prometheus:prometheus /usr/local/bin/prometheus
-```
-
-**node_exporter**:
-
-```sh filename="chwon" copy
-sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
-```
-
-**process-exporter**:
-
-```sh filename="chown" copy
-sudo chown process-exporter:process-exporter /etc/process-exporter -R &&
-sudo chown process-exporter:process-exporter /usr/local/bin/process-exporter
-```
-
-**9. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./prometheus* &&
-rm -rf ./node_exporter* &&
-rm -rf ./process-exporter*
-```
-
-Great! You have now installed and setup your environment. The next series of steps will be configuring each service.
-
-## Configuration
-
-If you are interested to see how we configure the Tangle Network nodes for monitoring check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Prometheus
-
-Let’s edit the Prometheus config file and add all the modules in it:
-
-```sh filename="nano" copy
-sudo nano /etc/prometheus/prometheus.yml
-```
-
-Add the following code to the file and save:
-
-```yaml filename="promtheus.yml" copy
-global:
- scrape_interval: 15s
- evaluation_interval: 15s
-
-rule_files:
- - 'rules.yml'
-
-alerting:
- alertmanagers:
- - static_configs:
- - targets:
- - localhost:9093
-
-scrape_configs:
- - job_name: "prometheus"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9090"]
- - job_name: "substrate_node"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9615"]
- - job_name: "node_exporter"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9100"]
- - job_name: "process-exporter"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9256"]
-```
-
-- **scrape_interval** defines how often Prometheus scrapes targets, while evaluation_interval controls how often the software will evaluate rules.
-- **rule_files** set the location of Alert manager rules we will add next.
-- **alerting** contains the alert manager target.
-- **scrape_configs** contain the services Prometheus will monitor.
-
-You can notice the first scrap where Prometheus monitors itself.
-
-### Process exporter
-
-Process exporter needs a config file to be told which processes they should take into account:
-
-```sh filename="nano" copy
-sudo touch /etc/process-exporter/config.yml
-sudo nano /etc/process-exporter/config.yml
-```
-
-Add the following code to the file and save:
-
-```sh filename="config.yml" copy
-process_names:
- - name: "{{.Comm}}"
- cmdline:
- - '.+'
-```
-
-## Service Setup
-
-### Prometheus
-
-Create and open the Prometheus service file:
-
-```sh filename="promtheus.service" copy
-sudo tee /etc/systemd/system/prometheus.service > /dev/null << EOF
-[Unit]
- Description=Prometheus Monitoring
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=prometheus
- Group=prometheus
- Type=simple
- ExecStart=/usr/local/bin/prometheus \
- --config.file /etc/prometheus/prometheus.yml \
- --storage.tsdb.path /var/lib/prometheus/ \
- --web.console.templates=/etc/prometheus/consoles \
- --web.console.libraries=/etc/prometheus/console_libraries
- ExecReload=/bin/kill -HUP $MAINPID
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-### Node exporter
-
-Create and open the Node exporter service file:
-
-```sh filename="node_exporter.service" copy
-sudo tee /etc/systemd/system/node_exporter.service > /dev/null << EOF
-[Unit]
- Description=Node Exporter
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=node_exporter
- Group=node_exporter
- Type=simple
- ExecStart=/usr/local/bin/node_exporter
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-### Process exporter
-
-Create and open the Process exporter service file:
-
-```sh filename="process-exporter.service" copy
-sudo tee /etc/systemd/system/process-exporter.service > /dev/null << EOF
-[Unit]
- Description=Process Exporter
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=process-exporter
- Group=process-exporter
- Type=simple
- ExecStart=/usr/local/bin/process-exporter \
- --config.path /etc/process-exporter/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="deamon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start each service:
-
-**prometheus**:
-
-```sh filename="start serive" copy
-sudo systemctl start prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="start serive" copy
-sudo systemctl start node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="start serive" copy
-sudo systemctl start process-exporter.service
-```
-
-And check that they are working fine:
-
-**prometheus**:
-
-```sh filename="status" copy
-systemctl status prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="status" copy
-systemctl status node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="status" copy
-systemctl status process-exporter.service
-```
-
-If everything is working adequately, activate the services!
-
-**prometheus**:
-
-```sh filename="enable" copy
-sudo systemctl enable prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="enable" copy
-sudo systemctl enable node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="enable" copy
-sudo systemctl enable process-exporter.service
-```
-
-Amazing! We have now completely setup our Prometheus monitoring and are scraping metrics from our
-running Tangle node.
-
-You can view those metrics on the Prometheus dashboard by going to `http://localhost:9090/metrics` !
diff --git a/pages/docs/tangle-network/node/monitoring/quickstart.mdx b/pages/docs/tangle-network/node/monitoring/quickstart.mdx
deleted file mode 100644
index a39eae5b..00000000
--- a/pages/docs/tangle-network/node/monitoring/quickstart.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: Quickstart
-description: Creating monitoring stack for Tangle node.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Monitoring Tangle Node
-
-The following is a guide outlining the steps to setup monitoring for an Tangle node. If you do not have Tangle node setup yet, please
-review the **How to run an Tangle node** setup guide [here](https://docs.webb.tools/v1/node-operators/run-tangle-node). It is important to note that
-this guide's purpose is to help you get started with monitoring your Tangle node, not to advise on how to setup a node securely. Please
-take additional security and privacy measures into consideration.
-
-Here is how our final configuration will look like at the end of this guide.
-
-- **Prometheus** is the central module; it pulls metrics from different sources to provide them to the Grafana dashboard and Alert Manager.
-- **Grafana** is the visual dashboard tool that we access from the outside (through SSH tunnel to keep the node secure).
-- **Alert Manager** listens to Prometheus metrics and pushes an alert as soon as a threshold is crossed (CPU % usage for example).
-- **Tangle Node** natively provides metrics for monitoring.
-- **Process exporter** provides processes metrics for the dashboard (optional).
-- **Loki** provides log aggregation system and metrics.
-- **Promtail** is the agent responsible for gathering logs, and sending them to Loki.
-
-
- Running the monitoring stack requires that you are already running the tangle network node with at least the following ports exports:
- - Prometheus : `https://localhost:9615`
-
-
-## Docker usage
-
-The quickest way to setup monitoring for your node is to use our provided `docker-compose` file. The docker image starts all the above monitoring
-tools with the exception of `Node exporter`. `node-exporter` is ommitted since some metrics are not available when running inside a docker container.
-
-Follow the instructions [here](/prometheus) to start the prometheus node exporter.
-
-### Prerequisites
-
-Before starting the monitoring stack, ensure the configs are setup correctly,
-
-- (Optional) Set the `__SLACK_WEBHOOK_URL__` in `alertmanager.yml` to receive slack alerts
-- Ensure the promtail mount path matches your log directory
-
-Note : All containers require connection to the localhost, this behaviour is different in Linux/Windows/Mac, the configs within the `docker-compose` and yml
-files assume a linux environment. Refer [this](https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach) to make necessary adjustments for your environment.
-
-### Usage
-
-**To start the monitoring stack, run:**
-
-```sh filename="compose up" copy
-cd monitoring
-docker compose up -d
-```
-
-You can then navigate to `http://localhost:3000` to access the Grafana dashboard!
-
-![Tangle Dashboard](../../../../../components/images/tangle-metrics.png)
diff --git a/pages/docs/tangle-network/node/node-software.mdx b/pages/docs/tangle-network/node/node-software.mdx
deleted file mode 100644
index 885d46b1..00000000
--- a/pages/docs/tangle-network/node/node-software.mdx
+++ /dev/null
@@ -1,180 +0,0 @@
----
-title: Node Software, Binaries and Sources
-description: An overview of Tangle Network software.
----
-
-import { Tabs, Tab } from "/components/Tabs";
-import Callout from "/components/Callout";
-
-# Software and Dependencies
-
-## Binaries
-
-Every release of tangle node includes a Precompiled binary, its currently limited to amd-64 architecture but we plan to
-support more soon. You can view all releases [here](https://github.com/webb-tools/tangle/releases).
-
-In the below commands, substitute `LATEST_RELEASE` with the version you want to use, the current latest version is `0.4.6`
-
-**Get tangle binary**
-
-```sh filename="Get binary" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-linux-amd64
-```
-
-**Get tangle binary with txpool feature**
-
-```sh filename="Get binary txpool" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-txpool-linux-amd64
-```
-
-**Get tangle binary with relayer feature**
-
-```sh filename="Get binary relayer" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-relayer-linux-amd64
-```
-
-**Get tangle binary with light-client feature**
-
-```sh filename="Get binary light" copy
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-light-client-linux-amd64
-```
-
-### Synchronize Chain Data
-
-Once installed, you can begin syncing your node by running the following command. **Your path may vary depend on whether you used the binary, or built from source**:
-
-```sh filename="Syncing node" copy
-./tangle-standalone
-```
-
-Once your node has fully syncronized with Tangle Network you may proceed to setup the
-necessary accounts to operate a validator or other operations.
-
-## Alternative: Build From Source
-
-In order to build a Tangle node from source your machine must have specific dependencies installed. This guide
-outlines those requirements.
-
-#### Rust Dependency
-
-This guide uses [https://rustup.rs](https://rustup.rs) installer and the `rustup` tool to manage the Rust toolchain. Rust is required to
-compile a Tangle node.
-
-First install and configure `rustup`:
-
-```sh filename="Install Rust" copy
-# Install
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-
-# Configure
-source ~/.cargo/env
-```
-
-Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target:
-
-```sh filename="Configure Rust" copy
-rustup default nightly
-rustup update
-rustup update nightly
-rustup target add wasm32-unknown-unknown --toolchain nightly
-```
-
-#### Substrate Dependencies
-
-
-
-
- Debian version:
- ```sh filename=" Debian" copy
- sudo apt install --assume-yes git clang curl libssl-dev llvm libudev-dev make protobuf-compiler
- ```
- Arch version:
- ```sh filename="Arch" copy
- pacman -Syu --needed --noconfirm curl git clang make protobuf
- ```
- Fedora version:
- ```sh filename="Fedora" copy
- sudo dnf update
- sudo dnf install clang curl git openssl-devel make protobuf-compiler
- ```
- Opensuse version:
- ```sh filename="Opensuse" copy
- sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel make protobuf
- ```
-
- Remember that different distributions might use different package managers and bundle packages in different ways.
- For example, depending on your installation selections, Ubuntu Desktop and Ubuntu Server might have different packages
- and different requirements. However, the packages listed in the command-line examples are applicable for many common Linux
- distributions, including Debian, Linux Mint, MX Linux, and Elementary OS.
-
-
-
-
- Assumes user has Homebrew already installed.
-
- ```sh filename="Brew" copy
- brew update
- brew install openssl gmp protobuf cmake
- ```
-
-
-
-
- For Windows users please refer to the official Substrate documentation:
- [Windows](https://docs.substrate.io/install/windows/)
-
-
-
-
----
-
-Once the development environment is set up, you can build the Tangle node from source.
-
-```sh filename="Clone repo" copy
-git clone https://github.com/webb-tools/tangle.git
-```
-
-```sh filename="Build" copy
-cargo build --release
-```
-
-> NOTE: You _must_ use the release builds! The optimizations here are required
-> as in debug mode, it is expected that nodes are not able to run fast enough to produce blocks.
-
-You will now have the `tangle-standalone` binary built in `target/release/` dir
-
-### Synchronize Chain Data
-
-Once installed, you can begin syncing your node by running the following command. **Your path may vary depend on whether you used the binary, or built from source**:
-
-```sh filename="Syncing node" copy
-./target/release/tangle-standalone
-```
-
-Once your node has fully syncronized with Tangle Network you may proceed to setup the
-necessary accounts to operate a validator or other operations.
-
-### Configuring your Build from Source with Feature Flags
-
-Some features of tangle node are setup behind feature flags, to enable these features you will have to build the binary with these flags enabled
-
-1. **txpool**
- This feature flag is useful to help trace and debug evm transactions on the chain, you should build node with this flag if you intend to use the node for any evm transaction following
-
-```sh filename="Build txpool" copy
-cargo build --release --features txpool
-```
-
-2. **relayer**
- This feature flag is used to start the embedded tx relayer with tangle node, you should build node with this flag if you intend to run a node with a relayer which can be used for transaction relaying or data querying
-
-```sh filename="Build relayer" copy
-cargo build --release --features relayer
-```
-
-3. **light-client**
- This feature flag is used to start the embedded light client with tangle node, you should build node with this flag if you intend to run a node with a light client relayer to sync EVM data on Tangle
-
-```sh filename="Build light" copy
-cargo build --release --features light-client
-```
diff --git a/pages/docs/tangle-network/node/quicknode.mdx b/pages/docs/tangle-network/node/quicknode.mdx
deleted file mode 100644
index d6088d24..00000000
--- a/pages/docs/tangle-network/node/quicknode.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: Quickstart
-description: Run a Tangle Validator node using systemd.
----
-
-# Tangle Validator Quickstart
-
-**Caution:** The following guide is only meant as a quickstart for anyone looking to run a tangle node with minimal
-config, this guide uses automated keys and it is not recommended to run a validator using this setup long term, refer to [advanced](/docs/ecosystem-roles/validator/systemd/validator-node/) guide
-for a more secure long term setup.
-
-Before following this guide you should have already set up your machines environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Requirements](/docs/ecosystem-roles/validator/requirements/) page.
-
-## Standalone Testnet
-
-### 1. Fetch the tangle binary
-
-Use the latest release version in the url in place of ``, you can visit [releases](https://github.com/webb-tools/tangle/releases) page to view the latest info
-
-```
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-linux-amd64
-```
-
-For example, at the time of writing this document, the latest release is v0.4.7 and the link would be as follows
-
-```
-wget https://github.com/webb-tools/tangle/releases/download/v0.4.7/tangle-standalone-linux-amd64
-```
-
-### 2. Start the node binary
-
-To start the binary you can run the following command (ensure you are in the same folder where tangle-standalone is downloaded)
-
-Make sure to change the following params before executing the command
-
-1. `` : This is the path where your chain DB will live
-2. `` : This is a unique node name for your node, use a unique name here to help identity your node to other validators and telemetry data
-
-```
-./tangle-standalone-linux-amd64 \
- --base-path \
- --name \
- --chain tangle-testnet \
- --port 9944 \
- --validator \
- --auto-insert-keys \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-```
-
-If the node is running correctly, you should see an output similar to below:
-
-```
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-**Note** : Since the `--auto-insert-keys` flag was used the logs will print out the keys automatically generated for you,
-make sure to note down and keep this safely, in case you need to migrate or restart your node, these keys are essential.
-
-Congratulations! You have officially setup an Tangle Network node. The quickstart is only meant as a quickstart for anyone looking to run a tangle node with minimal
-config, this guide uses automated keys and it is not recommended to run a validator using this setup long term, refer to [advanced](/docs/ecosystem-roles/validator/systemd/validator-node/) guide
-for a more secure long term setup.. If you are interested
-in learning how to setup monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/tangle-network/node/quickstart.mdx b/pages/docs/tangle-network/node/quickstart.mdx
deleted file mode 100644
index 1d290a43..00000000
--- a/pages/docs/tangle-network/node/quickstart.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Node Operator Quickstart
-description: Participate in the Webb ecosystem by deploying a Tangle node, to validate, serve data or more.
----
-
-import { QuickDeployArea, DeployArea, SupportArea, MonitoringArea } from "../../../../components/TangleQuickstart"
-import { RepoArea } from "../../../../components/RepoArea";
-import FullWebbCTA from "../../../../components/FullWebbCTA";
-
-# Node Operator Quickstart
-
-Becoming a node operator on the Tangle Network requires some technical skills, trust, and support from the community. Below
-is a collection of quick links for quick setups!
-
-## Validators
-
-Please see our new **[Start Validating guide.](/docs/tangle-network/node/validator/requirements)**
-
-## Quick Setup for Node Operators
-
-
-
-## Advanced Setup
-
-
-
-## Monitoring
-
-Monitoring and troubleshooting your Tangle node is essential, and we provide setup instructions to make it incredibly easy to get started!
-
-
-
-## Support Channels
-
-Run into weird issues? Or have questions about the Tangle Network? Join the Webb community and become connected to the entire Webb ecosystem.
-
-
-
-## Repositories
-
-Interested in what we are building at Webb? Clone the below repositories, and start contributing to a private cross-chain future!
-
-
-
-
diff --git a/pages/docs/tangle-network/node/systemd.mdx b/pages/docs/tangle-network/node/systemd.mdx
deleted file mode 100644
index 39ee712c..00000000
--- a/pages/docs/tangle-network/node/systemd.mdx
+++ /dev/null
@@ -1,375 +0,0 @@
----
-title: Systemd Node Operation
-description: Run a Tangle full node or Validator node using systemd.
----
-
-import { Tabs } from 'nextra/components'
-
-# Running with Systemd
-
-You can run your **full** or **validator** node as a systemd process so that it will automatically restart on server reboots or crashes, helping to avoid getting slashed. This guide now includes additional steps for setting up dependencies and Rust configuration, ensuring a smoother setup process.
-
-Before following this guide, ensure that your machine's environment is set up and the Tangle binary is compiled. If you haven't done so, please refer to the [Requirements](https://docs.webb.tools/docs/ecosystem-roles/validator/requirements/) page.
-
-## Setup
-
-### 1. Fetch the Tangle Network Binary
-
-Use the latest release version in the url in place of ``, you can visit [releases](https://github.com/webb-tools/tangle/releases) page to view the latest info
-
-```
-wget https://github.com/webb-tools/tangle/releases/download//tangle-standalone-linux-amd64
-```
-
-For example, at the time of writing this document, the latest release is v0.4.7 and the link would be as follows
-
-```
-wget https://github.com/webb-tools/tangle/releases/download/v0.4.7/tangle-standalone-linux-amd64
-```
-
-### 2. Install Dependencies
-
-Ensure all necessary dependencies are installed:
-
-```sh
-sudo apt update && sudo apt upgrade -y
-sudo apt install curl iptables build-essential git wget jq make gcc nano tmux htop nvme-cli pkg-config libssl-dev libleveldb-dev libgmp3-dev tar clang bsdmainutils ncdu unzip llvm libudev-dev make protobuf-compiler -y
-```
-
-### 3. Install and Configure Rust
-
-```sh
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-# choose option 1
-source $HOME/.cargo/env
-rustup default nightly
-rustup update
-rustup update nightly
-rustup target add wasm32-unknown-unknown --toolchain nightly
-```
-
-### 4. Select and Run Your Node Type
-
-
-
-
- #### Create the Service Configuration File
- Run the following commands to create the service configuration file:
-
- ```sh filename="mv" copy
- # Move the tangle-standalone binary to the bin directory (assumes you are in repo root directory)
- sudo mv ./target/release/tangle-standalone /usr/bin/
- ```
-
- Add the following contents to the service configuration file. Make sure to replace the **USERNAME** with the username you created in the previous step, add your own node name, and update
- any paths or ports to your own preference.
-
- **Note:** The below configuration assumes you are targeting the Tangle Network chainspec.
-
- **Full Node Service Configuration File**
-
- ```sh filename="full.service" copy
- sudo tee /etc/systemd/system/full.service > /dev/null << EOF
- [Unit]
- Description=Tangle Full Node
- After=network-online.target
- StartLimitIntervalSec=0
-
- [Service]
- User=
- Restart=always
- RestartSec=3
- ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/full-node \
- --name \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --rpc-cors all \
- --port 9946 \
- --no-mdns \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-
- [Install]
- WantedBy=multi-user.target
- EOF
- ```
- #### Enable the services
-
- After ensuring the config is correctly written to /etc/systemd/system/full.service, enable and start the service:
-
- ```sh filename="enable service" copy
- sudo systemctl daemon-reload
- sudo systemctl enable full
- sudo systemctl start full
- ```
- **Check the Status of the Service**
- ```sh filename="status" copy
- sudo systemctl status full
- ```
- You should see the node connecting to the network and syncing with the latest blocks.
-
- **Tail the Latest Outputs**
- ```sh filename="logs" copy
- sudo journalctl -u full.service -f
- ```
-
- #### Network sync
-
- After a full node is started, it will start syncing with the current chain state. Depending on the size of the chain when you do this, this step may take anywhere from a few minutes to a few hours.
-
- Example of node sync :
-
- ```sh filename="output after synced" copy
- 2021-06-17 03:07:39 🔍 Discovered new external address for our node: /ip4/10.26.16.1/tcp/30333/ws/p2p/12D3KooWLtXFWf1oGrnxMGmPKPW54xWCHAXHbFh4Eap6KXmxoi9u
- 2021-06-17 03:07:40 ⚙️ Syncing 218.8 bps, target=#5553764 (17 peers), best: #24034 (0x08af…dcf5), finalized #23552 (0xd4f0…2642), ⬇ 173.5kiB/s ⬆ 12.7kiB/s
- 2021-06-17 03:07:45 ⚙️ Syncing 214.8 bps, target=#5553765 (20 peers), best: #25108 (0xb272…e800), finalized #25088 (0x94e6…8a9f), ⬇ 134.3kiB/s ⬆ 7.4kiB/s
- 2021-06-17 03:07:50 ⚙️ Syncing 214.8 bps, target=#5553766 (21 peers), best: #26182 (0xe7a5…01a2), finalized #26112 (0xcc29…b1a9), ⬇ 5.0kiB/s ⬆ 1.1kiB/s
- 2021-06-17 03:07:55 ⚙️ Syncing 138.4 bps, target=#5553767 (21 peers), best: #26874 (0xcf4b…6553), finalized #26624 (0x9dd9…27f8), ⬇ 18.9kiB/s ⬆ 2.0kiB/s
- 2021-06-17 03:08:00 ⚙️ Syncing 37.0 bps, target=#5553768 (22 peers), best: #27059 (0x5b73…6fc9), finalized #26624 (0x9dd9…27f8), ⬇ 14.3kiB/s ⬆ 4.4kiB/s
- ```
-
-
-
-
- #### Generate and Store Keys
-
- We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys](https://wiki.polkadot.network/docs/learn-cryptography) section.
- The keys we need to generate include the following:
-
- - DKG key (Ecdsa)
- - Aura key (Sr25519)
- - Account key (Sr25519)
- - Grandpa key (Ed25519)
- - ImOnline key (Sr25519)
-
- Let's now insert our required secret keys, we will not pass the SURI in the command, instead it will be interactive, where you
- should paste your SURI when the command asks for it.
-
- **Account Keys**
-
- ```sh filename="Acco" copy
- # it will ask for your suri, enter it.
- ./target/release/tangle-standalone key insert --base-path /data/validator/ \
- --chain ./chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
-    --suri <"12-MNEMONIC-PHRASE"> \
- --key-type acco
- ```
-
- **Aura Keys**
-
- ```sh filename="Aura" copy
- # it will ask for your suri, enter it.
- ./target/release/tangle-standalone key insert --base-path /data/validator/ \
- --chain ./chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
-    --suri <"12-MNEMONIC-PHRASE"> \
- --key-type aura
- ```
-
- **Im-online Keys** - **these keys are optional**
-
- ```sh filename="Imonline" copy
- # it will ask for your suri, enter it.
- ./target/release/tangle-standalone key insert --base-path /data/validator/ \
- --chain ./chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
-    --suri <"12-MNEMONIC-PHRASE"> \
- --key-type imon
- ```
-
- **DKG Keys**
-
- ```sh filename="DKG" copy
- # it will ask for your suri, enter it.
- ./target/release/tangle-standalone key insert --base-path /data/validator/ \
- --chain ./chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
-    --suri <"12-MNEMONIC-PHRASE"> \
- --key-type wdkg
- ```
-
- **Grandpa Keys**
-
- ```sh filename="Grandpa" copy
- # it will ask for your suri, enter it.
- ./target/release/tangle-standalone key insert --base-path /data/validator/ \
- --chain ./chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
-    --suri <"12-MNEMONIC-PHRASE"> \
- --key-type gran
- ```
-
- To ensure you have successfully generated the keys correctly run:
-
- ```sh filename="ls" copy
- ls ~/data/validator//keystore
- # You should see a some file(s) there, these are the keys.
- ```
-
- ## System service setup
-
- Run the following commands to create the service configuration file:
-
- ```sh filename="mv" copy
- # Move the tangle-standalone binary to the bin directory (assumes you are in repo root directory)
- sudo mv ./target/release/tangle-standalone /usr/bin/
- ```
-
- Add the following contents to the service configuration file. Make sure to replace the **USERNAME** with the username you created in the previous step, add your own node name, and update any paths or ports to your own preference.
-
- **Note:** The below configuration assumes you are targeting the Tangle Network chainspec.
-
- **Caution:** Ensure you insert the keys using the instructions at [generate keys](#generate-and-store-keys),
- if you want the node to auto generate the keys, add the `--auto-insert-keys` flag.
-
- **Validator Node**
-
- ```sh filename="validator.service" copy
- sudo tee /etc/systemd/system/validator.service > /dev/null << EOF
- [Unit]
- Description=Tangle Validator Node
- After=network-online.target
- StartLimitIntervalSec=0
-
- [Service]
- User=
- Restart=always
- RestartSec=3
- ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/validator/ \
- --name \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --port 30333 \
- --validator \
- --no-mdns \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-
- [Install]
- WantedBy=multi-user.target
- EOF
- ```
- #### Enable Validator Node
-
- Double check that the config has been written to `/etc/systemd/system/validator.service` correctly.
- If so, enable the service so it runs on startup, and then try to start it now:
-
- ```sh filename="enable service" copy
- sudo systemctl daemon-reload
- sudo systemctl enable validator
- sudo systemctl start validator
- ```
-
- Check the status of the service:
-
- ```sh filename="status" copy
- sudo systemctl status validator
- ```
-
- You should see the node connecting to the network and syncing the latest blocks.
- If you need to tail the latest output, you can use:
-
- ```sh filename="logs" copy
- sudo journalctl -u validator.service -f
- ```
-
- If the node is running correctly, you should see an output similar to below:
-
- ```sh filename="output"
- 2023-03-22 14:55:51 Tangle Standalone Node
- 2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
- 2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
- 2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
- 2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
- 2023-03-22 14:55:51 👤 Role: FULL
- 2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
- 2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
- 2023-03-22 14:55:51 Bn254 x5 w3 params
- 2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
- 2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
- 2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
- 2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
- 2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
- 2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
- 2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
- 2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
- 2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
- 2023-03-22 14:55:53 💻 Operating system: macos
- 2023-03-22 14:55:53 💻 CPU architecture: aarch64
- 2023-03-22 14:55:53 📦 Highest known block at #0
- 2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
- 2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
- 2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
- 2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
- 2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
- 2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
- 2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
- ```
-
- #### Network sync
-
- After a validator node is started, it will start syncing with the current chain state. Depending on the size of the chain when you do this, this step may take anywhere from a few minutes to a few hours.
-
- Example of node sync :
-
- ```sh filename="output after synced" copy
- 2021-06-17 03:07:39 🔍 Discovered new external address for our node: /ip4/10.26.16.1/tcp/30333/ws/p2p/12D3KooWLtXFWf1oGrnxMGmPKPW54xWCHAXHbFh4Eap6KXmxoi9u
- 2021-06-17 03:07:40 ⚙️ Syncing 218.8 bps, target=#5553764 (17 peers), best: #24034 (0x08af…dcf5), finalized #23552 (0xd4f0…2642), ⬇ 173.5kiB/s ⬆ 12.7kiB/s
- 2021-06-17 03:07:45 ⚙️ Syncing 214.8 bps, target=#5553765 (20 peers), best: #25108 (0xb272…e800), finalized #25088 (0x94e6…8a9f), ⬇ 134.3kiB/s ⬆ 7.4kiB/s
- 2021-06-17 03:07:50 ⚙️ Syncing 214.8 bps, target=#5553766 (21 peers), best: #26182 (0xe7a5…01a2), finalized #26112 (0xcc29…b1a9), ⬇ 5.0kiB/s ⬆ 1.1kiB/s
- 2021-06-17 03:07:55 ⚙️ Syncing 138.4 bps, target=#5553767 (21 peers), best: #26874 (0xcf4b…6553), finalized #26624 (0x9dd9…27f8), ⬇ 18.9kiB/s ⬆ 2.0kiB/s
- 2021-06-17 03:08:00 ⚙️ Syncing 37.0 bps, target=#5553768 (22 peers), best: #27059 (0x5b73…6fc9), finalized #26624 (0x9dd9…27f8), ⬇ 14.3kiB/s ⬆ 4.4kiB/s
- ```
-
- #### Bond TNT and setup validator Account
-
- After your node is synced, you are ready to setup keys and onboard as a validator, make sure to complete the steps
- at [Start Validating](/docs/tangle-network/node/validator/requirements) to start validating.
-
-
-
- The following is the service configuration file, use this while completing the Full Node guide.
- **Note:** To run with evm trace, you should use a binary built with `txpool` flag, refer [requirements](/docs/ecosystem-roles/validator/requirements/) page for more details.
-
- ```sh filename="full.service" copy
- sudo tee /etc/systemd/system/full.service > /dev/null << EOF
- [Unit]
- Description=Tangle Full Node
- After=network-online.target
- StartLimitIntervalSec=0
-
- [Service]
- User=
- Restart=always
- RestartSec=3
- ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/full-node \
- --name \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --rpc-cors all \
- --port 9946 \
- --no-mdns --ethapi trace,debug,txpool
-
- [Install]
- WantedBy=multi-user.target
- EOF
- ```
-
-
-
-
-Congratulations! You have officially setup a Tangle Network node using Systemd.
-
-## Monitoring
-
-To setup monitoring for your node, please refer to the [monitoring](./monitoring/quickstart.mdx) page.
-
-## Begin Validating
-
-Now that your node is setup, [continue onto our Validator guides to understand token bonding and more.](./validator/requirements.mdx)
-
-## Support and Questions
-
-Visit our [Discord's validator channel](https://webb.tools/community) for community assistance.
diff --git a/pages/docs/tangle-network/node/troubleshooting.mdx b/pages/docs/tangle-network/node/troubleshooting.mdx
deleted file mode 100644
index d46c751c..00000000
--- a/pages/docs/tangle-network/node/troubleshooting.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-title: Troubleshooting
-description: Provides a series of suggestive fixes that are common issues when starting a Tangle node.
----
-
-import Callout from '/components/Callout';
-
-## Troubleshooting
-
-
-**Logs**
-
-If you'd like to run the node with verbose logs, you may add the following arguments during initial setup. Adjust the target for the desired logging level (debug | error | info | trace | warn):
-
-```bash
--ldkg=debug \
--ldkg_metadata=debug \
--lruntime::offchain=debug \
--ldkg_proposal_handler=debug \
--ldkg_proposals=debug
-```
-
-
-
-### P2P Ports Not Open
-
-If you don't see an "Imported" message (without the [Relaychain] tag), check the P2P port configuration. Ensure the P2P port is open to incoming traffic.
-
-### In Sync
-
-Both chains must be in sync at all times. Look for "Imported" or "Idle" messages and ensure you have connected peers.
-
-### Genesis Mismatching
-
-If you notice log messages like:
-
-```bash
-DATE [Relaychain] Bootnode with peer id ID is on a different chain (our genesis: 0x3f5... theirs: 0x45j...)
-```
-
-You may be running an older version and need to upgrade.
-
-### Troubleshooting for Apple Silicon users
-
-#### Homebrew and PATH Configuration
-
-If you haven't installed Homebrew: https://brew.sh/
-
-```bash
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
-```
-
-Make sure Homebrew is up-to-date, install openssl
-
-```bash
-brew update
-brew install openssl
-```
-
-After installation, ensure you've added /opt/homebrew/bin to your PATH:
-
-```bash
-echo 'export PATH=/opt/homebrew/bin:$PATH' >> ~/.bash_profile
-```
-
-#### Building dkg-substrate
-
-To build dkg-substrate in --release mode using the aarch64-apple-darwin Rust toolchain, set the required environment variables:
-
-```bash
-echo 'export RUSTFLAGS="-L /opt/homebrew/lib"' >> ~/.bash_profile
-```
-
-#### Dependencies
-
-1. GMP: Ensure the gmp dependency is correctly installed.
-
-```bash
-brew install gmp
-```
-
-2. If you're still facing an issue with gmp, adjust your path to the gmp lib:
-
-```bash
-cargo clean
-export LIBRARY_PATH=$LIBRARY_PATH:$(brew --prefix)/lib:$(brew --prefix)/opt/gmp/lib
-```
-
-Add the above export to your bash_profile as well.
-
-3. Ensure the Protobuf dependency is correctly installed:
-
-```bash
-brew install protobuf
-```
diff --git a/pages/docs/tangle-network/node/validator/_meta.json b/pages/docs/tangle-network/node/validator/_meta.json
deleted file mode 100644
index 149c229c..00000000
--- a/pages/docs/tangle-network/node/validator/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "requirements": "Start Validating",
- "proxyaccount": "Proxy Accounts",
- "validator-rewards": "Rewards"
-}
diff --git a/pages/docs/tangle-network/node/validator/proxyaccount.mdx b/pages/docs/tangle-network/node/validator/proxyaccount.mdx
deleted file mode 100644
index b2ede238..00000000
--- a/pages/docs/tangle-network/node/validator/proxyaccount.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
-# Setting Up a Proxy Account
-
-A proxy account allows you to delegate some functionalities to another account, which can act on behalf of the primary account. Polkadot.js Apps also provides an option to create a time-delayed proxy, enhancing security by giving the primary account time to review and potentially cancel transactions before they are executed.
-
-## Creating a Proxy Account
-
-### Using the Extrinsics Page
-
-1. **Navigate to the Extrinsics Page**:
-
- - Click on the `Developer` tab.
- - Select `Extrinsics` from the dropdown.
-
-2. **Input Proxy Details**:
-
- - Select your primary account.
- - From the dropdown, choose `proxy` > `addProxy`.
- - Specify the delegate account for the proxy.
- - Choose `Balances` from the `proxyType` dropdown.
- - Optionally, set a time delay (in block numbers) to add a waiting period before the proxy can act.
-
-3. **Finalize the Proxy**:
- - Click `Submit Transaction`.
- - Authorize and sign the transaction to establish the proxy relationship.
- - A confirmation will appear once the transaction is successful.
-
-### Using the Accounts Page
-
-1. **Navigate to Your Accounts**:
-
- - Go to the `Accounts` page.
- - Find your primary account and click on the three vertical dots next to it.
- - Select `Add proxy` (If the account already has a proxy, you'll see `Manage proxies`).
-
-2. **Specify Proxy Details**:
- - A pop-up will appear.
- - Choose the account you wish to set as a proxy.
- - Define the type of proxy.
- - Click `Add Proxy`, then `Submit`, and sign the transaction.
-
-## Verifying Your Proxy Account
-
-Once you've set up a proxy account, it's essential to verify that it's configured correctly.
-
-### Using the Chain State Page
-
-1. **Navigate to Chain State**:
-
- - From the dropdown, choose `proxy` > `proxies`.
- - Select your primary/proxied account.
- - Click on the `+` button to send the query.
-
-2. **Review Proxy Details**:
- - Results will display information about your proxies, including the proxy account address, type, delay period (if set), and the total bond amount.
-
-### Using the Accounts Page
-
-1. **Go to Your Accounts**:
-
- - On the `Accounts` page, find the Proxy symbol next to your primary account.
- - Hover over the icon and click `Manage proxies` to see your proxies.
-
-2. **Inspect Proxy Overview**:
- - A pop-up will show an overview of all your proxy accounts.
diff --git a/pages/docs/tangle-network/node/validator/requirements.mdx b/pages/docs/tangle-network/node/validator/requirements.mdx
deleted file mode 100644
index e3a9a9e1..00000000
--- a/pages/docs/tangle-network/node/validator/requirements.mdx
+++ /dev/null
@@ -1,211 +0,0 @@
----
-title: Start Validating on Tangle
-description: An overview of Tangle Network's validator registration process.
----
-
-import { Tabs, Tab } from "/components/Tabs";
-import Callout from "/components/Callout";
-
-# Introduction
-
-Becoming a validator on a decentralized network like Tangle is a big responsibility. You are accountable for both your stake and the stake of your nominators. Any errors could lead to slashing of tokens, impacting your balance and reputation. However, there are also rewards - you help secure a decentralized network and can grow your stake through nominations.
-
-To become a validator, you need substantial system administration skills to set up infrastructure and resolve anomalies independently. Follow security best practices, as this is crucial for success. The validator role involves more than just running a node.
-
-You don't have to go it alone. Connect with experienced teams and fellow validators in communities like the Tangle [Discord Validator channel.](https://webb.tools/community) They can provide invaluable insights and support. Carefully weigh the risks and rewards, prepare thoroughly, and leverage the community.
-
-Generally, the process for becoming a validator involves three steps:
-
-1. **Bonding Tokens:** Before a node can become a validator, the node operator usually needs to bond (or stake) a certain amount of tokens. This is a way of putting up collateral that can be slashed (or forfeited) if the validator behaves maliciously or fails to properly validate transactions and blocks. Bonding is a way of ensuring that validators have a vested interest in properly maintaining the network.
-
-
-**How much TNT do I need to be an active Validator?**
-
-To be elected to the active validator set (to receive block rewards), you need a minimum stake behind your validator. This can come from yourself or nominators. This means at a minimum, you'll need enough TNT for stash and staking accounts with the existential deposit, plus extra for fees. The rest can come from nominators. To understand validator election, check the [NPoS election algorithms page.](https://wiki.polkadot.network/docs/learn-phragmen)
-
-In the future, validators may be able to participate in other forms of reward-winning activities without participating in block rewards.
-
-
-
-2. **Setting Up Validator Infrastructure:** This includes ensuring that the node is properly configured, connected to the network, has the necessary keys set up, etc. Part of this setup will involve generating and injecting session keys (like DKG, Aura, Grandpa, etc.) which are crucial for various consensus and validation processes.
-
-3. **Nominating or Registering as a Validator:** After bonding tokens and setting up the validator node, the operator then registers or nominates their node as a validator candidate. This involves submitting a transaction to the network indicating their intention to validate.
-
-# Launch a Validator
-
-
-The following guide assumes you have a node operating and synced with the network. If not, see the following:
-1. [Hardware Specifications](../hardware/)
-2. [Node Software](../node-software/)
-3. [Run Node with Docker](../deploy-with-docker/validator-node.mdx)
-4. **or** [Run Node with Systemd](../systemd/validator-node.mdx)
-
-Once your node is operational, proceed.
-
-
-
-## 1. Bond TNT or tTNT
-
-To validate, you will use a 'Stash' account and a 'staking proxy.' Make sure this account has enough funds to pay the fees for making transactions. Keep most of your funds in the stash account since it is meant to be the custodian of your staking funds. While you are not required to use a staking proxy, it is highly recommended. For this, you will create two accounts and make sure each of them have at least enough funds to pay the fees for making transactions.
-
-
-Controller accounts are deprecated in Substrate. For more information, [see this discussion.](https://forum.polkadot.network/t/staking-controller-deprecation-plan-staking-ui-leads-comms/2748)
-
-
-It is now time to set up our validator. We will do the following:
-
-1. Bond the TNT of the Stash account on the Tangle Network. These TNT will be put at stake for the security of the network and can be slashed.
-2. Select the [staking proxy account](./proxyaccount) account. This is the account that will decide when to start or stop validating.
-
-
-See how to setup a [proxy account](./proxyaccount)
-
-
-First, go to the Staking section. Click on "Account Actions", and then the "+ Stash" button.
-
-
-Make sure not to bond all your TNT balance since you will be unable to pay transaction fees from your bonded balance. Always maintain an unbonded amount for fees.
-
-
-**Stash account** - Select your Stash account. In this example, we will bond 1 DOT, where the minimum bonding amount is 1. Make sure that your Stash account contains at least this much. You can, of course, stake more than this.
-
-**Staking proxy account** - Select the staking [proxy account](./proxyaccount) created earlier. This account will also need a small amount of DOT in order to start and stop validating.
-
-**Value bonded** - How much DOT from the Stash account you want to bond/stake. Note that you do not need to bond all of the DOT in that account. Also note that you can always bond more DOT later. However, withdrawing any bonded amount requires the duration of the unbonding period. On Kusama, the unbonding period is 7 days. On Polkadot, the planned unbonding period is 28 days.
-
-**Payment destination** - The account where the rewards from validating are sent. More info here. Payouts can go to any custom address. If you'd like to redirect payments to an account that is neither the staking [proxy account](./proxyaccount) nor the stash account, set one up. Note that it is extremely unsafe to set an exchange address as the recipient of the staking rewards.
-
-Once everything is filled in properly, click `Bond` and sign the transaction with your Stash account.
-
-After a few seconds, you should see an `ExtrinsicSuccess` message.
-
-Your bonded account will be available under `Stashes.` You should now see a new card with all your accounts (note: you may need to refresh the screen). The bonded amount on the right corresponds to the funds bonded by the Stash account.
-
-## 2. Generate your Keys and Import them to the Keystore
-
-In order to participate in the distributed key generation (DKG) protocol, block production, and block finalization, you will be required to set up several keys. These keys include:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-See the guides for [launching Tangle Network with Docker](/docs/tangle-network/validator/deploy-with-docker/validator-node/) and [Launching with Systemd](/docs/tangle-network/validator/systemd/validator-node/) for exact guides on this step.
-
-Once your node and keys are setup and your node is synced, proceed to the following:
-
-## 3. Register with the Network
-
-Session keys are a critical aspect of the Tangle Network's consensus mechanism, and are composed of the several keys we generated previously, each with a different function. These keys enable your validator node to participate in consensus, and a misconfiguration can lead to missed rewards or even penalties. You can use RPC methods like `hasKey` to check for a specific key or `hasSessionKeys` to check the full session key public key string.
-
-**Starting Your Node**
-
-After your node is fully synchronized with the network, stop the process using Ctrl-C. You'll now start your node by designating itself as a validator:
-
-```
-tangle-standalone --validator --name "YourNodeNameOnTelemetry"
-```
-
-The output will be similar to:
-
-```
-[timestamp] Tangle Network Standalone
-[timestamp] ✌️ version x.x.x
-[timestamp] ❤️ by Webb Technologies
-[timestamp] 📋 Chain specification: Tangle Network
-[timestamp] 🏷 Node name: YourNodeName
-... and so on
-```
-
-Note that you can give your validator any name that you like. However, since this name will appear on telemetry and is visible to others, choose a unique name.
-
-### Register your Session Key with the Network for your Node
-
-To participate in consensus, you need to inform the chain about your session keys and map them to your node.
-
-**Option 1: PolkadotJS Apps**
-
-1. Connect to Your Validator Node:
- Start by connecting the PolkadotJS explorer to your validator node.
-2. Access the Toolbox:
- Navigate to the `Toolbox` tab.
-3. Select `RPC Calls`.
- Rotate the Keys:
-   From the dropdown menu, choose `author > rotateKeys()`.
-4. Execute the RPC call.
- **Important: Save the output.** This represents your session keys and will be essential for the next steps.
-
-**Option 2: CLI**
-
-If you're working on a remote server and need to rotate your session keys, you can use the following command:
-
-```sh
-curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "author_rotateKeys", "params":[]}' http://localhost:9933
-```
-
-**Note:** Adjust http://localhost:9933 if your node's address differs.
-
-This command prompts your node to generate a new set of session keys. The concatenated public parts of these keys will be returned as the result.
-
-### Submitting the setKeys Transaction
-
-To inform the chain about your session keys:
-
-1. Navigate to `Staking > Account Actions`
-2. Set Session Key
-
-- Click on `Set Session Key` for the account you've designated as your staking proxy.
-- Enter the output from `author_rotateKeys` in the appropriate field.
-- Click `Set Session Key`.
-
-Once you've submitted this transaction, your node is now recognized as a validator and is prepared to engage in the Tangle network's consensus process.
-
-
-**Verify Node Status**
-To confirm that your node is live and synchronized, head to Tangle Telemetry- [currently you can view Testnet Telemetry only,](https://telemetry.polkadot.io/#list/0xea63e6ac7da8699520af7fb540470d63e48eccb33f7273d2e21a935685bf1320) and locate your node. Given the myriad of nodes on the Tangle Network, ensure you've chosen a unique name for easier identification. For instance, if you've named your node `tangletechtest`, it should be visible when searched.
-
-
-## Setup via Validator Tab
-
-This step finalizes and enters you into the validator queue.
-
-1. Navigate to `Staking>Account Actions`
-2. Look for your validator node, and click `Validate`
-
-Here, you'll need to input the keys generated from the `rotateKeys` step, which is the hexadecimal output from the `author_rotateKeys` process. These keys will remain in a "pending" state until they are incorporated at the beginning of a new era.
-
-**Define your "reward commission percentage."** This denotes the commission rate applicable to your validator's rewards. **Note on Commission:** A commission rate of 100% indicates that you intend for your validator not to receive any nominations. This could discourage nominators and should be set judiciously.
-
-**Payment Preferences:** Specify the percentage of rewards you wish to claim. The remaining balance will be divided among your nominators.
-
-You also have the option to accept or decline new nominations via the "allows new nominations" setting.
-
-Click on `Bond & Validate` to enter the set of validators.
-
-**Confirm your Validator**
-Navigating to the "Staking" tab will display a list of active validators operating on the network. At the top, you'll see available validator slots and the count of nodes that have expressed their intent to validate. To check your node's status, switch to the "Waiting" tab.
-
-
-**"Waiting" on the Staking Queue**
-
-The validator roster is updated every era (roughly 6 hours in Tangle Testnet). In the subsequent era, if there's an available slot and your node is chosen to join the validator set, it will be activated as a validator. Otherwise, it will stay in the waiting queue. If your validator doesn't get selected for a specific era, it remains in the queue for the next one. No restart is required. However, you might consider increasing the staked Tangle tokens or seeking more nominators to enhance the chances of your validator's selection.
-
-
-
-# Additional Setup
-
-## Setting identity
-
-While not required, we highly recommend that validators and node operators set an identity, which is critical for receiving nominations and being seen as a trustworthy node.
-
-1. Go to the Polkadot.js portal: `Accounts`
-2. Open the 3 dots next to your address: `Set on-chain Identity`
-3. Enter all fields you want to set.
-4. Send the transaction.
-
-#### Request judgment
-
-1. Go to the Polkadot.js portal: `Developer > Extrinsic`
-2. Select your account and extrinsic type: `identity / requestJudgment`
-3. Send the transaction.
diff --git a/pages/docs/tangle-network/node/validator/validator-rewards.mdx b/pages/docs/tangle-network/node/validator/validator-rewards.mdx
deleted file mode 100644
index 3168e931..00000000
--- a/pages/docs/tangle-network/node/validator/validator-rewards.mdx
+++ /dev/null
@@ -1,125 +0,0 @@
----
-title: Validator Rewards
-description: A brief overview of Tangle Network rewards and their payout scheme.
----
-
-# Validator Rewards
-
-Running a [validator](validation.mdx) node on the Tangle Network allows you to connect to the network, sync with a bootnode, obtain local access to RPC endpoints, and also author blocks. The network rewards successful validators (users running validator nodes and actively producing blocks) by paying a set amount of network tokens as rewards. Validators are chosen using an algorithm [AURA](https://docs.substrate.io/reference/glossary/#authority-round-aura) that works to give every validator in the active set, a chance at authoring a block.
-
-## How Rewards are Calculated
-
-## Era Points
-
-For every era (a period of time approximately 6 hours in length in Tangle), validators are paid proportionally to the amount of _era points_ they have collected. Era
-points are reward points earned for payable actions like:
-
-- producing a non-uncle block in the Chain.
-- producing a reference to a previously unreferenced uncle block.
-- producing a referenced uncle block.
-
-An uncle block is a block that is valid in every regard, but which failed to become
-canonical. This can happen when two or more validators are block producers in a single slot, and the
-block produced by one validator reaches the next block producer before the others. We call the
-lagging blocks uncle blocks.
-
-Payments occur at the end of every era.
-
-Era points create a probabilistic component for staking rewards.
-
-If the _mean_ of staking rewards is the average rewards per era, then the _variance_ is the
-variability from the average staking rewards. The exact TNT value of each era point is not known in
-advance since it depends on the total number of points earned by all validators in a given era. This
-is designed this way so that the total payout per era depends on Tangle's inflation model, and not on the number of payable
-actions (f.e., authoring a new block) executed.
-
-In this case, analyzing the _expected value_ of staking rewards will paint a better picture as the
-weight of era points of validators and para-validators in the reward average are taken into
-consideration.
-
-#### High-level breakdown of reward variance
-
-This should only serve as a high-level overview of the probabilistic nature for staking rewards.
-
-Let:
-
-- `pe` = para-validator era points,
-- `ne` = non-para-validator era points,
-- `EV` = expected value of staking rewards,
-
-Then, `EV(pe)` has more influence on the `EV` than `EV(ne)`.
-
-Since `EV(pe)` has a more weighted probability on the `EV`, the increase in variance against the
-`EV` becomes apparent between the different validator pools (aka. validators in the active set and
-the ones chosen to para-validate).
-
-Also, let:
-
-- `v` = the variance of staking rewards,
-- `p` = number of para-validators,
-- `w` = number validators in the active set,
-- `e` = era,
-
-Then, `v` ↑ if `w` ↑, as this reduces `p` : `w`, with respect to `e`.
-
-Increased `v` is expected, and initially keeping `p` ↓ using the same para-validator set for
-all parachains ensures availability and approval voting. In addition, despite `v` ↑ on an `e` to `e`
-basis, over time, the amount of rewards each validator receives will equal out based on the
-continuous selection of para-validators.
-
-## Payout Scheme
-
-No matter how much total stake is behind a validator, all validators split the block authoring
-payout essentially equally. The payout of a specific validator, however, may differ based on
-era points, as described above. Although there is a probabilistic component to
-receiving era points, and they may be impacted slightly depending on factors such as network
-connectivity, well-behaving validators should generally average out to having similar era point
-totals over a large number of eras.
-
-Validators may also receive "tips" from senders as an incentive to include transactions in their
-produced blocks. Validators will receive 100% of these tips directly.
-
-For simplicity, the examples below will assume all validators have the same amount of era points,
-and received no tips.
-
-```
-Validator Set Size (v): 4
-Validator 1 Stake (v1): 18 tokens
-Validator 2 Stake (v2): 9 tokens
-Validator 3 Stake (v3): 8 tokens
-Validator 4 Stake (v4): 7 tokens
-Payout (p): 8 TNT
-
-Payout for each validator (v1 - v4):
-p / v = 8 / 4 = 2 tokens
-```
-
-Note that this is different than most other Proof-of-Stake systems such as Cosmos. As long as a
-validator is in the validator set, it will receive the same block reward as every other validator.
-Validator `v1`, who had 18 tokens staked, received the same reward (2 tokens) in this era as `v4`
-who had only 7 tokens staked.
-
-## Slashing
-
-Although rewards are paid equally, slashes are relative to a validator's stake. Therefore, if you do
-have enough TNT to run multiple validators, it is in your best interest to do so. A slash of 30%
-will, of course, be more TNT for a validator with 18 TNT staked than one with 9 TNT staked.
-
-Running multiple validators does not absolve you of the consequences of misbehavior. Polkadot
-punishes attacks that appear coordinated more severely than individual attacks. You should not, for
-example, run multiple validators hosted on the same infrastructure. A proper multi-validator
-configuration would ensure that they do not fail simultaneously.
-
-Nominators have the incentive to nominate the lowest-staked validator, as this will result in the
-lowest risk and highest reward. This is due to the fact that while their vulnerability to slashing
-remains the same (since it is percentage-based), their rewards are higher since they will be a
-higher proportion of the total stake allocated to that validator.
-
-To clarify this, let us imagine two validators, `v1` and `v2`. Assume both are in the active set,
-have commission set to 0%, and are well-behaved. The only difference is that `v1` has 90 TNT
-nominating it and `v2` only has 10. If you nominate `v1`, it now has `90 + 10 = 100` TNT, and you
-will get 10% of the staking rewards for the next era. If you nominate `v2`, it now has
-`10 + 10 = 20` TNT nominating it, and you will get 50% of the staking rewards for the next era. In
-actuality, it would be quite rare to see such a large difference between the stake of validators,
-but the same principle holds even for smaller differences. If there is a 10% slash of either
-validator, then you will lose 1 TNT in each case.
diff --git a/pages/docs/tangle-network/overview.mdx b/pages/docs/tangle-network/overview.mdx
deleted file mode 100644
index e1d5686f..00000000
--- a/pages/docs/tangle-network/overview.mdx
+++ /dev/null
@@ -1,140 +0,0 @@
----
-title: Overview and Use Cases
-description: General overview of the Tangle Network, use cases like including interoperable shielded pools, identity-based systems, decentralized private finance, and secure multi-party computation, as well as features, and roadmap.
----
-
-import { UseCasesArea, ParticipateArea, TestNetworkArea } from "../../../components/UseCasesTangle";
-import FullWebbCTA from "../../../components/FullWebbCTA";
-import SvgComponent from "../../../components/TangleOverviewSVG.tsx";
-
-# Tangle Network Overview
-
-## Introduction
-
-The Tangle Network is a specialized platform engineered to support and govern cross-chain Zero-Knowledge (ZK) applications. Leveraging the robust Substrate blockchain framework, the Tangle Network forms the core infrastructure to facilitate a new generation of privacy-enhancing and governance-decentralized applications.
-
-The Tangle Network's unique offering centers around the intersection of cross-chain functionality, familiar EVM tooling and compatibility, advanced governance systems, and the world of ZK applications. Our network seamlessly merges these distinct elements, creating an ecosystem that amplifies the strengths of each component.
-
-## Key Features
-
-**Cross-Chain Functionality and EVM Compatibility**
-
-The Tangle Network breaks traditional chain boundaries, allowing seamless interaction, data exchange, and operation of ZK applications across different blockchain networks. Leveraging the power of the Ethereum Virtual Machine (EVM) on Substrate, developers can utilize familiar EVM tooling and compatibility to build decentralized applications (DApps), create Non-Fungible Tokens (NFTs), and utilize ERC20 tokens across multiple networks.
-
-**Advanced Governance**
-
-The Tangle Network implements an innovative governance model based on Distributed Key Generation (DKG) protocol. The DKG protocol serves as a security instrument for the Tangle Network's cross-chain Anchor zkApps, ensuring the validity of bridge updates with crypto-economically secured threshold-signatures.
-
-DKG is a cryptographic method where multiple entities collaboratively produce a shared public and private key. In the Tangle Network, DKG fortifies the governance of distributed applications, particularly the Anchor System. It bolsters the security of the network by ensuring the integrity of signed messages via threshold signatures. This not only provides resistance against potential threats but also amplifies the Tangle Network's credibility.
-
-**Privacy-Enhancing ZK Applications**
-
-Privacy is a paramount concern in the Tangle Network. By providing an infrastructure for Zero-Knowledge (ZK) applications, we enable users to experience a new generation of privacy-enhancing functionalities. ZK applications empower users to transact, communicate, and interact privately while maintaining the security and immutability of blockchain technology.
-
-**Built on Substrate**
-
-The Tangle Network is built on Substrate, an advanced blockchain framework renowned for its flexibility, scalability, and cutting-edge features. This strategic choice ensures that our platform remains at the forefront of speed, security, and scalability, serving as a reliable backbone for the Tangle Network. Leveraging Substrate's modular architecture, we enable seamless interaction and interoperability with other blockchain networks.
-
----
-
-## Use Cases
-
-The Tangle Network and Webb Protocol have been designed to serve a variety of use cases. A majority of these applications revolve around enhancing privacy and improving cross-chain interoperability. Here are some key proposed applications for the Tangle Network and Webb Protocol:
-
-**Interoperable Shielded Pools**
-
-The most immediate application is the creation of private bridges for assets, or "interoperable shielded pools." The decentralized, updatable Tangle Network is ideal for maintaining the state of a set of bridge anchors, making it possible to design data to be inserted into these anchors around that of an asset system. Unspent transaction outputs (UTXOs) are inserted into anchors, and users can spend these UTXOs privately across the chains the bridge exists over.
-
-**Interoperable Membership Groups**
-
-The Tangle Network and Webb Protocol can support interoperable membership groups. These are communities that exist across chains and leverage privacy. A natural implementation might look like an interoperable Semaphore system, where anyone in the Semaphore membership group can relay a vote or response to a poll from any chain privately, and potentially without even needing a wallet on that chain.
-
-**Interoperable Badge System**
-
-An identity-based application, an interoperable badge system, could use expressive data blobs for arbitrary proofs of ownership, participation, and identity. Using Webb's technology stack, these badges can be proven to exist from anywhere, enabling new types of composable application development due to the zero-knowledge and private nature of data disclosure.
-
-**Variable Asset Anchor System**
-
-This system allows for interoperable shielded pools, wherein users can transfer arbitrary amounts of assets privately between blockchains. The Variable Asset Anchor System uses zero-knowledge proofs and is similar to a shielded UTXO system, but with cross-chain capabilities.
-
-**Semaphore Anchor System**
-
-Semaphore is a popular zero-knowledge protocol that enables members of an on-chain community to create anonymous signals using zero-knowledge proofs of membership in the community’s identity set. This concept can be extended to a cross-chain identity set, allowing any member of a set of non-fungible token (NFT) communities to register.
-
-**Identity-Based Variable Asset Anchor System**
-
-By combining the Semaphore Identity protocol and the Variable Asset protocol, a cross-chain shielded pool application over a restricted identity set can be designed. This creates a private transaction system where only users with proofs of membership in a cross-chain identity system can transact. This opens up possibilities for even more diverse use cases in the realm of zero-knowledge applications.
-
-In addition, with the integration of threshold Elliptic Curve Digital Signature Algorithm (ECDSA), the Tangle Network can sign arbitrary Bitcoin transactions, Ethereum transactions, and more. It can also be extended to include a Bitcoin bridge into the system with a proper custody rotation, or to sign messages for other smart contracts across chains for governance purposes.
-
-**Decentralized Private Finance (Private Defi)**
-
-Decentralized Finance, or DeFi, has experienced significant growth over the past few years, facilitating peer-to-peer financial transactions without intermediaries. However, DeFi transactions on public blockchains are often not private, posing privacy concerns for users. Leveraging Tangle Network's and Webb Protocol's privacy-preserving capabilities, we can establish Decentralized Private DefFi applications.
-
-These allow users to make transactions while keeping their financial activities private. They can engage in yield farming, liquidity provision, lending, borrowing, and other DeFi operations while remaining anonymous. The added privacy benefits could attract users that prefer to keep their financial activities private due to security concerns, thus expanding the overall user base of DeFi.
-
-**Secure Multi-Party Computation (SMPC)**
-
-Secure Multi-Party Computation (SMPC) is an area of cryptography concerned with enabling multiple parties to jointly compute a function over their inputs while keeping those inputs private. With its cryptographic properties, Tangle Network can be extended to facilitate SMPC.
-
-For example, consider a consortium of companies willing to collaboratively train a machine learning model using their data without exposing sensitive information to each other. By leveraging SMPC on the Tangle Network, these companies can jointly compute the machine learning model without revealing their individual datasets.
-
-This not only preserves privacy but also fosters collaboration between different entities that would otherwise be reluctant to share sensitive data.
-
-**Cross-chain Atomic Swaps**
-
-In the current state of the blockchain ecosystem, transferring assets between different blockchains (cross-chain) often involves centralized exchanges or trusted intermediaries. With the Tangle Network and Webb Protocol, we can enable cross-chain atomic swaps with enhanced privacy.
-
-An atomic swap is a smart contract technology that enables the exchange of one cryptocurrency for another without using centralized intermediaries. Users will be able to privately and securely exchange tokens between different blockchains directly. For instance, a user can exchange Bitcoin for Ethereum directly from their wallets without an exchange, ensuring privacy and reducing the reliance on intermediaries.
-
-**Private and Secure Messaging Systems**
-
-In today's digital age, privacy and security in communication are paramount. With the Tangle Network's zero-knowledge proofs and privacy-oriented architecture, we can develop a private and secure messaging system.
-
-In this system, all communications would be encrypted and can be securely stored across multiple blockchains. This would ensure that only the intended recipients can read the messages. Additionally, the decentralized nature of the system would make it resistant to censorship and control by any single entity.
-
-**Privacy-Preserving Data Marketplace**
-
-Data is often referred to as the "new oil." However, data transactions can be challenging due to privacy and trust concerns. By leveraging the Tangle Network, we can establish a privacy-preserving data marketplace.
-
-In this marketplace, data sellers can list their datasets without revealing the actual data. Using zero-knowledge proofs, they can provide evidence of the data's authenticity and other characteristics. Buyers, on the other hand, can verify these proofs and make purchases without exposing their identities. The entire transaction can be managed on-chain, ensuring fairness and transparency while preserving privacy.
-
-**Decentralized Identity Systems (DID)**
-
-Identity is fundamental in both the physical and digital worlds. However, traditional identity systems are often centralized and vulnerable to attacks. The Tangle Network can support Decentralized Identity Systems (DID), offering privacy, control, and cross-chain compatibility.
-
-In a DID system on the Tangle Network, each user can generate a self-sovereign identity that could be used across different blockchains. With zero-knowledge proofs, users can prove certain attributes of their identity without revealing any unnecessary personal information. This would not only enhance privacy but also give users full control over their identities, avoiding reliance on any single authority.
-
-These use cases showcase the versatility and potential of the Tangle Network and Webb Protocol in various sectors, underscoring its ability to drive forward both privacy and interoperability in the blockchain space.
-
-## Roadmap
-
-The following is subject to change as DAO governance supports different initiatives.
-
-**Phase 1**
-
-- Test Runtime Upgrade
-- Finalize Tangle Token Distribution
-- Launch Incentivized testnet
-
-**Phase 2**
-
-- Update Tangle Genesis for Launch
-- Distribute TNT Tokens
-- Launch Democracy Governance
-- Launch OFAC VAnchor Bridges
-
-**Phase 3**
-
-- Launch Cross-chain Transfers
-- Validator Staking Upgrades
-- Launch Semaphore VAnchor bridges
-
-**Phase 4**
-
-- Remove Sudo
-- Improve Relayer & Proposer Security
-
-## Participate
-
-
diff --git a/yarn.lock b/yarn.lock
index ad6fdc3d..4c2d65bc 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1357,9 +1357,9 @@ camelcase@^1.0.2:
integrity sha512-wzLkDa4K/mzI1OSITC+DUyjgIl/ETNHE9QvYgy6J6Jvqyyz4C0Xfd+lQhb19sX2jMpZV4IssUn0VDVmglV+s4g==
caniuse-lite@^1.0.30001406, caniuse-lite@^1.0.30001426, caniuse-lite@^1.0.30001449:
- version "1.0.30001480"
- resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001480.tgz"
- integrity sha512-q7cpoPPvZYgtyC4VaBSN0Bt+PJ4c4EYRf0DrduInOz2SkFpHD5p3LnvEpqBp7UnJn+8x1Ogl1s38saUxe+ihQQ==
+ version "1.0.30001576"
+ resolved "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001576.tgz"
+ integrity sha512-ff5BdakGe2P3SQsMsiqmt1Lc8221NR1VzHj5jXN5vBny9A6fpze94HiVV/n7XRosOlsShJcvMv5mdnpjOGCEgg==
ccount@^2.0.0:
version "2.0.1"