diff --git a/articleRedirects.js b/articleRedirects.js index 813d2965e9d..f2efc146abf 100644 --- a/articleRedirects.js +++ b/articleRedirects.js @@ -480,10 +480,6 @@ exports.articleRedirects = [ from: '/shimmer/smart-contracts/contribute', to: '/learn/smart-contracts/introduction', }, - { - from: '/shimmer/smart-contracts/guide/example_projects/fair_roulette', - to: '/wasp-wasm/tutorials/fair_roulette', - }, { from: '/shimmer/smart-contracts/metrics', to: '/wasp/metrics', diff --git a/cli/package.json b/cli/package.json index a6d1a14ffb3..b6819d3d94d 100644 --- a/cli/package.json +++ b/cli/package.json @@ -22,7 +22,7 @@ }, "dependencies": { "@babel/generator": "^7.21.5", - "@babel/parser": "^7.21.2", + "@babel/parser": "^7.23.0", "@babel/types": "^7.21.5", "@iota-wiki/core": "workspace:^", "@yarnpkg/shell": "^3.2.0", diff --git a/common/community/research/iota-devnet-wallet.md b/common/community/research/iota-devnet-wallet.md deleted file mode 100644 index 4210a2a8358..00000000000 --- a/common/community/research/iota-devnet-wallet.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: IOTA 2.0 DevNet Wallet -description: A Wallet for the current IOTA 2.0 Development - and Testnet. Offers - a lot of Functions that will become available in the IOTA 2.0 Mainnet. No real - IOTA Tokens are used here. ---- - -# IOTA 2.0 DevNet Wallet - -With a wallet for the IOTA 2.0 DevNet you can request tokens from the faucet, transfer them and create or manage NFTs and digital assets. Currently, there exist two wallets for the IOTA 2.0 DevNet. A Command Line Wallet (cli-wallet) and a GUI Wallet (IOTA 2.0 DevNet GUI Wallet) - -## cli-wallet - -The cli wallet is the most feature complete wallet. You can find a guide for it [here](/goshimmer/tutorials/wallet_library). -The cli wallet is located directly in the Goshimmer repo and pre-build binaries are released with each new GoShimmer version [here](https://github.com/iotaledger/goshimmer/releases). 
- -## IOTA 2.0 DevNet GUI Wallet - -![DevNet-Wallet](https://github.com/iotaledger/IOTA-2.0-DevNet-wallet/blob/master/images/devnet-wallet.png?raw=true) - -With the GUI wallet you can transfer token and manage your digital assets. It has its own [repo](https://github.com/iotaledger/IOTA-2.0-DevNet-wallet) and you can get prebuild binaries from the [releases](https://github.com/iotaledger/IOTA-2.0-DevNet-wallet/releases). diff --git a/common/community/the-community/how-to-support.md b/common/community/the-community/how-to-support.md index d8a49da8982..838e46076ed 100644 --- a/common/community/the-community/how-to-support.md +++ b/common/community/the-community/how-to-support.md @@ -95,7 +95,6 @@ After you have fulfilled the requirements, open the following repositories and _ ![Add a star to IOTA's repositories on Github](/img/participate/how-to-support/github_iota_star.png) - [Firefly](https://github.com/iotaledger/firefly) -- [GoShimmer](https://github.com/iotaledger/goshimmer) - [Hornet](https://github.com/iotaledger/hornet) - [Identity](https://github.com/iotaledger/identity.rs) - [iota.rs](https://github.com/iotaledger/iota.rs) diff --git a/docs/build/getting-started/networks-endpoints.mdx b/docs/build/getting-started/networks-endpoints.mdx index fdcda1851c4..bf48c1bf4cf 100644 --- a/docs/build/getting-started/networks-endpoints.mdx +++ b/docs/build/getting-started/networks-endpoints.mdx @@ -45,7 +45,7 @@ token. Stardust
A span of time.
+ An implementor of `IJwsVerifier` that can handle the
+ EdDSA
algorithm.
+
Return after the first error occurs.
+ Verify a JWS signature secured with the EdDSA
algorithm and
+ curve Ed25519
.
+
+ This function is useful when one is composing an `IJwsVerifier` that
+ delegates
+ EdDSA
verification with curve Ed25519
to this function.
+
+ This function does not check whether alg = EdDSA
in the
+ protected header. Callers are expected to assert this prior to calling the
+ function.
+
Decode the given url-safe base64-encoded slice into its raw bytes.
- Verify a JWS signature secured with the JwsAlgorithm::EdDSA
{' '}
- algorithm. Only the EdCurve::Ed25519
variant is supported for
- now.
-
- This function is useful when one is building an IJwsVerifier
{' '}
- that extends the default provided by the IOTA Identity Framework.
-
- This function does not check whether alg = EdDSA
in the
- protected header. Callers are expected to assert this prior to calling the
- function.
-
Promise.<string>
- [.purgeMethod(storage, id)](#CoreDocument+purgeMethod) ⇒ Promise.<void>
- [.createJws(storage, fragment, payload, options)](#CoreDocument+createJws) ⇒ [Promise.<Jws>
](#Jws)
- - [.createCredentialJwt(storage, fragment, credential, options)](#CoreDocument+createCredentialJwt) ⇒ [Promise.<Jwt>
](#Jwt)
+ - [.createCredentialJwt(storage, fragment, credential, options, custom_claims)](#CoreDocument+createCredentialJwt) ⇒ [Promise.<Jwt>
](#Jwt)
- [.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options)](#CoreDocument+createPresentationJwt) ⇒ [Promise.<Jwt>
](#Jwt)
- _static_
- [.fromJSON(json)](#CoreDocument.fromJSON) ⇒ [CoreDocument
](#CoreDocument)
@@ -1046,7 +1055,8 @@ Regardless of which options are passed the following conditions must be met in o
take place.
- The JWS must be encoded according to the JWS compact serialization.
-- The `kid` value in the protected header must be an identifier of a verification method in this DID document.
+- The `kid` value in the protected header must be an identifier of a verification method in this DID document,
+ or set explicitly in the `options`.
**Kind**: instance method of [CoreDocument
](#CoreDocument)
@@ -1054,7 +1064,7 @@ take place.
| ----------------- | -------------------------------------------------------------- |
| jws | [Jws
](#Jws) |
| options | [JwsVerificationOptions
](#JwsVerificationOptions) |
-| signatureVerifier | IJwsVerifier
\| undefined
|
+| signatureVerifier | IJwsVerifier
|
| detachedPayload | string
\| undefined
|
@@ -1179,32 +1189,37 @@ See [RFC7515 section 3.1](https://www.rfc-editor.org/rfc/rfc7515#section-3.1).
-### coreDocument.createCredentialJwt(storage, fragment, credential, options) ⇒ [Promise.<Jwt>
](#Jwt)
+### coreDocument.createCredentialJwt(storage, fragment, credential, options, custom_claims) ⇒ [Promise.<Jwt>
](#Jwt)
Produces a JWT where the payload is produced from the given `credential`
-in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1).
+in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token).
-The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be
-produced by the corresponding private key backed by the `storage` in accordance with the passed `options`.
+Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id`
+of the method identified by `fragment` and the JWS signature will be produced by the corresponding
+private key backed by the `storage` in accordance with the passed `options`.
+
+The `custom_claims` can be used to set additional claims on the resulting JWT.
**Kind**: instance method of [CoreDocument
](#CoreDocument)
-| Param | Type |
-| ---------- | -------------------------------------------------------- |
-| storage | [Storage
](#Storage) |
-| fragment | string
|
-| credential | [Credential
](#Credential) |
-| options | [JwsSignatureOptions
](#JwsSignatureOptions) |
+| Param | Type |
+| ------------- | ----------------------------------------------------------------- |
+| storage | [Storage
](#Storage) |
+| fragment | string
|
+| credential | [Credential
](#Credential) |
+| options | [JwsSignatureOptions
](#JwsSignatureOptions) |
+| custom_claims | Record.<string, any>
\| undefined
|
### coreDocument.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options) ⇒ [Promise.<Jwt>
](#Jwt)
Produces a JWT where the payload is produced from the given presentation.
-in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1).
+in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token).
-The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be
-produced by the corresponding private key backed by the `storage` in accordance with the passed `options`.
+Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id`
+of the method identified by `fragment` and the JWS signature will be produced by the corresponding
+private key backed by the `storage` in accordance with the passed `options`.
**Kind**: instance method of [CoreDocument
](#CoreDocument)
@@ -1708,6 +1723,7 @@ It does not imply anything about a potentially present proof property on the cre
- [DecodedJwtCredential](#DecodedJwtCredential)
- [.credential()](#DecodedJwtCredential+credential) ⇒ [Credential
](#Credential)
- [.protectedHeader()](#DecodedJwtCredential+protectedHeader) ⇒ [JwsHeader
](#JwsHeader)
+ - [.customClaims()](#DecodedJwtCredential+customClaims) ⇒ Record.<string, any>
\| undefined
- [.intoCredential()](#DecodedJwtCredential+intoCredential) ⇒ [Credential
](#Credential)
@@ -1726,6 +1742,14 @@ Returns a copy of the protected header parsed from the decoded JWS.
**Kind**: instance method of [DecodedJwtCredential
](#DecodedJwtCredential)
+
+
+### decodedJwtCredential.customClaims() ⇒ Record.<string, any>
\| undefined
+
+The custom claims parsed from the JWT.
+
+**Kind**: instance method of [DecodedJwtCredential
](#DecodedJwtCredential)
+
### decodedJwtCredential.intoCredential() ⇒ [Credential
](#Credential)
@@ -1756,6 +1780,7 @@ It does not imply anything about a potentially present proof property on the pre
- [.expirationDate()](#DecodedJwtPresentation+expirationDate) ⇒ [Timestamp
](#Timestamp) \| undefined
- [.issuanceDate()](#DecodedJwtPresentation+issuanceDate) ⇒ [Timestamp
](#Timestamp) \| undefined
- [.audience()](#DecodedJwtPresentation+audience) ⇒ string
\| undefined
+ - [.customClaims()](#DecodedJwtPresentation+customClaims) ⇒ Record.<string, any>
\| undefined
@@ -1807,6 +1832,14 @@ The `aud` property parsed from JWT claims.
**Kind**: instance method of [DecodedJwtPresentation
](#DecodedJwtPresentation)
+
+
+### decodedJwtPresentation.customClaims() ⇒ Record.<string, any>
\| undefined
+
+The custom claims parsed from the JWT.
+
+**Kind**: instance method of [DecodedJwtPresentation
](#DecodedJwtPresentation)
+
## DomainLinkageConfiguration
@@ -2054,6 +2087,49 @@ Deserializes an instance from a JSON object.
| ----- | ---------------- |
| json | any
|
+
+
+## EdDSAJwsVerifier
+
+An implementor of `IJwsVerifier` that can handle the
+`EdDSA` algorithm.
+
+**Kind**: global class
+
+- [EdDSAJwsVerifier](#EdDSAJwsVerifier)
+ - [new EdDSAJwsVerifier()](#new_EdDSAJwsVerifier_new)
+ - [.verify(alg, signingInput, decodedSignature, publicKey)](#EdDSAJwsVerifier+verify)
+
+
+
+### new EdDSAJwsVerifier()
+
+Constructs an EdDSAJwsVerifier.
+
+
+
+### edDSAJwsVerifier.verify(alg, signingInput, decodedSignature, publicKey)
+
+Verify a JWS signature secured with the `EdDSA` algorithm.
+Only the `Ed25519` curve is supported for now.
+
+This function is useful when one is building an `IJwsVerifier` that extends the default provided by
+the IOTA Identity Framework.
+
+# Warning
+
+This function does not check whether `alg = EdDSA` in the protected header. Callers are expected to assert this
+prior to calling the function.
+
+**Kind**: instance method of [EdDSAJwsVerifier
](#EdDSAJwsVerifier)
+
+| Param | Type |
+| ---------------- | ------------------------- |
+| alg | JwsAlgorithm
|
+| signingInput | Uint8Array
|
+| decodedSignature | Uint8Array
|
+| publicKey | [Jwk
](#Jwk) |
+
## IotaDID
@@ -2353,7 +2429,7 @@ Deserializes an instance from a JSON object.
- [.generateMethod(storage, keyType, alg, fragment, scope)](#IotaDocument+generateMethod) ⇒ Promise.<string>
- [.purgeMethod(storage, id)](#IotaDocument+purgeMethod) ⇒ Promise.<void>
- [.createJwt(storage, fragment, payload, options)](#IotaDocument+createJwt) ⇒ [Promise.<Jws>
](#Jws)
- - [.createCredentialJwt(storage, fragment, credential, options)](#IotaDocument+createCredentialJwt) ⇒ [Promise.<Jwt>
](#Jwt)
+ - [.createCredentialJwt(storage, fragment, credential, options, custom_claims)](#IotaDocument+createCredentialJwt) ⇒ [Promise.<Jwt>
](#Jwt)
- [.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options)](#IotaDocument+createPresentationJwt) ⇒ [Promise.<Jwt>
](#Jwt)
- _static_
- [.newWithId(id)](#IotaDocument.newWithId) ⇒ [IotaDocument
](#IotaDocument)
@@ -2590,7 +2666,7 @@ take place.
| ----------------- | -------------------------------------------------------------- |
| jws | [Jws
](#Jws) |
| options | [JwsVerificationOptions
](#JwsVerificationOptions) |
-| signatureVerifier | IJwsVerifier
\| undefined
|
+| signatureVerifier | IJwsVerifier
|
| detachedPayload | string
\| undefined
|
@@ -2845,32 +2921,37 @@ See [RFC7515 section 3.1](https://www.rfc-editor.org/rfc/rfc7515#section-3.1).
-### iotaDocument.createCredentialJwt(storage, fragment, credential, options) ⇒ [Promise.<Jwt>
](#Jwt)
+### iotaDocument.createCredentialJwt(storage, fragment, credential, options, custom_claims) ⇒ [Promise.<Jwt>
](#Jwt)
Produces a JWS where the payload is produced from the given `credential`
-in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1).
+in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token).
-The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be
-produced by the corresponding private key backed by the `storage` in accordance with the passed `options`.
+Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id`
+of the method identified by `fragment` and the JWS signature will be produced by the corresponding
+private key backed by the `storage` in accordance with the passed `options`.
+
+The `custom_claims` can be used to set additional claims on the resulting JWT.
**Kind**: instance method of [IotaDocument
](#IotaDocument)
-| Param | Type |
-| ---------- | -------------------------------------------------------- |
-| storage | [Storage
](#Storage) |
-| fragment | string
|
-| credential | [Credential
](#Credential) |
-| options | [JwsSignatureOptions
](#JwsSignatureOptions) |
+| Param | Type |
+| ------------- | ----------------------------------------------------------------- |
+| storage | [Storage
](#Storage) |
+| fragment | string
|
+| credential | [Credential
](#Credential) |
+| options | [JwsSignatureOptions
](#JwsSignatureOptions) |
+| custom_claims | Record.<string, any>
\| undefined
|
### iotaDocument.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options) ⇒ [Promise.<Jwt>
](#Jwt)
Produces a JWT where the payload is produced from the given presentation.
-in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1).
+in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token).
-The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be
-produced by the corresponding private key backed by the `storage` in accordance with the passed `options`.
+Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id`
+of the method identified by `fragment` and the JWS signature will be produced by the corresponding
+private key backed by the `storage` in accordance with the passed `options`.
**Kind**: instance method of [IotaDocument
](#IotaDocument)
@@ -3451,6 +3532,7 @@ Returns a clone of the JWS string.
- [.setAlg(value)](#JwsHeader+setAlg)
- [.b64()](#JwsHeader+b64) ⇒ boolean
\| undefined
- [.setB64(value)](#JwsHeader+setB64)
+ - [.custom()](#JwsHeader+custom) ⇒ Record.<string, any>
\| undefined
- [.has(claim)](#JwsHeader+has) ⇒ boolean
- [.isDisjoint(other)](#JwsHeader+isDisjoint) ⇒ boolean
- [.jku()](#JwsHeader+jku) ⇒ string
\| undefined
@@ -3528,6 +3610,14 @@ Sets a value for the base64url-encode payload claim (b64).
| ----- | -------------------- |
| value | boolean
|
+
+
+### jwsHeader.custom() ⇒ Record.<string, any>
\| undefined
+
+Additional header parameters.
+
+**Kind**: instance method of [JwsHeader
](#JwsHeader)
+
### jwsHeader.has(claim) ⇒ boolean
@@ -3835,7 +3925,9 @@ Deserializes an instance from a JSON object.
- [.setCty(value)](#JwsSignatureOptions+setCty)
- [.serUrl(value)](#JwsSignatureOptions+serUrl)
- [.setNonce(value)](#JwsSignatureOptions+setNonce)
+ - [.setKid(value)](#JwsSignatureOptions+setKid)
- [.setDetachedPayload(value)](#JwsSignatureOptions+setDetachedPayload)
+ - [.setCustomHeaderParameters(value)](#JwsSignatureOptions+setCustomHeaderParameters)
- [.toJSON()](#JwsSignatureOptions+toJSON) ⇒ any
- [.clone()](#JwsSignatureOptions+clone) ⇒ [JwsSignatureOptions
](#JwsSignatureOptions)
- _static_
@@ -3921,6 +4013,18 @@ Replace the value of the `nonce` field.
| ----- | ------------------- |
| value | string
|
+
+
+### jwsSignatureOptions.setKid(value)
+
+Replace the value of the `kid` field.
+
+**Kind**: instance method of [JwsSignatureOptions
](#JwsSignatureOptions)
+
+| Param | Type |
+| ----- | ------------------- |
+| value | string
|
+
### jwsSignatureOptions.setDetachedPayload(value)
@@ -3933,6 +4037,18 @@ Replace the value of the `detached_payload` field.
| ----- | -------------------- |
| value | boolean
|
+
+
+### jwsSignatureOptions.setCustomHeaderParameters(value)
+
+Add additional header parameters.
+
+**Kind**: instance method of [JwsSignatureOptions
](#JwsSignatureOptions)
+
+| Param | Type |
+| ----- | --------------------------------------- |
+| value | Record.<string, any>
|
+
### jwsSignatureOptions.toJSON() ⇒ any
@@ -3971,7 +4087,8 @@ Deserializes an instance from a JSON object.
- [new JwsVerificationOptions(options)](#new_JwsVerificationOptions_new)
- _instance_
- [.setNonce(value)](#JwsVerificationOptions+setNonce)
- - [.setScope(value)](#JwsVerificationOptions+setScope)
+ - [.setMethodScope(value)](#JwsVerificationOptions+setMethodScope)
+ - [.setMethodId(value)](#JwsVerificationOptions+setMethodId)
- [.toJSON()](#JwsVerificationOptions+toJSON) ⇒ any
- [.clone()](#JwsVerificationOptions+clone) ⇒ [JwsVerificationOptions
](#JwsVerificationOptions)
- _static_
@@ -3999,9 +4116,9 @@ Set the expected value for the `nonce` parameter of the protected header.
| ----- | ------------------- |
| value | string
|
-
+
-### jwsVerificationOptions.setScope(value)
+### jwsVerificationOptions.setMethodScope(value)
Set the scope of the verification methods that may be used to verify the given JWS.
@@ -4011,6 +4128,18 @@ Set the scope of the verification methods that may be used to verify the given J
| ----- | ---------------------------------------- |
| value | [MethodScope
](#MethodScope) |
+
+
+### jwsVerificationOptions.setMethodId(value)
+
+Set the DID URL of the method whose JWK should be used to verify the JWS.
+
+**Kind**: instance method of [JwsVerificationOptions
](#JwsVerificationOptions)
+
+| Param | Type |
+| ----- | ------------------------------ |
+| value | [DIDUrl
](#DIDUrl) |
+
### jwsVerificationOptions.toJSON() ⇒ any
@@ -4183,9 +4312,9 @@ Creates a new [JwtCredentialValidator](#JwtCredentialValidator). If a `signature
verifying decoded JWS signatures, otherwise the default which is only capable of handling the `EdDSA`
algorithm will be used.
-| Param | Type |
-| ----------------- | --------------------------------------------------- |
-| signatureVerifier | IJwsVerifier
\| undefined
|
+| Param | Type |
+| ----------------- | ------------------------- |
+| signatureVerifier | IJwsVerifier
|
@@ -4372,9 +4501,9 @@ Creates a new [JwtDomainLinkageValidator](#JwtDomainLinkageValidator). If a `sig
verifying decoded JWS signatures, otherwise the default which is only capable of handling the `EdDSA`
algorithm will be used.
-| Param | Type |
-| ----------------- | --------------------------------------------------- |
-| signatureVerifier | IJwsVerifier
\| undefined
|
+| Param | Type |
+| ----------------- | ------------------------- |
+| signatureVerifier | IJwsVerifier
|
@@ -4555,9 +4684,9 @@ Creates a new [JwtPresentationValidator](#JwtPresentationValidator). If a `signa
verifying decoded JWS signatures, otherwise the default which is only capable of handling the `EdDSA`
algorithm will be used.
-| Param | Type |
-| ----------------- | --------------------------------------------------- |
-| signatureVerifier | IJwsVerifier
\| undefined
|
+| Param | Type |
+| ----------------- | ------------------------- |
+| signatureVerifier | IJwsVerifier
|
@@ -5610,6 +5739,7 @@ Obtain the wrapped `JwkStorage`.
**Kind**: global class
- [Timestamp](#Timestamp)
+ - [new Timestamp()](#new_Timestamp_new)
- _instance_
- [.toRFC3339()](#Timestamp+toRFC3339) ⇒ string
- [.checkedAdd(duration)](#Timestamp+checkedAdd) ⇒ [Timestamp
](#Timestamp) \| undefined
@@ -5620,6 +5750,12 @@ Obtain the wrapped `JwkStorage`.
- [.nowUTC()](#Timestamp.nowUTC) ⇒ [Timestamp
](#Timestamp)
- [.fromJSON(json)](#Timestamp.fromJSON) ⇒ [Timestamp
](#Timestamp)
+
+
+### new Timestamp()
+
+Creates a new [Timestamp](#Timestamp) with the current date and time.
+
### timestamp.toRFC3339() ⇒ string
@@ -5949,18 +6085,6 @@ Deserializes an instance from a JSON object.
| ----- | ---------------- |
| json | any
|
-
-
-## StateMetadataEncoding
-
-**Kind**: global variable
-
-
-
-## MethodRelationship
-
-**Kind**: global variable
-
## StatusCheck
@@ -6059,6 +6183,41 @@ Return after the first error occurs.
**Kind**: global variable
+
+
+## StateMetadataEncoding
+
+**Kind**: global variable
+
+
+
+## MethodRelationship
+
+**Kind**: global variable
+
+
+
+## verifyEd25519(alg, signingInput, decodedSignature, publicKey)
+
+Verify a JWS signature secured with the `EdDSA` algorithm and curve `Ed25519`.
+
+This function is useful when one is composing an `IJwsVerifier` that delegates
+`EdDSA` verification with curve `Ed25519` to this function.
+
+# Warning
+
+This function does not check whether `alg = EdDSA` in the protected header. Callers are expected to assert this
+prior to calling the function.
+
+**Kind**: global function
+
+| Param | Type |
+| ---------------- | ------------------------- |
+| alg | JwsAlgorithm
|
+| signingInput | Uint8Array
|
+| decodedSignature | Uint8Array
|
+| publicKey | [Jwk
](#Jwk) |
+
## start()
@@ -6090,27 +6249,3 @@ Decode the given url-safe base64-encoded slice into its raw bytes.
| Param | Type |
| ----- | ----------------------- |
| data | Uint8Array
|
-
-
-
-## verifyEdDSA(alg, signingInput, decodedSignature, publicKey)
-
-Verify a JWS signature secured with the `JwsAlgorithm::EdDSA` algorithm.
-Only the `EdCurve::Ed25519` variant is supported for now.
-
-This function is useful when one is building an `IJwsVerifier` that extends the default provided by
-the IOTA Identity Framework.
-
-# Warning
-
-This function does not check whether `alg = EdDSA` in the protected header. Callers are expected to assert this
-prior to calling the function.
-
-**Kind**: global function
-
-| Param | Type |
-| ---------------- | ------------------------- |
-| alg | JwsAlgorithm
|
-| signingInput | Uint8Array
|
-| decodedSignature | Uint8Array
|
-| publicKey | [Jwk
](#Jwk) |
diff --git a/docs/build/wasp-wasm/0.7/docs/tutorials/fair_roulette.md b/docs/build/wasp-wasm/0.7/docs/tutorials/fair_roulette.md
deleted file mode 100644
index 871b60eb09d..00000000000
--- a/docs/build/wasp-wasm/0.7/docs/tutorials/fair_roulette.md
+++ /dev/null
@@ -1,329 +0,0 @@
----
-description: An example game project with frontend and contract, demonstrating the development, setup, and interaction with a smart contract.
-image: /img/logo/WASP_logo_dark.png
-keywords:
- - Smart Contracts
- - Rust
- - poc
- - proof of concept
- - node
- - nvm
- - JavaScript
- - TypeScript
- - Wasm
- - tutorial
----
-
-# Fair Roulette
-
-Fair roulette is an example reference implementation which demonstrates the development, setup, and interaction with a smart contract.
-
-## Introduction
-
-The Fair roulette example project is a simple betting game in which players can bet on a number within a certain range.
-
-The game consists of many rounds in which the player will try to bet on the right number to win a share of the bet funds.
-
-A round is running for a certain amount of time. In the example its 60 seconds. In this timeframe, incoming bets will be added to a list of bets. After 60 seconds have passed, a winning number will be randomly generated and all players who made the right guess will receive their share of the pot.
-
-If no round is _active_ when a bet gets placed, the round gets initiated immediately.
-
-The random number is generated by the native randomness of the IOTA Smart Contracts consensus.
-It is unpredictable by anybody, including an individual validator node.
-Therefore the roulette is Fair.
-
-## Mandatory Setup
-
-The mandatory setup consists out of:
-
-- 1 [GoShimmer](/goshimmer/welcome) node >= 0.7.5v ([25c827e8326a](https://github.com/iotaledger/goshimmer/commit/25c827e8326a))
-- 1 Beta [Wasp node](/wasp/running-a-node).
-- 1 Static file server (nginx, Apache, fasthttp)
-
-## Technicalities
-
-Before you dive into the contents of the project, you should take a look at important fundamentals.
-
-### Fundamentals
-
-Wasp is part of the IOTA ecosystem that enables the execution of smart contracts. These contracts run logic and are allowed to do state (change) requests towards the Tangle. You will need a GoShimmer node to be able to store state. It receives state change requests and, if valid, saves them onto the Tangle.
-
-There are two ways to interact with smart contracts.
-
-#### On Ledger Requests
-
-See: [On-ledger Requests](/learn/smart-contracts/invocation#on-ledger)
-
-On-ledger requests are sent to GoShimmer nodes. Wasp periodically requests new On-ledger requests from GoShimmer nodes, and handles them accordingly. These messages are validated through the network and take some time to be processed.
-
-#### Off Ledger Requests
-
-See: [Off-ledger Requests](/learn/smart-contracts/invocation#off-ledger)
-
-Off-ledger requests are directly sent to Wasp nodes and do not require validation through GoShimmer nodes. They are therefore faster. However, they require an initial deposit of funds to a chain account as this account will initiate required On-ledger requests on behalf of the desired contract or player.
-
-:::note
-This example uses On-ledger requests to initiate a betting request. A method to invoke Off-ledger requests is implemented inside the frontend.
-
-See: [placeBetOffLedger](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L133)
-:::
-
-#### Funds
-
-As these requests cost some fees, and to be able to bet with real tokens, the player will need a source of funds.
-
-As the game runs on a testnet, you can request funds from the GoShimmer faucets inside the network.
-
-See: [How to Obtain Tokens From the Faucet](/goshimmer/tutorials/obtain_tokens)
-
-After you have acquired some funds, they will reside inside an address that is handled by a wallet.
-
-For this PoC, we have implemented a very narrowed-down wallet that runs inside the browser itself, mostly hidden from the player.
-
-In the future, we want to provide a solution that enables the use of [Firefly](https://firefly.iota.org/) or MetaMask as a secure external wallet.
-
-#### Conclusion
-
-To interact with a smart contract, you will need:
-
-- A Wasp node that hosts the contract
-- A GoShimmer node to interact with the tangle
-- Funds from a GoShimmer faucet
-- A client that invokes the contract by either an On Ledger request or Off Ledger request. In this example, the Frontend acts as the client.
-
-### Implementation
-
-The PoC consists of two projects residing in `contracts/wasm/fairroulette`.
-
-One is the smart contract itself. Its boilerplate was generated using the new [Schema tool](/wasp-wasm/introduction/) which is shipped with this beta release.
-The contract logic is written in Rust, but the same implementation can be achieved
-interchangeably with Golang and Assemblyscript which is demonstrated in the root folder
-and `./src`.
-
-The second project is an interactive frontend written in TypeScript, made reactive with the light Svelte framework. You can find it in the sub-folder `./frontend`.
-This frontend sends On-ledger requests to place bets towards the fair roulette smart contract and makes use of the GoShimmer faucet to request funds.
-
-### The Smart Contract
-
-See: [Anatomy of a Smart Contract](/learn/smart-contracts/smart-contract-anatomy)
-
-As the smart contract is the only actor that is allowed to modify state in the context of the game, it needs to handle a few tasks such as:
-
-- Validating and accepting placed bets
-- Starting and ending a betting round
-- Generating a **random** winning number
-- Sending payouts to the winners
-- Emitting status updates through the event system
-
-Any incoming bet will be validated. This includes the amount of tokens which have been bet and also the number on which the player bet on. For example, any number over 8 or under 1 will be rejected.
-
-If the bet is valid and no round is active, the round state will be changed to `1`, marking an active round. The bet will be the first of a list of bets.
-
-A delayed function call will be activated which executes **after 60 seconds**.
-
-This function is the payout function that generates a random winning number, and pays out the winners of the round. After this, the round state will be set to `0` indicating the end of the round.
-
-If a round is already active, the bet will be appended to the list of bets and await processing.
-
-All state changes such as the `round started` ,`round ended`, `placed bets`, and the `payout of the winners` are published as events. Events are published as messages through a public web socket.
-
-#### Dependencies
-
-- [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/quickstart.html)
-
-#### Building the Contract
-
-```shell
-cd contracts/wasm/fairroulette
-wasm-pack build
-```
-
-### The Frontend
-
-The frontend has two main tasks.
-
-1. **Visualize the contract's state**: This includes the list of all placed bets, if a round is currently active and how long it's still going. Any payouts will be shown as well, including a fancy animation in case the player has won. The player can also see his current available funds, his seed, and his current address.
-
-:::danger
-The seed is the key to your funds. We display the seed for demonstration purposes only in this PoC.
-**Never share your seed with anyone under any circumstance.**
-:::
-
-2. **Enable the player to request funds and participate in the game by placing bets**: This is done by showing the player a list of eight numbers, a selection of the amount of funds to bet, and a bet placing button.
-
-As faucet requests require minimal proof of work, the calculation happens inside a web worker to prevent freezing the browser UI.
-
-To provide the frontend with the required events, it subscribes to the public web socket of Wasp to receive state changes.
-
-These state change events look like this:
-
-`vmmsg kUUCRkhjD4EGAxv3kM77ay3KMbo51UhaEoCr14TeVHc5 df79d138: fairroulette.bet.placed 2sYqEZ5GM1BnqkZ88yJgPH3CdD9wKqfgGKY1j8FYDSZb3ao5wu 531819 2`
-
-This event displays a placed bet from the address `12sYqEZ5GM1BnqkZ88yJgPH3CdD9wKqfgGKY1j8FYDSZb3ao5wu`, a bet of `531819i` on the number `2`. Originating from the smart contract ID `df79d138`.
-
-However, there is a bit more to the concept than to simply subscribe to a web socket and "perform requests".
-
-### The Communication Layer
-
-On and Off Ledger requests have a predefined structure. They need to get encoded strictly and include a list of transactions provided by Goshimmer. They also need to get signed by the client using the private key originating from a seed.
-
-Wasp uses the [ExtendedLockedOutput](/goshimmer/protocol_specification/components/advanced_outputs) message type, which enables certain additional properties such as:
-
-- A fallback address and a fallback timeout
-- Unlockable by AliasUnlockBlock (if address is of Misaddress type)
-- A time lock (execution after deadline)
-- A data payload for arbitrary metadata (size limits apply)
-
-This data payload is required to act on smart contracts as it contains:
-
-- The smart contract ID to be selected
-- The function ID to be executed
-- A list of arguments to be passed into the function
-
-As we do not expect contract and frontend developers to write their own implementation, we have separated the communication layer into two parts:
-
-- [The fairroulette_service](#the-fairroulette-service)
-- [The wasp_client](#the-wasp-client)
-
-#### The Wasp Client
-
-The wasp client is an example implementation of the communication protocol.
-
-It provides:
-
-- A basic wallet functionality
-- Hashing algorithms
-- A web worker to provide proof of work
-- Construction of On/Off Ledger requests
-- Construction of smart contract arguments and payloads
-- Generation of seeds (including their private keys and addresses)
-- Serialization of data into binary messages
-- Deserialization of smart contract state
-
-This wasp_client can be seen as a soon-to-be external library. For now, this is a PoC client library shipped with the project. However, in the future , we want to provide a library you can simply include in your project.
-
-#### The Fairroulette Service
-
-This service is meant to be a high-level implementation of the actual app. In other words: it's the service that app or frontend developers would concentrate on.
-
-It does not construct message types, nor does it interact with GoShimmer directly. Besides subscribing to the web socket event system of Wasp, it does not interact directly with Wasp either. Such communications are handled by the [`wasp_client`](#the-wasp-client).
-
-The fairroulette service is a mere wrapper around smart contract invocation calls. It accesses the smart contract state through the `wasp_client` and does minimal decoding of data.
-
-Let's take a look into three parts of this service to make this more clear.
-
-This service comprises two parts:
-
-- [PlaceBetOnLedger](#placebetonledger)
-- [CallView](#callview)
-
-##### PlaceBetOnLedger
-
-The [placeBetOnLedger](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L149) function is responsible for sending On-Ledger bet requests. It constructs a simple OnLedger object containing:
-
-- The smart contract ID: `fairroulette`
-- The function to invoke: `placeBet`
-- An argument: `-number`
- - this is the number the player would bet on, the winning number
-
-This transaction also requires an address to send the request to, and also a variable amount of funds over `0i`.
-
-:::note
-For Wasp, the address to send funds to is the chainId.
-:::
-
-See: [Invoking](/wasp-wasm/how-tos/solo/invoking-sc)
-
-##### CallView
-
-The [callView](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L165) function is responsible for calling smart contract view functions.
-
-See: [Calling a view](/wasp-wasm/how-tos/solo/view-sc)
-
-To give access to the smart contracts state, you can use view functions to return selected parts of the state.
-
-In this use case, you can poll the state of the contract at the initial page load of the frontend.
-State changes that happen afterwards are published through the websocket event system.
-
-You can find examples to guide you in building similar functions in:
-
-- Frontend: [getRoundStatus](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L181)
-
-- Smart Contract: [view_round_status](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/src/fairroulette.rs#L312)
-
-Since data returned by the views is encoded in Base64, the frontend needs to decode this by using simple `Buffer` methods.
-The `view_round_status` view returns an `UInt16` which has a state of either `0` or `1`.
-
-This means that to get a proper value from a view call, you should use `readUInt16LE` to decode the matching value.
-
-#### Dependencies
-
-- [NodeJS >= 14](https://nodejs.org/en/download/)
- If you use a different version of node, you can use [nvm](https://github.com/nvm-sh/nvm) to switch node versions.
-- [NPM](https://www.npmjs.com/)
-
-#### Install Dependencies
-
-1. Go to your frontend directory ( contracts/wasm/fairroulette/frontend for example)
-
- ```shell
- cd contracts/wasm/fairroulette/frontend
- ```
-
-2. Install dependencies running:
-
- ```shell
- npm install
- ```
-
-#### Configuration
-
-The frontend requires that you create a config file. You can copy the template from `contracts/wasm/fairroulette/frontend/config.dev.sample.js`, and rename it to `config.dev.js` inside the same folder.
-
-```shell
-cp config.dev.sample.js config.dev.js
-```
-
-Make sure to update the config values according to your setup.
-
-The `chainId` is the chainId which gets defined after [deploying a chain](/wasp-cli/how-tos/setting-up-a-chain/#deploy-the-isc-chain). You can get your chain id from your dashboard, or list all chains by running:
-
-```shell
-wasp-cli chain list
-```
-
-`waspWebSocketUrl`, `waspApiUrl`, and `goShimmerApiUrl` are dependent on the location of your Wasp and GoShimmer nodes. Make sure to keep the path of the `waspWeb SocketUrl` (`/chain/%chainId/ws`) at the end.
-
-`seed` can be either `undefined` or a predefined 44 length seed. If `seed` is set to `undefined` a new seed will be generated as soon a user opens the page. A predefined seed will be set for all users. This can be useful for development purposes.
-
-#### Building The Frontend
-
-You can build the frontend by running the following commands:
-
-```shell
-cd contracts/wasm/fairroulette/frontend
-npm run build_worker
-```
-
-After this, you can run `npm run dev` which will run a development server that exposes the transpiled frontend on [`http://localhost:5000`](http://localhost:5000).
-
-If you want to expose the dev server to the public, it might be required to bind the server to any endpoint like `HOST=0.0.0.0 PORT=5000 npm run dev`.
-
-## Deployment
-
-You should follow the [Deployment](/wasp-cli/how-tos/setting-up-a-chain/#deploy-the-isc-chain) documentation until you reach the `deploy-contract` command.
-
-The deployment of a contract requires funds to be deposited to the **chain**.
-You can do this by executing the following command from the directory where your Wasp node was configured:
-
-```shell
-wasp-cli chain deposit IOTA:10000
-```
-
-Make sure to [Build](#building-the-contract) the contract before deploying it.
-
-Now, you can deploy the contract with a wasmtime configuration.
-
-```shell
-wasp-cli chain deploy-contract wasmtime fairroulette "fairroulette" contracts/wasm/fairroulette/pkg/fairroulette_bg.wasm
-```
diff --git a/docs/build/wasp-wasm/0.7/sidebars.js b/docs/build/wasp-wasm/0.7/sidebars.js
index 77e9b238512..5fa60186648 100644
--- a/docs/build/wasp-wasm/0.7/sidebars.js
+++ b/docs/build/wasp-wasm/0.7/sidebars.js
@@ -279,16 +279,5 @@ module.exports = {
},
],
},
- {
- type: 'category',
- label: 'Tutorials',
- items: [
- {
- type: 'doc',
- label: 'Fair Roulette',
- id: 'tutorials/fair_roulette',
- },
- ],
- },
],
};
diff --git a/docs/get-started/community-links.md b/docs/get-started/community-links.md
index 8b33e9ef074..6dfa1dfc22d 100644
--- a/docs/get-started/community-links.md
+++ b/docs/get-started/community-links.md
@@ -63,7 +63,6 @@ Starring repositories on GitHub helps indicate the popularity and quality of a p
repositories related to IOTA:
- [Firefly](https://github.com/iotaledger/firefly)
-- [GoShimmer](https://github.com/iotaledger/goshimmer)
- [Hornet](https://github.com/iotaledger/hornet)
- [Identity](https://github.com/iotaledger/identity.rs)
- [iota.rs](https://github.com/iotaledger/iota.rs)
diff --git a/docs/get-started/faq.md b/docs/get-started/faq.md
index 87ef9405317..68476e84a8a 100644
--- a/docs/get-started/faq.md
+++ b/docs/get-started/faq.md
@@ -3,7 +3,6 @@ title: FAQ's
description: Questions and answers collections. Clear outlined information for the most common questions around IOTA.
---
-- [Chrysalis mainnet FAQ](/introduction/explanations/faq/)
+- [Mainnet FAQ](/introduction/explanations/faq/)
- [Firefly wallet FAQ](/use/wallets/firefly/faq-and-troubleshooting)
- [Identity FAQ](/identity.rs/faq/)
-- [GoShimmer devnet FAQ](/goshimmer/faq/)
diff --git a/docs/get-started/glossary.md b/docs/get-started/glossary.md
index 8a76db08ac9..ba65256bc8a 100644
--- a/docs/get-started/glossary.md
+++ b/docs/get-started/glossary.md
@@ -8,7 +8,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and
## A
- **Address Checksum:** Checksum validation is a way to determine if an address is valid and does not contain typos.
-- **Dynamic PoW** (In development for IOTA 1.5 and Shimmer): With this feature, the coordinator can issue a milestone and simultaneously set the future PoW score. This means that if the network is not fully utilized, the PoW will be reduced to the point where it can be executed by microdevices. Accordingly, the coordinator can also raise the PoW difficulty in case of high utilization and thus make an attack very expensive.
- **Auto peering:** A mechanism that allows nodes to automatically select their neighbors without manual intervention by the node operator.
- **API(Application Programming Interfaces):** The way for applications to interact with the Tangle.
- **Atomic Transactions:** Instead of the bundle construct, IOTA and Shimmer use simpler Atomic Transactions. An Atomic Transaction includes everything related to a transaction in a single message instead of splitting it up (Bundles). This reduces network overhead and signature verification load, improves spam protection and rate control, and shortens the length of Merkle proofs (for future sharding). It also reduces implementation overhead and increases maintainability of the core node software.
@@ -22,12 +21,10 @@ description: Glossary of all specialized names and phrases used in the IOTA and
- **Balance:** Funds on the addresses (account). These are always available and cannot be deleted or forgotten.
- **Blockchain Bottleneck:** The more transactions are issued, the more the block rate and size become a bottleneck in the system. It is no longer possible to capture all incoming transactions in a prompt manner. Attempts to speed up block rates result in more orphaned blocks (blocks are left behind) and reduce the security of the blockchain.
- **Branch (IOTA 2.0):** A version of the ledger that temporarily coexists with other versions, each spawned by conflicting transactions.
-- **Bee:** Node software developed by the IOTA foundation using the Rust programming language.
- **Bootstrapping attack:** An attack in which a node downloads malicious snapshot files, including invalid transactions and balances.
## C
-- **Curl:** This is one of the hash functions currently in use. It is based on the “sponge” construction of the Keccak inventors (SHA-3).
- **Confirmed:** Confirmed transactions. Messages in the Tangle are considered for confirmation only when they are directly or indirectly referenced by a milestone that the Coordinator node has validated. To allow the nodes to recognize the milestones, all nodes that participate in the same network are configured with the Merkle root address of a Coordinator that they trust to confirm messages. Using this address, nodes can validate the signatures in milestones to verify whether the trusted Coordinator signs them. To make sure that new messages always have a chance of being confirmed, the Coordinator sends indexed milestones at regular intervals. This way, nodes can compare the indexes of their milestones to check whether they are synchronized with the rest of the network.
- **CTPS:** Confirmed transactions per second.
- **Cumulative Weight:** A system for valuing transactions. Each additional transaction that references a transaction increases its cumulative weight. When tips are selected, a path through transactions that has a higher cumulative weight is preferred.
@@ -35,8 +32,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and
- **Consensus:** Agreement on a specific date or value in distributed multi-agent systems, in the presence of faulty processes.
- **Coordinator (only up to IOTA 2.0):** A trusted entity, as protection against malicious transactions. The Tangle is not yet a final product, it is still in beta. The network currently relies on a kind of shield, the so-called coordinator. It is open-source and runs on a Hornet node. The COO acts as a centralized, voluntary, and temporary alternative consensus mechanism for the Tangle. To do this, the COO sends honest transactions to the full nodes at regular intervals. These packets contain a signed message with no value, called a milestone. The full nodes in the Tangle consider a transaction as confirmed only if it is approved by a milestone. Important: The coordinator can only confirm transactions, but he cannot bypass the consensus rules. To create, freeze or steal tokens is not possible for him. This fixed rule and the COO address is hardcoded on each full node, so the coordinator’s influence on the tangle is very limited, since the tangle is also constantly monitored by all the other full nodes. > The Coo will be switched off with the IOTA 2.0 upgrade.
- **Communication Layer (IOTA 2.0):** This layer stores and communicates information. This layer contains the distributed ledger or tangle. The rate control and timestamps are also located in this layer.
-- **Core Object type (IOTA 2.0):** An object type that must be parsed by all nodes. Parsers are computer programs responsible for decomposing and converting an input into a format more suitable for further processing.
-- **Core Application (IOTA 2.0):** Core application that must be executed by all nodes, for example the value transfer application.
- **Child (IOTA 2.0):** A transaction that gets referenced by Parents.
- **Chrysalis:** The name of the IOTA 1.5 network upgrade.
- **Stardust:** The name of the first Shimmer network upgrade.
@@ -65,24 +60,20 @@ description: Glossary of all specialized names and phrases used in the IOTA and
- **Faucet:** A pool of tokens (funds). Upon uncomplicated request, one gets a limited number of tokens for testing, especially for developers of own apps this is a great help.
- **Firefly:** Firefly is a wallet, intended to serve as a platform for the current and future IOTA and Shimmer ecosystem.
- **Finality:** The property that once a transaction has been completed, there is no way to reverse or change it. This is the moment when the parties involved in a transfer can consider the transaction completed. Finality can be deterministic or probabilistic.
-- **Full nodes (Hornet, Bee):** They form the core (infrastructure) of the network. In order to participate in the peer-to-peer network, the full node must always be online and connected to neighbors (other full nodes). In addition, the transaction database must be synchronized with all other full nodes in the network. The role of full nodes is to interact with clients (wallets, DApps, etc.) and attach their transactions to the ledger, make transactions known to all other full nodes in the network, validate transactions and store them in the ledger.
+- **Full nodes (Hornet):** They form the core (infrastructure) of the network. In order to participate in the peer-to-peer network, the full node must always be online and connected to neighbors (other full nodes). In addition, the transaction database must be synchronized with all other full nodes in the network. The role of full nodes is to interact with clients (wallets, DApps, etc.) and attach their transactions to the ledger, make transactions known to all other full nodes in the network, validate transactions and store them in the ledger.
- **Future Cone:** All messages that directly or indirectly reference a message are called its future cone.
- **Fork:** In IT, this is a new development branch after a project is split into a second follow-on project; the source code or parts of it are developed independently of the original parent project.
-- **FPC(Fast Probabilistic Consensus):** Consensus that uses a random number and node opinions to reach consensus. In On-Tangle Voting, it is only used in a specific edge case. Check out OTVFPCS.
## G
- **Genesis transaction:** The Genesis transaction is the first transaction that created all IOTA and Shimmer tokens and distributed them to the addresses of the buyers.
-- **GoShimmer (No Main net):** Prototype of the coordinator less version of IOTA written in the Go programming language. GoShimmer implements the various modules of Coordicide, such as auto peering, node identities, Mana, etc. GoShimmer serves as a test environment for the first alpha version and the test network. Everything tested here will be gradually merged with Hornet and Bee.
-- **Generic Data Object (IOTA 2.0):** The most basic object type. All unrecognized data objects are defined this way.
## H
- **History:** The list of transactions that were directly or indirectly authorized by a particular transaction.
- **Hash values:** Checksums that are applied to the encryption of messages of variable length. Hash values are like fingerprints of a very long data set. Each message is assigned a very specific hash value.
- **Hooks:** An interface that allows foreign program code to be integrated into an existing application to extend it, change its flow, or intercept certain events.
-- **Hornet Node (IOTA 1.5):** Community-developed IOTA Node written in the Go programming language. In addition, the coordinator also runs as a plugin via Hornet.
-- **Hornet Node (Shimmer):** Shimmer Node written in the Go programming language. In addition, the coordinator also runs as a plugin via Hornet.
+- **Hornet Node:** Node software written in the Go programming language.
## I
@@ -119,7 +110,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and
- **Object (IOTA 2.0):** the most basic unit of information in the IOTA protocol. Each object has a type and size and contains data.
- **Oracles:** Oracles are designed to build a secure bridge between the digital and physical worlds in a decentralized, permissionless way. They bring off-chain data to decentralized applications and smart contracts on the network.
- **OTV (IOTA 2.0):** On Tangle Voting is the official name for the multiverse consensus described by Hans Moog. It is a new consensus mechanism that allows nodes to vote on conflicts directly by publishing a message to the tangle.
-- **OTVFPCS (IOTA 2.0):** On Tangle Voting with FPCS (Fast Probabilistic Consensus on a Set) is a mechanism for breaking metastability, which can be used in addition to OTV (On Tangle Voting). Generally, in IOTA2.0, reaching a high approval weight is the finality criteria. If the approval weight is high enough, the message / transaction is finalized. With OTVFPC the initial opinion is created with OTV, if after some time the opinions of the nodes are still split, for whatever reason, FPC is activated to break this metastable state. The finality of value transactions should be reached faster this way.
## P
@@ -133,7 +123,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and
- **Peer to Peer Network:** A decentralized network of different network nodes that are connected to each other and exchange data.
- **Peering:** The process of discovering and connecting to other network nodes.
- **Payload (IOTA 2.0):** A field in a message that determines the type. Examples are value payload (TransactionType type), FPC opinion payload (StatementType type), dRNG payload (Payload), Salt declaration payload, generic data payload.
-- **Private Tangle:** A private tangle is comparable to a test network under complete control of the operator. This allows companies and developers to test their applications under self-defined environment variables without external influences and protected from prying eyes. There is no interoperability between a private Tangle and the IOTA or Shimmer Tangle. So, sending from one to the other does not work either. Each private Tangle is an independent network with its own nodes, tokens, and coordinator.
- **Proof of Work (PoW):** A time-consuming (expensive) mathematical calculation that uses computational power to prevent spam attacks. It consists of a difficult cryptographic puzzle that is easy to verify.
- **Proof of Inclusion (PoI):** With PoI, one is able to provide evidence that a transaction was indirectly referenced by another transaction without having to present the full chain of actual transactions between the two transactions. This is done by using a sequence of hashes instead of the actual transaction data to prove the inclusion of a transaction (inclusion) in the referenced subtangle.
- **Pruning:** In computer science, this is a term for simplifying, shortening, and optimizing decision trees. In Shimmer, this is done by local snapshots on each full node. Old transactions that have already been confirmed are deleted from the database, leaving only a file (list) of credits on each address.
@@ -167,7 +156,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and
- **Tangle:** The Tangle is the underlying core data structure. In mathematical terms it is a directed acyclic graph (DAG). The Tangle is the distributed ledger that stores all transactions.
- **Ternary system:** A trit (trinary digit) can have exactly three states (3 x 1 = 3): -1, 0 and 1. Three trits result in one tryte (33 = 27) and can thus represent 27 combinations. In IOTA, the letters A-Z (26 pieces) and the number 9 are used for this purpose.
- **Token:** The digital currency form (cryptocurrency). It is a powerful tool for value transfer between people and machines. Total number: 2,779,530,283,277,761 IOTA. The base units are IOTA and micros.
-- **Trinity (IOTA 1.0):** Depreciated IOTA Wallet
- **Tip:** A transaction that has not yet been approved.
- **Tip Selection:** The process of selecting previous transactions to be referenced by a new transaction. In these references, a transaction ties into the existing data structure. IOTA and Shimmer only enforces that a transaction approves up to eight other transactions, the tip selection strategy is left to the user (with a good default provided by Shimmer).
- **Tip Transaction:** A solid end transaction that is not yet a parent.
@@ -179,7 +167,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and
## V
-- **Value Layer (IOTA 2.0):** The Value layer builds on the Communication layer. It works exclusively with payloads of type Value object. This layer has several tasks: Forming the ledger state, processing, validation and output of transactions, conflict detection, conflict resolution via FPC, forming a DAG from value objects, tip selection (on value object tips).
- **Value Transactions:** Value transactions either withdraw tokens from an address or deposit them to an address. Nodes must verify these transactions to ensure that the sender actually owns the Shimmer tokens and that additional tokens are never generated. To do this, the following checks are performed: All Shimmer tokens withdrawn from an address are also deposited into one or more other addresses; the value of each transaction does not exceed the total global supply; signatures are valid.
- **Version Number (IOTA 2.0):** Indicates the correct format of each type.
diff --git a/docs/maintain/getting-started/welcome.md b/docs/maintain/getting-started/welcome.md
index 09b7d3bce60..b5e2718a22a 100644
--- a/docs/maintain/getting-started/welcome.md
+++ b/docs/maintain/getting-started/welcome.md
@@ -17,11 +17,6 @@ IOTA network as it is made entirely of Hornet nodes. You can use the documentati
[Chronicle](/chronicle/welcome) is a permanode solution that allows you to store and retrieve IOTA
messages and data in real time.
-### GoShimmer
-
-[GoShimmer](/goshimmer/welcome) is an experimental node software for IOTA's Coordicide aimed at
-removing the Coordinator from the IOTA networks.
-
## Layer 2
### WASP - IOTA Smart Contracts
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/autopeering.md b/docs/maintain/goshimmer/0.9/docs/apis/autopeering.md
deleted file mode 100644
index ff4409e801e..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/autopeering.md
+++ /dev/null
@@ -1,122 +0,0 @@
----
-description: The peering API allows retrieving basic information about autopeering using the /autopeering/neighbors endpoint or the GetAutopeeringNeighbors() function in the client lib.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - peering api methods
- - neighbors
- - accepted neighbors
- - known peer
----
-
-# Peering API Methods
-
-The peering API allows retrieving basic information about autopeering.
-
-The API provides the following functions and endpoints:
-
-- [/autopeering/neighbors](#autopeeringneighbors)
-
-Client lib APIs:
-
-- [GetAutopeeringNeighbors()](#client-lib---getautopeeringneighbors)
-
-## `/autopeering/neighbors`
-
-Returns the chosen and accepted neighbors of the node.
-
-### Parameters
-
-| **Parameter** | `known` |
-| ------------------------ | ------------------------------------------------- |
-| **Required or Optional** | optional |
-| **Description** | Return all known peers, set to `1` (default: `0`) |
-| **Type** | int |
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location 'http://localhost:8080/autopeering/neighbors?known=1'
-```
-
-#### Client lib - `GetAutopeeringNeighbors`
-
-Blocks can be retrieved via `GetAutopeeringNeighbors(knownPeers bool) (*jsonmodels.GetNeighborsResponse, error)`
-
-```go
-neighbors, err := goshimAPI.GetAutopeeringNeighbors(false)
-if err != nil {
- // return error
-}
-
-// will print the response
-fmt.Println(string(neighbors))
-```
-
-#### Response examples
-
-```json
-{
- "chosen": [
- {
- "id": "PtBSYhniWR2",
- "publicKey": "BogpestCotcmbB2EYKSsyVMywFYvUt1MwGh6nUot8g5X",
- "services": [
- {
- "id": "peering",
- "address": "178.254.42.235:14626"
- },
- {
- "id": "gossip",
- "address": "178.254.42.235:14666"
- }
- ]
- }
- ],
- "accepted": [
- {
- "id": "CRPFWYijV1T",
- "publicKey": "GUdTwLDb6t6vZ7X5XzEnjFNDEVPteU7tVQ9nzKLfPjdo",
- "services": [
- {
- "id": "peering",
- "address": "35.214.101.88:14626"
- },
- {
- "id": "gossip",
- "address": "35.214.101.88:14666"
- }
- ]
- }
- ]
-}
-```
-
-#### Results
-
-- Returned type
-
-| Return field | Type | Description |
-| :----------- | :----------- | :-------------------------------------------------------- |
-| `known` | `[]Neighbor` | List of known peers. Only returned when parameter is set. |
-| `chosen` | `[]Neighbor` | List of chosen peers. |
-| `accepted` | `[]Neighbor` | List of accepted peers. |
-| `error` | `string` | Error block. Omitted if success. |
-
-- Type `Neighbor`
-
-| field | Type | Description |
-| :---------- | :-------------- | :------------------------------------ |
-| `id` | `string` | Comparable node identifier. |
-| `publicKey` | `string` | Public key used to verify signatures. |
-| `services` | `[]PeerService` | List of exposed services. |
-
-- Type `PeerService`
-
-| field | Type | Description |
-| :-------- | :------- | :------------------------------ |
-| `id` | `string` | Type of service. |
-| `address` | `string` | Network address of the service. |
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/client_lib.md b/docs/maintain/goshimmer/0.9/docs/apis/client_lib.md
deleted file mode 100644
index c861672f72b..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/client_lib.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-description: GoShimmer ships with a client Go library which communicates with the HTTP API.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - api
- - HTTP API
- - golang
----
-
-# Client Lib: Interaction With Layers
-
-:::info
-
-This guide is meant for developers familiar with the Go programming language.
-
-:::
-
-GoShimmer ships with a client Go library which communicates with the HTTP API. Please refer to the [godoc.org docs](https://godoc.org/github.com/iotaledger/goshimmer/client) for function/structure documentation. There is also a set of APIs which do not directly have anything to do with the different layers. Since they are so simple, simply extract their usage from the GoDocs.
-
-# Use the API
-
-Simply `go get` the lib via:
-
-```shell
-go get github.com/iotaledger/goshimmer/client
-```
-
-Init the API by passing in the API URI of your GoShimmer node:
-
-```go
-goshimAPI := client.NewGoShimmerAPI("http://mynode:8080")
-```
-
-Optionally, define your own `http.Client` to use, in order for example to define custom timeouts:
-
-```go
-goshimAPI := client.NewGoShimmerAPI("http://mynode:8080", client.WithHTTPClient{Timeout: 30 * time.Second})
-```
-
-#### A note about errors
-
-The API issues HTTP calls to the defined GoShimmer node. Non 200 HTTP OK status codes will reflect themselves as `error` in the returned arguments. Meaning that for example calling for attachments with a non existing/available transaction on a node, will return an `error` from the respective function. (There might be exceptions to this rule)
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/communication.md b/docs/maintain/goshimmer/0.9/docs/apis/communication.md
deleted file mode 100644
index 08f7742ba49..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/communication.md
+++ /dev/null
@@ -1,309 +0,0 @@
----
-description: The communication layer represents the base Tangle layer where so called `Blocks` are gossiped around. A `Block` contains payloads, and it is up to upper layers to interpret and derive functionality out of them.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - block
- - encoded block id
- - consensus
- - payload
----
-
-# Communication Layer APIs
-
-The communication layer represents the base Tangle layer where so called `Blocks` are gossiped around. A `Block` contains payloads and it is up to upper layers to interpret and derive functionality out of them.
-
-The API provides the following functions to interact with this primitive layer:
-
-- [/blocks/:blockID](#blocksblockid)
-- [/blocks/:blockID/metadata](#blocksblockidmetadata)
-- [/data](#data)
-- [/blocks/payload](#blockspayload)
-
-Client lib APIs:
-
-- [GetBlock()](#client-lib---getblock)
-- [GetBlockMetadata()](#client-lib---getblockmetadata)
-- [Data()](#client-lib---data)
-- [SendPayload()](#client-lib---sendpayload)
-
-## `/blocks/:blockID`
-
-Return block from the tangle.
-
-### Parameters
-
-| **Parameter** | `blockID` |
-| ------------------------ | ------------------------- |
-| **Required or Optional** | required |
-| **Description** | ID of a block to retrieve |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request GET 'http://localhost:8080/blocks/:blockID'
-```
-
-where `:blockID` is the base58 encoded block ID, e.g. 4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc.
-
-#### Client lib - `GetBlock`
-
-Blocks can be retrieved via `GetBlock(base58EncodedID string) (*jsonmodels.Block, error)`
-
-```go
-block, err := goshimAPI.GetBlock(base58EncodedBlockID)
-if err != nil {
- // return error
-}
-
-// will print "Hello GoShimmer World"
-fmt.Println(string(block.Payload))
-```
-
-Note that we're getting actual `Block` objects from this call which represent a vertex in the communication layer Tangle. It does not matter what type of payload the block contains, meaning that this will also return blocks which contain a transactions or DRNG payloads.
-
-### Response Examples
-
-```json
-{
- "id": "4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc",
- "strongParents": [
- "6LrXyDCorw8bTWKFaEmm3CZG6Nb6Ga8Bmosi1GPypGc1",
- "B89koPthm9zDx1p1fbkHwoyC1Buq896Spu3Mx1SmSete"
- ],
- "weakParents": [],
- "strongChildren": [
- "4E4ucAA9UTTd1UC6ri4GYaS4dpzEnHPjs5gMEYhpUK8p",
- "669BRH69afQ7VfZGmNTMTeh2wnwXGKdBxtUCcRQ9CPzq"
- ],
- "weakChildren": [],
- "issuerPublicKey": "9DB3j9cWYSuEEtkvanrzqkzCQMdH1FGv3TawJdVbDxkd",
- "issuingTime": 1621873309,
- "sequenceNumber": 4354,
- "payloadType": "GenericDataPayloadType(0)",
- "payload": "BAAAAAAAAAA=",
- "signature": "2J5XuVnmaHo54WipirWo7drJeXG3iRsnLYfzaPPuy6TXKiVBqv6ZYg2NjYP75xvgvut1SKNm8oYTchGi5t2SjyWJ"
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :---------------- | :--------- | :---------------------------------- |
-| `id` | `string` | Block ID. |
-| `strongParents` | `[]string` | List of strong parents' block IDs. |
-| `weakParents` | `[]string` | List of weak parents' block IDs. |
-| `strongChildren` | `[]string` | List of strong children' block IDs. |
-| `weakChildren` | `[]string` | List of weak children' block IDs. |
-| `issuerPublicKey` | `[]string` | Public key of issuing node. |
-| `issuingTime` | `int64` | Time this block was issued |
-| `sequenceNumber` | `uint64` | Block sequence number. |
-| `payloadType` | `string` | Payload type. |
-| `payload` | `[]byte` | The contents of the block. |
-| `signature` | `string` | Block signature. |
-| `error` | `string` | Error block. Omitted if success. |
-
-## `/blocks/:blockID/metadata`
-
-Return block metadata.
-
-### Parameters
-
-| **Parameter** | `blockID` |
-| ------------------------ | ------------------------- |
-| **Required or Optional** | required |
-| **Description** | ID of a block to retrieve |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request GET 'http://localhost:8080/blocks/:blockID/metadata'
-```
-
-where `:blockID` is the base58 encoded block ID, e.g. 4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc.
-
-#### Client lib - `GetBlockMetadata`
-
-Block metadata can be retrieved via `GetBlockMetadata(base58EncodedID string) (*jsonmodels.BlockMetadata, error)`
-
-```go
-block, err := goshimAPI.GetBlockMetadata(base58EncodedBlockID)
-if err != nil {
- // return error
-}
-
-// will print whether block is finalized
-fmt.Println(string(block.Finalized))
-```
-
-### Response Examples
-
-```json
-{
- "id": "4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc",
- "receivedTime": 1621873309,
- "solid": true,
- "solidificationTime": 1621873309,
- "structureDetails": {
- "rank": 23323,
- "pastMarkerGap": 0,
- "isPastMarker": true,
- "pastMarkers": {
- "markers": {
- "1": 21904
- },
- "highestIndex": 21904,
- "lowestIndex": 21904
- }
- },
- "conflictID": "ConflictID(MasterConflictID)",
- "scheduled": false,
- "booked": true,
- "invalid": false,
- "confirmationState": 3,
- "confirmationStateTime": 1621873310
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :----------------- | :---------------------------------------------- |
-| `id` | `string` | Block ID. |
-| `receivedTime` | `int64` | Time when block was received by the node. |
-| `solid` | `bool` | Flag indicating whether the block is solid. |
-| `solidificationTime` | `int64` | Time when block was solidified by the node. |
-| `structureDetails` | `StructureDetails` | List of weak children' block IDs. |
-| `conflictID` | `string` | Name of conflict that the block is part of. |
-| `scheduled` | `bool` | Flag indicating whether the block is scheduled. |
-| `booked` | `bool` | Flag indicating whether the block is booked. |
-| `eligible` | `bool` | Flag indicating whether the block is eligible. |
-| `invalid` | `bool` | Flag indicating whether the block is invalid. |
-| `finalized` | `bool` | Flag indicating whether the block is finalized. |
-| `finalizedTime` | `string` | Time when block was finalized. |
-| `error` | `string` | Error block. Omitted if success. |
-
-## `/data`
-
-Method: `POST`
-
-A data block is simply a `Block` containing some raw data (literally bytes). This type of block has therefore no real functionality other than that it is retrievable via `GetBlock`.
-
-### Parameters
-
-| **Parameter** | `data` |
-| ------------------------ | ----------------------- |
-| **Required or Optional** | required |
-| **Description** | data bytes |
-| **Type** | base64 serialized bytes |
-
-#### Body
-
-```json
-{
- "data": "dataBytes"
-}
-```
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request POST 'http://localhost:8080/data' \
---header 'Content-Type: application/json' \
---data-raw '{"data": "dataBytes"}'
-```
-
-#### Client lib - `Data`
-
-##### `Data(data []byte) (string, error)`
-
-```go
-blockID, err := goshimAPI.Data([]byte("Hello GoShimmer World"))
-if err != nil {
- // return error
-}
-```
-
-Note that there is no need to do any additional work, since things like tip-selection, PoW and other tasks are done by the node itself.
-
-### Response Examples
-
-```json
-{
- "id": "blockID"
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------- | :--------------------------------------- |
-| `id` | `string` | Block ID of the block. Omitted if error. |
-| `error` | `string` | Error block. Omitted if success. |
-
-## `/blocks/payload`
-
-Method: `POST`
-
-`SendPayload()` takes a `payload` object of any type (data, transaction, drng, etc.) as a byte slice, issues a block with the given payload and returns its `blockID`. Note that the payload must be valid, otherwise an error is returned.
-
-### Parameters
-
-| **Parameter** | `payload` |
-| ------------------------ | ----------------------- |
-| **Required or Optional** | required |
-| **Description** | payload bytes |
-| **Type** | base64 serialized bytes |
-
-#### Body
-
-```json
-{
- "payload": "payloadBytes"
-}
-```
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request POST 'http://localhost:8080/blocks/payload' \
---header 'Content-Type: application/json' \
---data-raw '{"payload": "payloadBytes"}'
-```
-
-#### Client lib - `SendPayload`
-
-##### `SendPayload(payload []byte) (string, error)`
-
-```go
-helloPayload := payload.NewData([]byte{"Hello GoShimmer World!"})
-blockID, err := goshimAPI.SendPayload(helloPayload.Bytes())
-```
-
-### Response Examples
-
-```shell
-{
- "id": "blockID"
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------- | :--------------------------------------- |
-| `id` | `string` | Block ID of the block. Omitted if error. |
-| `error` | `string` | Error block. Omitted if success. |
-
-Note that there is no need to do any additional work, since things like tip-selection, PoW and other tasks are done by the node itself.
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/faucet.md b/docs/maintain/goshimmer/0.9/docs/apis/faucet.md
deleted file mode 100644
index 335b25f4fdc..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/faucet.md
+++ /dev/null
@@ -1,115 +0,0 @@
----
-description: The Faucet endpoint allows requesting funds from the Faucet.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - tokens
- - funds
- - address
- - faucet
- - testnet
- - node Id
----
-
-# Faucet API Methods
-
-Faucet endpoint allows requesting funds from the Faucet.
-
-The API provides the following functions and endpoints:
-
-- [/faucet](#faucet)
-
-Client lib APIs:
-
-- [SendFaucetRequest()](#client-lib---sendfaucetrequest)
-
-## `/faucet`
-
-Method: `POST`
-
-POST request asking for funds from the faucet to be transferred to address in the request.
-
-### Parameters
-
-| **Parameter** | `address` |
-| ------------------------ | -------------------------- |
-| **Required or Optional** | required |
-| **Description** | address to pledge funds to |
-| **Type** | string |
-
-| **Parameter** | `accessManaPledgeID` |
-| ------------------------ | -------------------------------- |
-| **Required or Optional** | optional |
-| **Description** | node ID to pledge access mana to |
-| **Type** | string |
-
-| **Parameter** | `consensusManaPledgeID` |
-| ------------------------ | ----------------------------------- |
-| **Required or Optional** | optional |
-| **Description** | node ID to pledge consensus mana to |
-| **Type** | string |
-
-| **Parameter** | `powTarget` |
-| ------------------------ | ------------------------------------------------------ |
-| **Required or Optional** | required |
-| **Description** | proof of the PoW being done, **only used in HTTP api** |
-| **Type** | uint64 |
-
-| **Parameter** | `nonce` |
-| ------------------------ | ----------------------------------------------------------- |
-| **Required or Optional** | required |
-| **Description** | target Proof of Work difficulty,**only used in client lib** |
-| **Type** | uint64 |
-
-#### Body
-
-```json
-{
- "address": "target address",
- "accessManaPledgeID": "nodeID",
- "consensusManaPledgeID": "nodeID",
- "nonce": 50
-}
-```
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request POST 'http://localhost:8080/faucet' \
---header 'Content-Type: application/json' \
---data-raw '{
- "address": "target address",
- "accessManaPledgeID": "nodeID",
- "consensusManaPledgeID": "nodeID",
- "nonce": 50
-}'
-```
-
-#### Client lib - SendFaucetRequest
-
-##### `SendFaucetRequest(base58EncodedAddr string, powTarget int, pledgeIDs ...string) (*jsonmodels.FaucetResponse, error)`
-
-```go
-_, err = webConnector.client.SendFaucetRequest(addr.Address().Base58(), powTarget)
-if err != nil {
- // return error
-}
-```
-
-### Response examples
-
-```json
-{
- "id": "4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc"
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------- | :------------------------------------------------ |
-| `id` | `string` | Block ID of the faucet request. Omitted if error. |
-| `error` | `string` | Error block. Omitted if success. |
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/info.md b/docs/maintain/goshimmer/0.9/docs/apis/info.md
deleted file mode 100644
index c165841f6e4..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/info.md
+++ /dev/null
@@ -1,218 +0,0 @@
----
-description: Info API returns basic info about the node with the /info and /healthz endpoints and the info() function.
-image: /img/logo/goshimmer_light.png
-keywords:
- - info
- - endpoint
- - function
- - health
- - healthz
- - client lib
----
-
-# Info API Methods
-
-Info API returns basic info about the node
-
-The API provides the following functions and endpoints:
-
-- [/info](#info)
-- [/healthz](#healthz)
-
-Client lib APIs:
-
-- [Info()](#client-lib---info)
-
-## `/info`
-
-Returns basic info about the node.
-
-### Parameters
-
-None.
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location 'http://localhost:8080/info'
-```
-
-#### Client lib - `Info`
-
-Information of a node can be retrieved via `Info() (*jsonmodels.InfoResponse, error)`
-
-```go
-info, err := goshimAPI.Info()
-if err != nil {
- // return error
-}
-
-// will print the response
-fmt.Println(string(info))
-```
-
-#### Response example
-
-```json
-{
- "version": "v0.6.2",
- "networkVersion": 30,
- "tangleTime": {
- "blockID": "6ndfmfogpH9H8C9X9Fbb7Jmuf8RJHQgSjsHNPdKUUhoJ",
- "time": 1621879864032595415,
- "synced": true
- },
- "identityID": "D9SPFofAGhA5V9QRDngc1E8qG9bTrnATmpZMdoyRiBoW",
- "identityIDShort": "XBgY5DsUPng",
- "publicKey": "9DB3j9cWYSuEEtkvanrzqkzCQMdH1FGv3TawJdVbDxkd",
- "solidBlockCount": 74088,
- "totalBlockCount": 74088,
- "enabledPlugins": [
- "Activity",
- "AnalysisClient",
- "AutoPeering",
- "Banner",
- "CLI",
- "Clock",
- "Config",
- "Consensus",
- "DRNG",
- "Dashboard",
- "Database",
- "Gossip",
- "GracefulShutdown",
- "Logger",
- "Mana",
- "ManaRefresher",
- "ManualPeering",
- "BlockLayer",
- "Metrics",
- "NetworkDelay",
- "PoW",
- "PortCheck",
- "Profiling",
- "Prometheus",
- "RemoteLog",
- "RemoteLogMetrics",
- "WebAPI",
- "WebAPIDRNGEndpoint",
- "WebAPIManaEndpoint",
- "WebAPIWeightProviderEndpoint",
- "WebAPIAutoPeeringEndpoint",
- "WebAPIDataEndpoint",
- "WebAPIFaucetEndpoint",
- "WebAPIHealthzEndpoint",
- "WebAPIInfoEndpoint",
- "WebAPILedgerstateEndpoint",
- "WebAPIBlockEndpoint",
- "WebAPIToolsEndpoint",
- "snapshot"
- ],
- "disabledPlugins": [
- "AnalysisDashboard",
- "AnalysisServer",
- "Faucet",
- "ManaEventLogger",
- "Spammer",
- "TXStream"
- ],
- "mana": {
- "access": 1,
- "accessTimestamp": "2021-05-24T20:11:05.451224937+02:00",
- "consensus": 10439991680906,
- "consensusTimestamp": "2021-05-24T20:11:05.451228137+02:00"
- },
- "manaDelegationAddress": "1HMQic52dz3xLY2aeDXcDhX53LgbsHghdfD8eGXR1qVHy",
- "mana_decay": 0.00003209,
- "scheduler": {
- "running": true,
- "rate": "5ms",
- "nodeQueueSizes": {}
- },
- "rateSetter": {
- "rate": 20000,
- "size": 0
- }
-}
-```
-
-#### Results
-
-| Return field | Type | Description |
-| :---------------------- | :----------- | :---------------------------------------------------------------------------- |
-| `version` | `String` | Version of GoShimmer. |
-| `networkVersion` | `uint32` | Network Version of the autopeering. |
-| `tangleTime` | `TangleTime` | TangleTime sync status |
-| `identityID` | `string` | Identity ID of the node encoded in base58. |
-| `identityIDShort` | `string` | Identity ID of the node encoded in base58 and truncated to its first 8 bytes. |
-| `publicKey` | `string` | Public key of the node encoded in base58 |
-| `blockRequestQueueSize` | `int` | The number of blocks a node is trying to request from neighbors. |
-| `solidBlockCount` | `int` | The number of solid blocks in the node's database. |
-| `totalBlockCount` | `int` | The number of blocks in the node's database. |
-| `enabledPlugins` | `[]string` | List of enabled plugins. |
-| `disabledPlugins` | `[]string` | List if disabled plugins. |
-| `mana` | `Mana` | Mana values. |
-| `manaDelegationAddress` | `string` | Mana Delegation Address. |
-| `mana_decay` | `float64` | The decay coefficient of `bm2`. |
-| `scheduler` | `Scheduler` | Scheduler is the scheduler used. |
-| `rateSetter` | `RateSetter` | RateSetter is the rate setter used. |
-| `error` | `string` | Error block. Omitted if success. |
-
-- Type `TangleTime`
-
-| field | Type | Description |
-| :-------- | :------- | :------------------------------------------- |
-| `blockID` | `string` | ID of the last confirmed block. |
-| `time` | `int64` | Issue timestamp of the last confirmed block. |
-| `synced` | `bool` | Flag indicating whether node is in sync. |
-
-- Type `Scheduler`
-
-| field | Type | Description |
-| :--------------- | :--------------- | :--------------------------------------------- |
-| `running` | `bool` | Flag indicating whether Scheduler has started. |
-| `rate` | `string` | Rate of the scheduler. |
-| `nodeQueueSizes` | `map[string]int` | The size for each node queue. |
-
-- Type `RateSetter`
-
-| field | Type | Description |
-| :----- | :-------- | :----------------------------- |
-| `rate` | `float64` | The rate of the rate setter.. |
-| `size` | `int` | The size of the issuing queue. |
-
-- Type `Mana`
-
-| field | Type | Description |
-| :------------------- | :---------- | :------------------------------------------- |
-| `access` | `float64` | Access mana assigned to the node. |
-| `accessTimestamp` | `time.Time` | Time when the access mana was calculated. |
-| `consensus` | `float64` | Consensus mana assigned to the node. |
-| `consensusTimestamp` | `time.Time` | Time when the consensus mana was calculated. |
-
-## `/healthz`
-
-Returns HTTP code 200 if everything is running correctly.
-
-### Parameters
-
-None.
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location 'http://localhost:8080/healthz'
-```
-
-#### Client lib
-
-This method is not available in client lib
-
-#### Results
-
-Empty response with HTTP 200 success code if everything is running correctly.
-Error block is returned if failed.
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/ledgerstate.md b/docs/maintain/goshimmer/0.9/docs/apis/ledgerstate.md
deleted file mode 100644
index af14ff8ecee..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/ledgerstate.md
+++ /dev/null
@@ -1,1174 +0,0 @@
----
-description: The ledgerstate API provides endpoints to retrieve address details, unspent outputs for an address, get conflict details, and list child conflicts amongst others.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - addresses
- - conflicts
- - outputs
- - transactions
- - UTXO
- - unspent outputs
----
-
-# Ledgerstate API Methods
-
-## HTTP APIs
-
-- [/ledgerstate/addresses/:address](#ledgerstateaddressesaddress)
-- [/ledgerstate/addresses/:address/unspentOutputs](#ledgerstateaddressesaddressunspentoutputs)
-- [/ledgerstate/conflicts/:conflictID](#ledgerstateconflictsconflictid)
-- [/ledgerstate/conflicts/:conflictID/children](#ledgerstateconflictsconflictidchildren)
-- [/ledgerstate/conflicts/:conflictID/conflicts](#ledgerstateconflictsconflictidconflicts)
-- [/ledgerstate/conflicts/:conflictID/voters](#ledgerstateconflictsconflictidvoters)
-- [/ledgerstate/outputs/:outputID](#ledgerstateoutputsoutputid)
-- [/ledgerstate/outputs/:outputID/consumers](#ledgerstateoutputsoutputidconsumers)
-- [/ledgerstate/outputs/:outputID/metadata](#ledgerstateoutputsoutputidmetadata)
-- [/ledgerstate/transactions/:transactionID](#ledgerstatetransactionstransactionid)
-- [/ledgerstate/transactions/:transactionID/metadata](#ledgerstatetransactionstransactionidmetadata)
-- [/ledgerstate/transactions/:transactionID/attachments](#ledgerstatetransactionstransactionidattachments)
-- [/ledgerstate/transactions](#ledgerstatetransactions)
-- [/ledgerstate/addresses/unspentOutputs](#ledgerstateaddressesunspentoutputs)
-
-## Client Lib APIs
-
-- [GetAddressOutputs()](#client-lib---getaddressoutputs)
-- [GetAddressUnspentOutputs()](#client-lib---getaddressunspentoutputs)
-- [GetConflict()](#client-lib---getconflict)
-- [GetConflictChildren()](#client-lib---getconflictchildren)
-- [GetConflictConflicts()](#client-lib---getconflictconflicts)
-- [GetConflictVoters()](#client-lib---getconflictvoters)
-- [GetOutput()](#client-lib---getoutput)
-- [GetOutputConsumers()](#client-lib---getoutputconsumers)
-- [GetOutputMetadata()](#client-lib---getoutputmetadata)
-- [GetTransaction()](#client-lib---gettransaction)
-- [GetTransactionMetadata()](#client-lib---gettransactionmetadata)
-- [GetTransactionAttachments()](#client-lib---gettransactionattachments)
-- [PostTransaction()](#client-lib---posttransaction)
-- [PostAddressUnspentOutputs()](#client-lib---postaddressunspentoutputs)
-
-## `/ledgerstate/addresses/:address`
-
-Get address details for a given base58 encoded address ID, such as output types and balances. For the client library API call balances will not be directly available as values because they are stored as a raw block. Balance can be read after retrieving `ledgerstate.Output` instance, as presented in the examples.
-
-### Parameters
-
-| **Parameter** | `address` |
-| ------------------------ | ------------------------------ |
-| **Required or Optional** | required |
-| **Description** | The address encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/addresses/:address \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:address` is the base58 encoded address, e.g. 6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK.
-
-#### Client lib - `GetAddressOutputs()`
-
-```Go
-resp, err := goshimAPI.GetAddressOutputs("6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK")
-if err != nil {
- // return error
-}
-fmt.Println("output address: ", resp.Address)
-
-for _, output := range resp.Outputs {
- fmt.Println("outputID: ", output.OutputID)
- fmt.Println("output type: ", output.Type)
- // get output instance
- out, err = output.ToLedgerstateOutput()
-}
-```
-
-### Response Examples
-
-```json
-{
- "address": {
- "type": "AddressTypeED25519",
- "base58": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"
- },
- "outputs": [
- {
- "outputID": {
- "base58": "gdFXAjwsm5kDeGdcZsJAShJLeunZmaKEMmfHSdoX34ZeSs",
- "transactionID": "32yHjeZpghKNkybd2iHjXj7NsUdR63StbJcBioPGAut3",
- "outputIndex": 0
- },
- "type": "SigLockedColoredOutputType",
- "output": {
- "balances": {
- "11111111111111111111111111111111": 1000000
- },
- "address": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"
- }
- }
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------ | :---------------------------------------------- |
-| `address` | Address | The address corresponding to provided outputID. |
-| `outputs` | Output | List of transactions' outputs. |
-
-#### Type `Address`
-
-| Field | Type | Description |
-| :------- | :----- | :------------------------------- |
-| `type` | string | The type of an address. |
-| `base58` | string | The address encoded with base58. |
-
-#### Type `Output`
-
-| Field | Type | Description |
-| :----------- | :------- | :------------------------------------------------------------------- |
-| `outputID` | OutputID | The identifier of an output. |
-| `outputType` | string | The type of the output. |
-| `output` | string | An output raw block containing balances and corresponding addresses. |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-## `/ledgerstate/addresses/:address/unspentOutputs`
-
-Gets list of all unspent outputs for the address based on a given base58 encoded address ID.
-
-### Parameters
-
-| **Parameter** | `address` |
-| ------------------------ | ------------------------------ |
-| **Required or Optional** | required |
-| **Description** | The address encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/addresses/:address/unspentOutputs \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:address` is the base58 encoded address, e.g. 6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK.
-
-#### Client lib - `GetAddressUnspentOutputs()`
-
-```Go
-address := "6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK"
-resp, err := goshimAPI.GetAddressUnspentOutputs(address)
-if err != nil {
- // return error
-}
-fmt.Println("output address: ", resp.Address)
-
-for _, output := range resp.Outputs {
- fmt.Println("outputID: ", output.OutputID)
- fmt.Println("output type: ", output.Type)
- // get output instance
- out, err = output.ToLedgerstateOutput()
-}
-```
-
-### Response Examples
-
-```json
-{
- "address": {
- "type": "AddressTypeED25519",
- "base58": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"
- },
- "outputs": [
- {
- "outputID": {
- "base58": "gdFXAjwsm5kDeGdcZsJAShJLeunZmaKEMmfHSdoX34ZeSs",
- "transactionID": "32yHjeZpghKNkybd2iHjXj7NsUdR63StbJcBioPGAut3",
- "outputIndex": 0
- },
- "type": "SigLockedColoredOutputType",
- "output": {
- "balances": {
- "11111111111111111111111111111111": 1000000
- },
- "address": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"
- }
- }
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------ | :------------------------------------------------------ |
-| `address` | Address | The address corresponding to provided unspent outputID. |
-| `outputs` | Output | List of transactions' unspent outputs. |
-
-#### Type `Address`
-
-| Field | Type | Description |
-| :------- | :----- | :------------------------------- |
-| `type` | string | The type of an address. |
-| `base58` | string | The address encoded with base58. |
-
-#### Type `Output`
-
-| Field | Type | Description |
-| :----------- | :------- | :------------------------------------------------------------------ |
-| `outputID` | OutputID | The identifier of an output. |
-| `outputType` | string | The type of the output. |
-| `output` | string | An output raw block containing balances and corresponding addresses |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-## `/ledgerstate/conflicts/:conflictID`
-
-Gets a conflict details for a given base58 encoded conflict ID.
-
-### Parameters
-
-| **Parameter** | `conflictID` |
-| ------------------------ | ---------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The conflict ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/conflicts/:conflictID \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ.
-
-#### Client lib - `GetConflict()`
-
-```Go
-resp, err := goshimAPI.GetConflict("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ")
-if err != nil {
- // return error
-}
-fmt.Println("conflict ID: ", resp.ID)
-fmt.Println("conflict type: ", resp.Type)
-fmt.Println("conflict inclusion state: ", resp.ConfirmationState)
-fmt.Println("conflict parents IDs: ", resp.Parents)
-fmt.Println("conflict conflicts IDs: ", resp.ConflictIDs)
-fmt.Printf("liked: %v, finalized: %v, monotonically liked: %v", resp.Liked, resp.Finalized, resp.MonotonicallyLiked)
-```
-
-### Response Examples
-
-```json
-{
- "id": "5v6iyxKUSSF73yoZa6YngNN5tqoX8hJQWKGXrgcz3XTg",
- "type": "ConflictConflictType",
- "parents": ["4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM"],
- "conflictIDs": ["3LrHecDf8kvDGZKTAYaKmvdsqXA18YBc8A9UePu7pCxw5ks"],
- "liked": false,
- "monotonicallyLiked": false,
- "finalized": false,
- "confirmationState": "ConfirmationState(Pending)"
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :------- | :-------------------------------------------------------- |
-| `id` | string | The conflict identifier encoded with base58. |
-| `type` | string | The type of the conflict. |
-| `parents` | []string | The list of parent conflicts IDs. |
-| `conflictIDs` | []string | The list of conflicts identifiers. |
-| `liked` | bool | The boolean indicator if conflict is liked. |
-| `monotonicallyLiked` | bool | The boolean indicator if conflict is monotonically liked. |
-| `finalized` | bool | The boolean indicator if conflict is finalized. |
-| `confirmationState` | string | Confirmation state of a conflict. |
-
-## `/ledgerstate/conflicts/:conflictID/children`
-
-Gets a list of all child conflicts for a conflict with given base58 encoded conflict ID.
-
-### Parameters
-
-| **Parameter** | `conflictID` |
-| ------------------------ | ---------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The conflict ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/conflicts/:conflictID/children \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ.
-
-#### Client lib - `GetConflictChildren()`
-
-```Go
-resp, err := goshimAPI.GetConflictChildren("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ")
-if err != nil {
- //return error
-}
-fmt.Printf("All children conflicts for conflict %s:\n", resp.ConflictID)
-for _, conflict := range resp.ChildConflicts {
- fmt.Println("conflictID: ", conflict.ConflictID)
- fmt.Printf("type: %s\n", conflict.ConflictID)
-}
-```
-
-### Response Examples
-
-```json
-{
- "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "childConflicts": [
- {
- "conflictID": "4SdXm5NXEcVogiJNEKkecqd5rZzRYeGYBj8oBNsdX91W",
- "type": "AggregatedConflictType"
- }
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :--------------- | :-------------- | :------------------------------------------- |
-| `conflictID` | string | The conflict identifier encoded with base58. |
-| `childConflicts` | []ChildConflict | The child conflicts data. |
-
-#### Type `ChildConflict`
-
-| Field | Type | Description |
-| :----------- | :----- | :------------------------------------------- |
-| `conflictID` | string | The conflict identifier encoded with base58. |
-| `type` | string | The type of the conflict. |
-
-## `/ledgerstate/conflicts/:conflictID/conflicts`
-
-Get all conflicts for a given conflict ID, their outputs and conflicting conflicts.
-
-### Parameters
-
-| **Parameter** | `conflictID` |
-| ------------------------ | ---------------------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The conflicting conflict ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/conflicts/:conflictID/conflicts \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ.
-
-#### Client lib - `GetConflictConflicts()`
-
-```Go
-resp, err := goshimAPI.GetConflictConflicts("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ")
-if err != nil {
- // return error
-}
-fmt.Printf("All conflicts for conflict %s:\n", resp.ConflictID)
-// iterate over all conflicts
-for _, conflict := range resp.Conflicts {
- fmt.Println("output ID: ", conflict.OutputID.Base58)
- fmt.Println("conflicting transaction ID: ", conflict.OutputID.TransactionID)
- fmt.Printf("related conflicts: %v\n", conflict.ConflictIDs)
-}
-```
-
-### Response Examples
-
-```json
-{
- "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "conflicts": [
- {
- "outputID": {
- "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK",
- "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g",
- "outputIndex": 0
- },
- "conflictIDs": [
- "b8QRhHerfg14cYQ4VFD7Fyh1HYTCbjt9aK1XJmdoXwq",
- "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV"
- ]
- }
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :--------- | :------------------------------------------- |
-| `conflictID` | string | The conflict identifier encoded with base58. |
-| `conflicts` | []Conflict | The conflict data. |
-
-#### Type `Conflict`
-
-| Field | Type | Description |
-| :------------ | :------- | :---------------------------------------------------------- |
-| `outputID` | OutputID | The conflict identifier encoded with base58. |
-| `conflictIDs` | []string | The identifiers of all related conflicts encoded in base58. |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-## `/ledgerstate/conflicts/:conflictID/voters`
-
-Get a list of voters of a given conflictID.
-
-| **Parameter** | `conflictID` |
-| ------------------------ | ---------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The conflict ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/conflicts/:conflictID/voters \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ.
-
-#### Client lib - `GetConflictVoters()`
-
-```Go
-resp, err := goshimAPI.GetConflictVoters("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ")
-if err != nil {
- // return error
-}
-fmt.Printf("All voters for conflict %s:\n", resp.ConflictID)
-// iterate over all voters
-for _, voter := range resp.Voters {
- fmt.Println("ID: ", voter)
-}
-```
-
-### Response examples
-
-```json
-{
- "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "voters": [
- "b8QRhHerfg14cYQ4VFD7Fyh1HYTCbjt9aK1XJmdoXwq",
- "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK"
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :-------- | :------------------------------------------- |
-| `conflictID` | string | The conflict identifier encoded with base58. |
-| `voters` | [] string | The list of conflict voter IDs |
-
-## `/ledgerstate/outputs/:outputID`
-
-Get an output details for a given base58 encoded output ID, such as output types, addresses, and their corresponding balances.
-For the client library API call balances will not be directly available as values because they are stored as a raw block.
-
-### Parameters
-
-| **Parameter** | `outputID` |
-| ------------------------ | -------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The output ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/outputs/:outputID \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:outputID` is the ID of the output, e.g. 41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK.
-
-#### Client lib - `GetOutput()`
-
-```Go
-resp, err := goshimAPI.GetOutput("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK")
-if err != nil {
- // return error
-}
-fmt.Println("outputID: ", resp.OutputID.Base58)
-fmt.Println("output type: ", resp.Type)
-fmt.Println("transactionID: ", resp.OutputID.TransactionID)
-```
-
-### Response Examples
-
-```json
-{
- "outputID": {
- "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK",
- "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g",
- "outputIndex": 0
- },
- "type": "SigLockedColoredOutputType",
- "output": {
- "balances": {
- "11111111111111111111111111111111": 1000000
- },
- "address": "1F95a2yceDicNLvqod6P3GLFZDAFdwizcTTYow4Y1G3tt"
- }
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------- | :------------------------------------------------------------------ |
-| `outputID` | OutputID | The identifier of an output. |
-| `outputType` | string | The type of the output. |
-| `output` | string | An output raw block containing balances and corresponding addresses |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-## `/ledgerstate/outputs/:outputID/consumers`
-
-Get a list of consumers based on a provided base58 encoded output ID. Transactions that contains the output and information about its validity.
-
-### Parameters
-
-| **Parameter** | `outputID` |
-| ------------------------ | -------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The output ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/outputs/:outputID/consumers \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:outputID` is the ID of the output, e.g. 41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK.
-
-#### Client lib - `GetOutputConsumers()`
-
-```Go
-resp, err := goshimAPI.GetOutputConsumers("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK")
-if err != nil {
- // return error
-}
-fmt.Println("outputID: ", resp.OutputID.Base58)
-// iterate over output consumers
-for _, consumer := range resp.Consumers {
- fmt.Println("transactionID: ", consumer.TransactionID)
- fmt.Println("valid: ", consumer.Valid)
-}
-```
-
-### Response Examples
-
-```json
-{
- "outputID": {
- "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK",
- "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g",
- "outputIndex": 0
- },
- "consumers": [
- {
- "transactionID": "b8QRhHerfg14cYQ4VFD7Fyh1HYTCbjt9aK1XJmdoXwq",
- "valid": "true"
- },
- {
- "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "valid": "true"
- }
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :--------- | :----------------------------------------- |
-| `outputID` | OutputID | The output identifier encoded with base58. |
-| `consumers` | []Consumer | Consumers of the requested output. |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-#### Type `Consumers`
-
-| Field | Type | Description |
-| :-------------- | :----- | :------------------------------------------------- |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `valid` | string | The boolean indicator if the transaction is valid. |
-
-## `/ledgerstate/outputs/:outputID/metadata`
-
-Gets an output metadata for a given base58 encoded output ID.
-
-### Parameters
-
-| **Parameter** | `outputID` |
-| ------------------------ | -------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The output ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/outputs/:outputID/metadata \
--X GET \
--H 'Content-Type: application/json'
-
-```
-
-where `:outputID` is the ID of the output, e.g. 41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK.
-
-#### Client lib - `GetOutputMetadata()`
-
-```Go
-resp, err := goshimAPI.GetOutputMetadata("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK")
-if err != nil {
- // return error
-}
-fmt.Printf("Metadata of an output %s:\n", resp.OutputID.Base58)
-fmt.Println("conflictID: ", resp.ConflictID)
-fmt.Println("first consumer: ", resp.FirstConsumer)
-fmt.Println("number of consumers: ", resp.ConsumerCount)
-fmt.Printf("finalized: %v, solid: %v\n", resp.Finalized, resp.Solid)
-fmt.Println("solidification time: ", time.Unix(resp.SolidificationTime, 0))
-```
-
-### Response Examples
-
-```json
-{
- "outputID": {
- "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK",
- "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g",
- "outputIndex": 0
- },
- "conflictID": "4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM",
- "solid": true,
- "solidificationTime": 1621889327,
- "consumerCount": 2,
- "firstConsumer": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "finalized": true
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :------- | :----------------------------------------------------- |
-| `outputID` | OutputID | The output identifier encoded with base58. |
-| `conflictID` | string | The identifier of the conflict encoded with base58. |
-| `solid` | bool | The boolean indicator if the block is solid. |
-| `solidificationTime` | int64 | The time of solidification of a block. |
-| `consumerCount` | int | The number of consumers. |
-| `firstConsumer` | string | The first consumer of the output. |
-| `finalized` | bool | The boolean indicator if the transaction is finalized. |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-## `/ledgerstate/transactions/:transactionID`
-
-Gets a transaction details for a given base58 encoded transaction ID.
-
-### Parameters
-
-| **Parameter** | `transactionID` |
-| ------------------------ | ------------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The transaction ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/transactions/:transactionID \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:transactionID` is the ID of the conflict, e.g. HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV.
-
-#### Client lib - `GetTransaction()`
-
-```Go
-resp, err := goshimAPI.GetTransaction("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK")
-if err != nil {
- // return error
-}
-fmt.Println("transaction inputs:")
-for _, input := range resp.Inputs {
- fmt.Println("inputID:", input.ReferencedOutputID.Base58)
-}
-fmt.Println("transaction outputs:")
-for _, output := range resp.Outputs{
- fmt.Println("outputID:", output.OutputID.Base58)
- fmt.Println("output type:", output.Type)
-}
-fmt.Println("access mana pledgeID:", resp.AccessPledgeID)
-fmt.Println("consensus mana pledgeID:", resp.ConsensusPledgeID)
-```
-
-### Response Examples
-
-```json
-{
- "version": 0,
- "timestamp": 1621889348,
- "accessPledgeID": "DsHT39ZmwAGrKQe7F2rAjwHseUnJeY89gDPEH1FJxYdH",
- "consensusPledgeID": "DsHT39ZmwAGrKQe7F2rAjwHseUnJeY89gDPEH1FJxYdH",
- "inputs": [
- {
- "type": "UTXOInputType",
- "referencedOutputID": {
- "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK",
- "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g",
- "outputIndex": 0
- }
- }
- ],
- "outputs": [
- {
- "outputID": {
- "base58": "6gMWUCgJDozmyLeGzW3ibGFicEq2wbhsxgAw8rUVPvn9bj5",
- "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "outputIndex": 0
- },
- "type": "SigLockedColoredOutputType",
- "output": {
- "balances": {
- "11111111111111111111111111111111": 1000000
- },
- "address": "1HrUn1jWAjrMU58LLdFhfnWBwUKVdWjP5ojp7oCL9mVWs"
- }
- }
- ],
- "unlockBlocks": [
- {
- "type": "SignatureUnlockBlockType",
- "publicKey": "12vNcfgRHLSsobeqZFrjFRcVAmFQbDVniguPnEoxmkbG",
- "signature": "4isq3qzhY4MwbSeYM2NgRn5noWAyh5rqD12ruiTQ7P89TfXNecwHZ5nbpDc4UB7md1bkfM1xYtSh18FwLqK8HAC6"
- }
- ],
- "dataPayload": ""
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------ | :------------ | :--------------------------------------------------------------------------------------------------- |
-| `version` | uint8 | The version of the transaction essence. |
-| `timestamp` | int64 | The issuing time of the transaction. |
-| `accessPledgeID` | string | The node ID indicating to which node pledge the access mana. |
-| `consensusPledgeID` | string | The node ID indicating to which node pledge the consensus mana. |
-| `inputs` | []Input | The inputs of the transaction. |
-| `outputs` | []Output | The outputs of the transaction. |
-| `unlockBlocks` | []UnlockBlock | The unlock block containing signatures unlocking the inputs or references to previous unlock blocks. |
-| `dataPayload` | []byte | The raw data payload that can be attached to the transaction. |
-
-#### Type `Input`
-
-| Field | Type | Description |
-| :------------------- | :----------------- | :---------------------------------------------------------- |
-| `Type` | string | The type of input. |
-| `ReferencedOutputID` | ReferencedOutputID | The output ID that is used as an input for the transaction. |
-
-#### Type `ReferencedOutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------------- |
-| `base58` | string | The referenced output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of a referenced output. |
-
-#### Type `Output`
-
-| Field | Type | Description |
-| :----------- | :------- | :------------------------------------------------------------------- |
-| `outputID` | OutputID | The identifier of an output. |
-| `outputType` | string | The type of the output. |
-| `output` | string | An output raw block containing balances and corresponding addresses. |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-#### Type `UnlockBlock`
-
-| Field | Type | Description |
-| :---------------- | :----- | :---------------------------------------------------------------------------------------------- |
-| `type` | string | The unlock block type: signature or reference. |
-| `referencedIndex` | uint16 | The reference index of an unlock block. |
-| `signatureType` | uint8 | The unlock block signature type: ED25519 or BLS. |
-| `publicKey` | string | The public key of a transaction owner. |
-| `signature` | string | The string representation of a signature encoded with base58 signed over a transaction essence. |
-
-## `/ledgerstate/transactions/:transactionID/metadata`
-
-Gets a transaction metadata for a given base58 encoded transaction ID.
-
-### Parameters
-
-| **Parameter** | `transactionID` |
-| ------------------------ | ------------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The transaction ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/transactions/:transactionID/metadata \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:transactionID` is the ID of the conflict, e.g. HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV.
-
-#### Client lib - `GetTransactionMetadata()`
-
-```Go
-resp, err := goshimAPI.GetTransactionMetadata("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK")
-if err != nil {
- // return error
-}
-fmt.Println("transactionID:", resp.TransactionID)
-fmt.Println("conflictID:", resp.ConflictID)
-fmt.Printf("conflict lazy booked: %v, solid: %v, finalized: %v\n", resp.LazyBooked, resp.Solid, resp.Finalized)
-fmt.Println("solidification time:", time.Unix(resp.SolidificationTime, 0))
-```
-
-### Response Examples
-
-```json
-{
- "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "solid": true,
- "solidificationTime": 1621889358,
- "finalized": true,
- "lazyBooked": false
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :----- | :--------------------------------------------------------- |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `conflictID` | string | The conflict identifier of the transaction. |
-| `solid` | bool | The boolean indicator if the transaction is solid. |
-| `solidificationTime` | uint64 | The time of solidification of the transaction. |
-| `finalized` | bool | The boolean indicator if the transaction is finalized. |
-| `lazyBooked` | bool | The boolean indicator if the transaction is lazily booked. |
-
-## `/ledgerstate/transactions/:transactionID/attachments`
-
-Gets the list of blocks IDs with attachments of the base58 encoded transaction ID.
-
-### Parameters
-
-| **Parameter** | `transactionID` |
-| ------------------------ | ------------------------------------- |
-| **Required or Optional** | required |
-| **Description** | The transaction ID encoded in base58. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/transactions/:transactionID/attachments \
--X GET \
--H 'Content-Type: application/json'
-```
-
-where `:transactionID` is the ID of the conflict, e.g. HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV.
-
-#### Client lib - `GetTransactionAttachments()`
-
-```Go
-resp, err := goshimAPI.GetTransactionAttachments("DNSN8GaCeep6CVuUV6KXAabXkL3bv4PUP4NkTNKoZMqS")
-if err != nil {
- // return error
-}
-fmt.Printf("Blocks IDs containing transaction %s:\n", resp.TransactionID)
-for _, blkID := range resp.BlockIDs {
- fmt.Println(blkID)
-}
-```
-
-### Response Examples
-
-```json
-{
- "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV",
- "blockIDs": ["J1FQdMcticXiiuKMbjobq4zrYGHagk2mtTzkVwbqPgSq"]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :-------------- | :------- | :------------------------------------------------------ |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `blockIDs` | []string | The blocks IDs that contains the requested transaction. |
-
-## `/ledgerstate/transactions`
-
-Sends transaction provided in form of a binary data, validates transaction before issuing the block payload. For more detail on how to prepare transaction bytes see the [tutorial](../tutorials/send_transaction.md).
-
-### Examples
-
-#### Client lib - `PostTransaction()`
-
-```GO
-// prepare tx essence and signatures
-...
-// create transaction
-tx := ledgerstate.NewTransaction(txEssence, ledgerstate.UnlockBlocks{unlockBlock})
-resp, err := goshimAPI.PostTransaction(tx.Bytes())
-if err != nil {
- // return error
-}
-fmt.Println("Transaction sent, txID: ", resp.TransactionID)
-```
-
-### Results
-
-| Return field | Type | Description |
-| :-------------- | :----- | :------------------------------------------------------------------------------- |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `Error` | error | The error returned if transaction was not processed correctly, otherwise is nil. |
-
-## `/ledgerstate/addresses/unspentOutputs`
-
-Gets all unspent outputs for a list of addresses that were sent in the body block. Returns the unspent outputs along with inclusion state and metadata for the wallet.
-
-### Request Body
-
-```json
-{
- "addresses": ["18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"]
-}
-```
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/ledgerstate/addresses/unspentOutputs \
--X POST \
--H 'Content-Type: application/json'
---data-raw '{"addresses": ["18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"]}'
-```
-
-#### Client lib - `PostAddressUnspentOutputs()`
-
-```Go
-resp, err := goshimAPI.PostAddressUnspentOutputs([]string{"H36sZQkopfoEzP3WCMThSjUv5v9MLVYuaQ73tsKgVzXo"})
-if err != nil {
- return
-}
-for _, outputs := range resp.UnspentOutputs {
- fmt.Println("address ID:", outputs.Address.Base58)
- fmt.Println("address type:", outputs.Address.Type)
-
- for _, output := range outputs.Outputs {
- fmt.Println("output ID:", output.Output.OutputID.Base58)
- fmt.Println("output type:", output.Output.Type)
- }
-}
-```
-
-### Response Examples
-
-```json
-{
- "unspentOutputs": [
- {
- "address": {
- "type": "AddressTypeED25519",
- "base58": "1Z4t5KEKU65fbeQCbNdztYTB1B4Cdxys1XRzTFrmvAf3"
- },
- "outputs": [
- {
- "output": {
- "outputID": {
- "base58": "4eGoQWG7UDtBGK89vENQ5Ea1N1b8xF26VD2F8nigFqgyx5m",
- "transactionID": "BqzgVk4yY9PDZuDro2mvT36U52ZYbJDfM41Xng3yWoQK",
- "outputIndex": 0
- },
- "type": "SigLockedColoredOutputType",
- "output": {
- "balances": {
- "11111111111111111111111111111111": 1000000
- },
- "address": "1Z4t5KEKU65fbeQCbNdztYTB1B4Cdxys1XRzTFrmvAf3"
- }
- },
- "confirmationState": {
- "confirmed": true,
- "rejected": false,
- "conflicting": false
- },
- "metadata": {
- "timestamp": "2021-05-25T15:47:04.50470213+02:00"
- }
- }
- ]
- }
- ]
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :--------------- | :--------------------- | :----------------------------------------- |
-| `unspentOutputs` | WalletOutputsOnAddress | Unspent outputs representation for wallet. |
-
-#### Type `WalletOutputsOnAddress`
-
-| Return field | Type | Description |
-| :----------- | :------------- | :----------------------------------------------- |
-| `Address` | Address | The address corresponding to the unspent output. |
-| `Outputs` | []WalletOutput | Unspent outputs representation for wallet. |
-
-#### Type `Address`
-
-| Field | Type | Description |
-| :------- | :----- | :------------------------------- |
-| `type` | string | The type of an address. |
-| `base58` | string | The address encoded with base58. |
-
-#### Type `WalletOutput`
-
-| Field | Type | Description |
-| :------------------ | :------------------- | :------------------------------------------------------------ |
-| `output` | Output | The unspent output. |
-| `confirmationState` | ConfirmationState | The inclusion state of the transaction containing the output. |
-| `metadata` | WalletOutputMetadata | The metadata of the output for the wallet lib. |
-
-#### Type `Output`
-
-| Field | Type | Description |
-| :----------- | :------- | :------------------------------------------------------------------- |
-| `outputID` | OutputID | The identifier of an output. |
-| `outputType` | string | The type of the output. |
-| `output` | string | An outputs raw block containing balances and corresponding addresses |
-
-#### Type `OutputID`
-
-| Field | Type | Description |
-| :-------------- | :----- | :---------------------------------------------- |
-| `base58` | string | The output identifier encoded with base58. |
-| `transactionID` | string | The transaction identifier encoded with base58. |
-| `outputIndex` | int | The index of an output. |
-
-#### Type `ConfirmationState`
-
-| Field | Type | Description |
-| :------------ | :--- | :---------------------------------------------------------------------------------------------------------------------- |
-| `confirmed` | bool | The boolean indicating if the transaction containing the output is confirmed. |
-| `rejected` | bool | The boolean indicating if the transaction that contains the output was rejected and is booked to the rejected conflict. |
-| `conflicting` | bool | The boolean indicating if the output is in conflicting transaction. |
-
-#### Type `WalletOutputMetadata`
-
-| Field | Type | Description |
-| :---------- | :-------- | :------------------------------------------------------ |
-| `timestamp` | time.Time | The timestamp of the transaction containing the output. |
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/mana.md b/docs/maintain/goshimmer/0.9/docs/apis/mana.md
deleted file mode 100644
index 2d9b18dd42c..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/mana.md
+++ /dev/null
@@ -1,864 +0,0 @@
----
-description: The mana APIs provide methods for people to retrieve the amount of access/consensus mana of nodes and outputs, as well as the event logs.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - mana
- - percentile
- - online
- - consensus
- - pledge
----
-
-# Mana API Methods
-
-The mana APIs provide methods for people to retrieve the amount of access/consensus mana of nodes and outputs, as well as the event logs.
-
-HTTP APIs:
-
-- [/mana](#mana)
-- [/mana/all](#manaall)
-- [/mana/percentile](#manapercentile)
-- [/mana/access/online](#manaaccessonline)
-- [/mana/consensus/online](#manaconsensusonline)
-- [/mana/access/nhighest](#manaaccessnhighest)
-- [/mana/consensus/nhighest](#manaconsensusnhighest)
-- [/mana/pending](#manapending)
-- [/mana/consensus/past](#manaconsensuspast)
-- [/mana/consensus/logs](#manaconsensuslogs)
-- [/mana/allowedManaPledge](#manaallowedmanapledge)
-
-Client lib APIs:
-
-- [GetOwnMana()](#getownmana)
-- [GetManaFullNodeID()](#getmanafullnodeid)
-- [GetMana with short node ID()](#getmana-with-short-node-id)
-- [GetAllMana()](#client-lib---getallmana)
-- [GetManaPercentile()](#client-lib---getmanapercentile)
-- [GetOnlineAccessMana()](#client-lib---getonlineaccessmana)
-- [GetOnlineConsensusMana()](#client-lib---getonlineconsensusmana)
-- [GetNHighestAccessMana()](#client-lib---getnhighestaccessmana)
-- [GetNHighestConsensusMana()](#client-lib---getnhighestconsensusmana)
-- [GetPending()](#client-lib---getpending)
-- [GetPastConsensusManaVector()](#client-lib---getpastconsensusmanavector)
-- [GetConsensusEventLogs()](#client-lib---getconsensuseventlogs)
-- [GetAllowedManaPledgeNodeIDs()](#client-lib---getallowedmanapledgenodeids)
-
-## `/mana`
-
-Get the access and consensus mana of the node.
-
-### Parameters
-
-| **Parameter** | `node ID` |
-| ------------------------ | ------------ |
-| **Required or Optional** | optional |
-| **Description** | full node ID |
-| **Type** | string |
-
-#### **Note**
-
-If no node ID is given, it returns the access and consensus mana of the node you're communicating with.
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana?2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5 \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### client lib
-
-There are 3 APIs to get mana of a node, which is based on the same HTTP API `/mana`.
-
-##### `GetOwnMana`
-
-Get the access and consensus mana of the node this API client is communicating with.
-
-```go
-manas, err := goshimAPI.GetOwnMana()
-if err != nil {
- // return error
-}
-
-// print the node ID
-fmt.Println("full ID: ", manas.NodeID, "short ID: ", manas.ShortNodeID)
-
-// get access mana of the node
-fmt.Println("access mana: ", manas.Access, "access mana updated time: ", manas.AccessTimestamp)
-
-// get consensus mana of the node
-fmt.Println("consensus mana: ", manas.Consensus, "consensus mana updated time: ", manas.ConsensusTimestamp)
-```
-
-##### `GetManaFullNodeID`
-
-Get Mana of a node with its full node ID.
-
-```go
-manas, err := goshimAPI.GetManaFullNodeID("2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5")
-if err != nil {
- // return error
-}
-```
-
-##### `GetMana` with short node ID
-
-```go
-manas, err := goshimAPI.GetMana("2GtxMQD9")
-if err != nil {
- // return error
-}
-```
-
-### Response examples
-
-```json
-{
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "access": 26.5,
- "accessTimestamp": 1614924295,
- "consensus": 26.5,
- "consensusTimestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :------ | :--------------------------------------- |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `access` | float64 | The amount of access mana. |
-| `accessTimestamp` | int64 | The timestamp of access mana updates. |
-| `consensus` | float64 | The amount of consensus mana. |
-| `consensusTimestamp` | int64 | The timestamp of consensus mana updates. |
-
-## `/mana/all`
-
-Get the mana perception of the node in the network. You can retrieve the full/short node ID, consensus mana, access mana of each node, and the mana updated time.
-
-### Parameters
-
-None.
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/all \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetAllMana()`
-
-```go
-manas, err := goshimAPI.GetAllMana()
-if err != nil {
- // return error
-}
-
-// mana updated time
-fmt.Println("access mana updated time: ", manas.AccessTimestamp)
-fmt.Println("consensus mana updated time: ", manas.ConsensusTimestamp)
-
-// get access mana of each node
-for _, m := range manas.Access {
- fmt.Println("full node ID: ", m.NodeID, "short node ID:", m.ShortNodeID, "access mana: ", m.Mana)
-}
-
-// get consensus mana of each node
-for _, m := range manas.Consensus {
- fmt.Println("full node ID: ", m.NodeID, "short node ID:", m.ShortNodeID, "consensus mana: ", m.Mana)
-}
-```
-
-### Response examples
-
-```json
-{
- "access": [
- {
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 26.5
- }
- ],
- "accessTimestamp": 1614924295,
- "consensus": [
- {
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 26.5
- }
- ],
- "consensusTimestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :----------- | :--------------------------------------- |
-| `access` | mana.NodeStr | A list of node that has access mana. |
-| `accessTimestamp` | int64 | The timestamp of access mana updates. |
-| `consensus` | mana.NodeStr | A list of node that has access mana. |
-| `consensusTimestamp` | int64 | The timestamp of consensus mana updates. |
-
-#### Type `mana.NodeStr`
-
-| field | Type | Description |
-| :------------ | :------ | :---------------------- |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `mana` | float64 | The amount of mana. |
-
-## `/mana/percentile`
-
-To learn the top percentile the node belongs to relative to the network in terms of mana. The input should be a full node ID.
-
-### Parameters
-
-| | |
-| ------------------------ | ------------ |
-| **Parameter** | `node ID` |
-| **Required or Optional** | Required |
-| **Description** | full node ID |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/percentile?2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5 \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetManaPercentile()`
-
-```go
-mana, err := goshimAPI.GetManaPercentile("2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5")
-if err != nil {
- // return error
-}
-
-// mana updated time
-fmt.Println("access mana percentile: ", mana.Access, "access mana updated time: ", manas.AccessTimestamp)
-fmt.Println("consensus mana percentile: ", mana.Consensus, "consensus mana updated time: ", manas.ConsensusTimestamp)
-```
-
-### Response examples
-
-```json
-{
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "access": 75,
- "accessTimestamp": 1614924295,
- "consensus": 75,
- "consensusTimestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :------------------- | :------ | :--------------------------------------- |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `access` | float64 | Access mana percentile of a node. |
-| `accessTimestamp` | int64 | The timestamp of access mana updates. |
-| `consensus` | float64 | Access mana percentile of a node. |
-| `consensusTimestamp` | int64 | The timestamp of consensus mana updates. |
-
-## `/mana/access/online`
-
-You can get a sorted list of online access mana of nodes, sorted from the highest access mana to the lowest. The highest access mana node has OnlineRank 1, and increases 1 by 1 for the following nodes.
-
-### Parameters
-
-None.
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/access/online \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetOnlineAccessMana()`
-
-```go
-// online access mana
-accessMana, err := goshimAPI.GetOnlineAccessMana()
-if err != nil {
- // return error
-}
-
-for _, m := accessMana.Online {
- fmt.Println("full node ID: ", m.ID, "mana rank: ", m.OnlineRank, "access mana: ", m.Mana)
-}
-```
-
-### Response examples
-
-```json
-{
- "online": [
- {
- "rank": 1,
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 75
- }
- ],
- "timestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------------ | :------------------------------------------- |
-| `online` | OnlineNodeStr | The access mana information of online nodes. |
-| `timestamp` | int64 | The timestamp of mana updates. |
-
-#### Type `OnlineNodeStr`
-
-| Field | Type | Description |
-| :------------ | :------ | :------------------------- |
-| `rank` | int | The rank of a node. |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `mana` | float64 | The amount of access mana. |
-
-## `/mana/consensus/online`
-
-You can get a sorted list of online consensus mana of nodes, sorted from the highest consensus mana to the lowest. The highest consensus mana node has OnlineRank 1, and increases 1 by 1 for the following nodes.
-
-### Parameters
-
-None.
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/consensus/online \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetOnlineConsensusMana()`
-
-```go
-// online access mana
-accessMana, err := goshimAPI.GetOnlineConsensusMana()
-if err != nil {
- // return error
-}
-
-for _, m := accessMana.Online {
- fmt.Println("full node ID: ", m.ID, "mana rank: ", m.OnlineRank, "consensus mana: ", m.Mana)
-}
-```
-
-### Response examples
-
-```json
-{
- "online": [
- {
- "rank": 1,
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 75
- }
- ],
- "timestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------------ | :---------------------------------------------- |
-| `online` | OnlineNodeStr | The consensus mana information of online nodes. |
-| `timestamp` | int64 | The timestamp of mana updates. |
-
-#### Type `OnlineNodeStr`
-
-| Field | Type | Description |
-| :------------ | :------ | :---------------------------- |
-| `rank` | int | The rank of a node. |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `mana` | float64 | The amount of consensus mana. |
-
-## `/mana/access/nhighest`
-
-You can get the N highest access mana holders in the network, sorted in descending order.
-If N=0, all nodes that have access mana are returned sorted.
-
-### Parameters
-
-| | |
-| ------------------------ | --------------------------------- |
-| **Parameter** | `N` |
-| **Required or Optional** | Required |
-| **Description** | The number of highest mana nodes. |
-| **Type** | int |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/access/nhighest?number=5 \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetNHighestAccessMana()`
-
-```go
-// get the top 5 highest access mana nodes
-accessMana, err := goshimAPI.GetNHighestAccessMana(5)
-if err != nil {
- // return error
-}
-
-for _, m := accessMana.Nodes {
- fmt.Println("full node ID: ", m.NodeID, "access mana: ", m.Mana)
-}v
-```
-
-### Response examples
-
-```json
-{
- "nodes": [
- {
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 26.5
- }
- ],
- "timestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :----------- | :------------------------------- |
-| `nodes` | mana.NodeStr | The N highest access mana nodes. |
-| `timestamp` | int64 | The timestamp of mana updates. |
-
-#### Type `mana.NodeStr`
-
-| field | Type | Description |
-| :------------ | :------ | :---------------------- |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `mana` | float64 | The amount of mana. |
-
-## `/mana/consensus/nhighest`
-
-You can get the N highest consensus mana holders in the network, sorted in descending order.
-
-### Parameters
-
-| | |
-| ------------------------ | ------------------------------------------- |
-| **Parameter** | `N` |
-| **Required or Optional** | Required |
-| **Description** | The number of highest consensus mana nodes. |
-| **Type** | int |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/consensus/nhighest?number=5 \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetNHighestConsensusMana()`
-
-```go
-// get the top 5 highest consensus mana nodes
-consensusMana, err := goshimAPI.GetNHighestConsensusMana(5)
-if err != nil {
- // return error
-}
-
-for _, m := consensusMana.Nodes {
- fmt.Println("full node ID: ", m.NodeID, "consensus mana: ", m.Mana)
-}v
-```
-
-### Response examples
-
-```json
-{
- "nodes": [
- {
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 26.5
- }
- ],
- "timestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :----------- | :---------------------------------- |
-| `nodes` | mana.NodeStr | The N highest consensus mana nodes. |
-| `timestamp` | int64 | The timestamp of mana updates. |
-
-#### Type `mana.NodeStr`
-
-| field | Type | Description |
-| :------------ | :------ | :---------------------- |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `mana` | float64 | The amount of mana. |
-
-## `/mana/pending`
-
-Get the amount of base access mana that would be pledged if the given output was spent.
-
-### Parameters
-
-| | |
-| ------------------------ | ------------------------- |
-| **Parameter** | `outputID` |
-| **Required or Optional** | Required |
-| **Description** | The requesting output ID. |
-| **Type** | string |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/pending?outputid="4a5KkxVfsdFVbf1NBGeGTCjP8Ppsje4YFQg9bu5YGNMSJK1" \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetPending()`
-
-```go
-res, err := goshimAPI.GetPending("4a5KkxVfsdFVbf1NBGeGTCjP8Ppsje4YFQg9bu5YGNMSJK1")
-if err != nil {
- // return error
-}
-
-// get the amount of mana
-fmt.Println("mana be pledged: ", res.Mana)
-fmt.Println("the timestamp of the output (decay duration)", res.Timestamp)
-```
-
-### Response examples
-
-```json
-{
- "mana": 26.5,
- "outputID": "4a5KkxVfsdFVbf1NBGeGTCjP8Ppsje4YFQg9bu5YGNMSJK1",
- "timestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------ | :-------------------------------------------- |
-| `mana` | float64 | The amount of access base mana to be pledged. |
-| `outputID` | string | The output ID of the request. |
-| `timestamp` | int64 | The timestamp of mana updates. |
-
-## `/mana/consensus/past`
-
-Get the consensus base mana vector of a time (int64) in the past.
-
-### Parameters
-
-| | |
-| ------------------------ | ----------------------------- |
-| **Parameter** | `timestamp` |
-| **Required or Optional** | Required |
-| **Description** | The timestamp of the request. |
-| **Type** | int64 |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/consensus/past?timestamp=1614924295 \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetPastConsensusManaVector()`
-
-```go
-res, err := goshimAPI.GetPastConsensusManaVector(1614924295)
-if err != nil {
- // return error
-}
-
-// the mana vector of each node
-for _, m := range res.Consensus {
- fmt.Println("node ID:", m.NodeID, "consensus mana: ", m.Mana)
-}
-```
-
-### Response examples
-
-```json
-{
- "consensus": [
- {
- "shortNodeID": "2GtxMQD9",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "mana": 26.5
- }
- ],
- "timestamp": 1614924295
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :----------- | :----------------------------- |
-| `consensus` | mana.NodeStr | The consensus mana of nodes. |
-| `timestamp` | int64 | The timestamp of mana updates. |
-
-#### Type `mana.NodeStr`
-
-| field | Type | Description |
-| :------------ | :------ | :---------------------- |
-| `shortNodeID` | string | The short ID of a node. |
-| `nodeID` | string | The full ID of a node. |
-| `mana` | float64 | The amount of mana. |
-
-## `/mana/consensus/logs`
-
-Get the consensus event logs of the given node IDs.
-
-### Parameters
-
-| | |
-| ------------------------ | --------------------------------- |
-| **Parameter** | `nodeIDs` |
-| **Required or Optional** | Required |
-| **Description** | A list of node ID of the request. |
-| **Type** | string array |
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/consensus/logs \
--X GET \
--H 'Content-Type: application/json'
--d '{
- "nodeIDs": [
- "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux6"
- ]
-}'
-```
-
-#### Client lib - `GetConsensusEventLogs()`
-
-```go
-res, err := goshimAPI.GetConsensusEventLogs([]string{"2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5"})
-if err != nil {
- // return error
-}
-
-for nodeID, e := range res.Logs {
- fmt.Println("node ID:", nodeID)
-
- // pledge logs
- for _, p := e.Pledge {
- fmt.Println("mana type: ", p.ManaType)
- fmt.Println("node ID: ", p.NodeID)
- fmt.Println("time: ", p.Time)
- fmt.Println("transaction ID: ", p.TxID)
- fmt.Println("mana amount: ", p.Amount)
- }
-
- // revoke logs
- for _, r := e.Revoke {
- fmt.Println("mana type: ", r.ManaType)
- fmt.Println("node ID: ", r.NodeID)
- fmt.Println("time: ", r.Time)
- fmt.Println("transaction ID: ", r.TxID)
- fmt.Println("mana amount: ", r.Amount)
- fmt.Println("input ID: ", r.InputID)
- }
-}
-```
-
-### Response examples
-
-```json
-{
- "logs": [
- "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5": {
- "pledge": [
- {
- "manaType": "Consensus",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "time": 1614924295,
- "txID": "7oAfcEhodkfVyGyGrobBpRrjjdsftQknpj5KVBQjyrda",
- "amount": 28
- }
- ],
- "revoke": [
- {
- "manaType": "Consensus",
- "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5",
- "time": 1614924295,
- "txID": "7oAfcEhodkfVyGyGrobBpRrjjdsftQknpj5KVBQjyrda",
- "amount": 28,
- "inputID": "35P4cW9QfzHNjXJwZMDMCUxAR7F9mfm6FvPbdpJWudK2nBZ"
- }
- ]
- }
- ],
- "startTime": 1614924295,
- "endTime": 1614924300
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :----------- | :------------------------- | :------------------------------------------------------ |
-| `logs` | map[string]\*EventLogsJSON | The consensus mana of nodes. The key of map is node ID. |
-| `startTime` | int64 | The starting time of collecting logs. |
-| `endTime` | int64 | The ending time of collecting logs. |
-
-#### Type `EventLogsJSON`
-
-| field | Type | Description |
-| :------- | :--------------- | :------------------ |
-| `pledge` | PledgedEventJSON | Pledged event logs. |
-| `revoke` | RevokedEventJSON | Revoked event logs. |
-
-#### Type `PledgedEventJSON`
-
-| field | Type | Description |
-| :--------- | :------ | :---------------------------------- |
-| `manaType` | string | Type of mana. |
-| `nodeID` | string | The full ID of a node. |
-| `time` | int64 | The time of transaction. |
-| `txID` | string | The transaction ID of pledged mana. |
-| `amount` | float64 | The amount of pledged mana. |
-
-#### Type `RevokedEventJSON`
-
-| field | Type | Description |
-| :--------- | :------ | :---------------------------------- |
-| `manaType` | string | Type of mana. |
-| `nodeID` | string | The full ID of a node. |
-| `time` | int64 | The time of transaction. |
-| `txID` | string | The transaction ID of revoked mana. |
-| `amount` | float64 | The amount of revoked mana. |
-| `inputID` | string | The input ID of revoked mana. |
-
-## `/mana/allowedManaPledge`
-
-This returns the list of allowed mana pledge node IDs.
-
-### Parameters
-
-None.
-
-### Examples
-
-#### cURL
-
-```shell
-curl http://localhost:8080/mana/allowedManaPledge \
--X GET \
--H 'Content-Type: application/json'
-```
-
-#### Client lib - `GetAllowedManaPledgeNodeIDs()`
-
-```go
-res, err := goshimAPI.GetAllowedManaPledgeNodeIDs()
-if err != nil {
- // return error
-}
-
-// print the list of nodes that access mana is allowed to be pledged to
-for _, id := range res.Access.Allowed {
- fmt.Println("node ID:", id)
-}
-
-// print the list of nodes that consensus mana is allowed to be pledged to
-for _, id := range res.Consensus.Allowed {
- fmt.Println("node ID:", id)
-}
-```
-
-### Response examples
-
-```json
-{
- "accessMana": {
- "isFilterEnabled": false,
- "allowed": [
- "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5"
- ]
- }
- "consensusMana": {
- "isFilterEnabled": false,
- "allowed": [
- "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5"
- ]
- }
-}
-```
-
-### Results
-
-| Return field | Type | Description |
-| :-------------- | :------------ | :--------------------------------------------------- |
-| `accessMana` | AllowedPledge | A list of nodes that allow to pledge access mana. |
-| `consensusMana` | AllowedPledge | A list of nodes that allow to pledge consensus mana. |
-
-#### Type `AllowedPledge`
-
-| field | Type | Description |
-| :---------------- | :------- | :-------------------------------------------------------------------------------------------------------- |
-| `isFilterEnabled` | bool | A flag shows that if mana pledge filter is enabled. |
-| `allowed` | []string | A list of node ID that allow to be pledged mana. This list has effect only if `isFilterEnabled` is `true` |
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/manual_peering.md b/docs/maintain/goshimmer/0.9/docs/apis/manual_peering.md
deleted file mode 100644
index 07b0d0bae32..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/manual_peering.md
+++ /dev/null
@@ -1,203 +0,0 @@
----
-description: The manual peering APIs allows you to add, get and remove the list of known peers of the node.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - known peer
- - peer
- - public key
- - gossip port
----
-
-# Manual Peering API methods
-
-The manual peering APIs allow managing the list of known peers of the node.
-
-HTTP APIs:
-
-- POST [/manualpeering/peers](#post-manualpeeringpeers)
-- GET [/manualpeering/peers](#get-manualpeeringpeers)
-- DELETE [/manualpeering/peers](#delete-manualpeeringpeers)
-
-Client lib APIs:
-
-- [AddManualPeers()](#addmanualpeers)
-- [GetManualPeers()](#getmanualpeers)
-- [RemoveManualPeers()](#removemanualpeers)
-
-## POST `/manualpeering/peers`
-
-Add peers to the list of known peers of the node.
-
-### Request Body
-
-```json
-[
- {
- "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3",
- "address": "127.0.0.1:14666"
- }
-]
-```
-
-#### Description
-
-| Field | Description |
-| :---------- | :------------------------------------------------- |
-| `publicKey` | Public key of the peer. |
-| `address` | IP address of the peer's node and its gossip port. |
-
-### Response
-
-HTTP status code: 204 No Content
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request POST 'http://localhost:8080/manualpeering/peers' \
---header 'Content-Type: application/json' \
---data-raw '[
- {
- "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3",
- "address": "172.19.0.3:14666"
- }
-]'
-```
-
-### Client library
-
-#### `AddManualPeers`
-
-```go
-import "github.com/iotaledger/goshimmer/packages/manualpeering"
-
-peersToAdd := []*manualpeering.KnownPeerToAdd{{PublicKey: publicKey, Address: address}}
-err := goshimAPI.AddManualPeers(peersToAdd)
-if err != nil {
-// return error
-}
-```
-
-## GET `/manualpeering/peers`
-
-Get the list of all known peers of the node.
-
-### Request Body
-
-```json
-{
- "onlyConnected": true
-}
-```
-
-#### Description
-
-| Field | Description |
-| :-------------- | :-------------------------------------------------------------------------------- |
-| `onlyConnected` | Optional, if set to true only peers with established connection will be returned. |
-
-### Response
-
-HTTP status code: 200 OK
-
-```json
-[
- {
- "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3",
- "address": "127.0.0.1:14666",
- "connectionDirection": "inbound",
- "connectionStatus": "connected"
- }
-]
-```
-
-#### Description
-
-| Field | Description |
-| :-------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `publicKey` | The public key of the peer node. |
-| `address` | IP address of the peer's node and its gossip port. |
-| `connectionDirection` | Enum, possible values: "inbound", "outbound". Inbound means that the local node accepts the connection. On the other side, the other peer node dials, and it will have "outbound" connectionDirection. |
-| `connectionStatus` | Enum, possible values: "disconnected", "connected". Whether the actual TCP connection has been established between peers. |
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request GET 'http://localhost:8080/manualpeering/peers' \
---header 'Content-Type: application/json' \
---data-raw '{
- "onlyConnected": true
-}'
-```
-
-### Client library
-
-#### `GetManualPeers`
-
-```go
-import "github.com/iotaledger/goshimmer/packages/manualpeering"
-
-peers, err := goshimAPI.GetManualPeers(manualpeering.WithOnlyConnectedPeers())
-if err != nil {
-// return error
-}
-fmt.Println(peers)
-```
-
-## DELETE `/manualpeering/peers`
-
-Remove peers from the list of known peers of the node.
-
-### Request Body
-
-```json
-[
- {
- "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3"
- }
-]
-```
-
-#### Description
-
-| Field | Description |
-| :---------- | :---------------------------------------------- |
-| `publicKey` | Public key of the peer to remove from the list. |
-
-### Response
-
-HTTP status code: 204 No Content
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location --request DELETE 'http://localhost:8080/manualpeering/peers' \
---header 'Content-Type: application/json' \
---data-raw '[
- {
- "publicKey": "8qN1yD95fhbfDZtKX49RYFEXqej5fvsXJ2NPmF1LCqbd"
- }
-]'
-```
-
-### Client library
-
-#### `RemoveManualPeers`
-
-```go
-import "github.com/iotaledger/hive.go/crypto/ed25519"
-import "github.com/iotaledger/goshimmer/packages/manualpeering"
-
-publicKeysToRemove := []ed25519.PublicKey{publicKey1, publicKey2}
-err := goshimAPI.RemoveManualPeers(publicKeysToRemove)
-if err != nil {
-// return error
-}
-```
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/snapshot.md b/docs/maintain/goshimmer/0.9/docs/apis/snapshot.md
deleted file mode 100644
index b341efce89c..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/snapshot.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-description: The snapshot API allows retrieving current snapshot.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - snapshot
- - retrieve
- - current
----
-
-# Snapshot API Methods
-
-Snapshot API allows retrieving current snapshot.
-
-The API provides the following functions and endpoints:
-
-- [/snapshot](#snapshot)
-
-## `/snapshot`
-
-Returns a snapshot file.
-
-### Parameters
-
-None
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location 'http://localhost:8080/snapshot'
-```
-
-#### Client lib
-
-Method not available in the client library.
-
-#### Results
-
-Snapshot file is returned.
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/spammer.md b/docs/maintain/goshimmer/0.9/docs/apis/spammer.md
deleted file mode 100644
index a64ed6879e6..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/spammer.md
+++ /dev/null
@@ -1,96 +0,0 @@
----
-description: The Spammer tool lets you add blocks to the tangle when running GoShimmer.
-image: /img/logo/goshimmer_light.png
-keywords:
- - client library
- - HTTP API
- - spammer
- - add blocks
- - interval
- - tangle
----
-
-# Spammer API Methods
-
-The Spammer tool lets you add blocks to the tangle when running GoShimmer.
-**Note:** Make sure you enable the **spammer plugin** before interacting with the API.
-
-The API provides the following functions and endpoints:
-
-- [/spammer](#spammer)
-
-Client lib APIs:
-
-- [ToggleSpammer()](#client-lib---togglespammer)
-
-## `/spammer`
-
-In order to start the spammer, you need to send GET requests to a `/spammer` API endpoint with the following parameters:
-
-### Parameters
-
-| **Parameter** | `cmd` |
-| ------------------------ | ------------------------------------------------------------------ |
-| **Required or Optional** | required |
-| **Description** | Action to perform. One of two possible values: `start` and `stop`. |
-| **Type** | `string` |
-
-| **Parameter** | `rate` |
-| ------------------------ | -------------------------------------------------------------------- |
-| **Required or Optional** | optional |
-| **Description** | Blocks per time unit. Only applicable when `cmd=start`. (default: 1) |
-| **Type** | `int` |
-
-| **Parameter** | `unit` |
-| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- |
-| **Required or Optional** | optional |
-| **Description** | Indicates the unit for the spam rate: block per minute or second. One of two possible values: `mpm` and `mps`. (default: `mps`) |
-| **Type** | `string` |
-
-| **Parameter** | `imif` (Inter Block Issuing Function) |
-| ------------------------ | ------------------------------------------------------------------------------------------------ |
-| **Required or Optional** | optional |
-| **Description** | Parameter indicating time interval between issued blocks. Possible values: `poisson`, `uniform`. |
-| **Type** | `string` |
-
-Description of `imif` values:
-
-- `poisson` - emit blocks modeled with Poisson point process, whose time intervals are exponential variables with mean 1/rate
-- `uniform` - issues blocks at constant rate
-
-### Examples
-
-#### cURL
-
-```shell
-curl --location 'http://localhost:8080/spammer?cmd=start&rate=100'
-curl --location 'http://localhost:8080/spammer?cmd=start&rate=100&imif=uniform&unit=mpm'
-curl --location 'http://localhost:8080/spammer?cmd=stop'
-```
-
-#### Client lib - `ToggleSpammer()`
-
-Spammer can be enabled and disabled via `ToggleSpammer(enable bool, rate int, imif string) (*jsonmodels.SpammerResponse, error)`
-
-```go
-res, err := goshimAPI.ToggleSpammer(true, 100, "mps", "uniform")
-if err != nil {
- // return error
-}
-
-// will print the response
-fmt.Println(res.Block)
-```
-
-#### Response examples
-
-```json
-{ "block": "started spamming blocks" }
-```
-
-#### Results
-
-| Return field | Type | Description |
-| :----------- | :------- | :------------------------------- |
-| `block` | `string` | Block with resulting block. |
-| `error` | `string` | Error block. Omitted if success. |
diff --git a/docs/maintain/goshimmer/0.9/docs/apis/webAPI.md b/docs/maintain/goshimmer/0.9/docs/apis/webAPI.md
deleted file mode 100644
index 27aeb1c7a76..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/apis/webAPI.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-description: The web API interface allows access to functionality of the node software via exposed HTTP endpoints.
-image: /img/logo/goshimmer_light.png
-keywords:
- - web API
- - POST
- - GET
- - node software
- - http endpoint
- - port
- - handler
----
-
-# WebAPI - clientLib
-
-The web API interface allows access to functionality of the node software via exposed HTTP endpoints.
-
-## How to Use the API
-
-The default port to access the web API is set to `8080:8080/tcp` in `docker-compose.yml`, where the first port number is the internal port number within the node software, and the second for the access from an http port. An example where these two would be set to different values, or the external port is not utilized, can be found in the docker-network tool (see also the `docker-compose.yml` file in the docker-network tool folder).
-
-The server instance of the web API is contacted via `webapi.Server()`. Next we need to register a route with a matching handler.
-
-```go
-webapi.Server().ROUTE(path string, h HandlerFunc)
-```
-
-where `ROUTE` will be replaced later in this documentation by `GET` or `POST`. The `HandlerFunc` defines a function to serve HTTP requests that gives access to the Context
-
-```go
-func HandlerFunc(c Context) error
-```
-
-We can then use the Context to send a JSON response to the node:
-
-```go
-JSON(statuscode int, i interface{}) error
-```
-
-An implementation example is shown later for the POST method.
-
-## GET and POST
-
-Two methods are currently used. First, with `GET` we register a new GET route for a handler function. The handler is accessed via the address `path`. The handler for a GET method can set the node to perform certain actions.
-
-```go
-webapi.Server().GET("path", HandlerFunc)
-```
-
-A command can be sent to the node software to the API, e.g. via command prompt:
-
-```shell
-curl "http://127.0.0.1:8080/path?command"
-```
-
-$$ . $$
-
-Second, with `POST` we register a new POST route for a handler function. The handler can receive a JSON body input and send specific blocks to the tangle.
-
-```go
-webapi.Server().POST("path", HandlerFunc)
-```
-
-For example, the following Handler `broadcastData` sends a data block to the tangle
-
-```go
-func broadcastData(c echo.Context) error {
- var request Request
- if err := c.Bind(&request); err != nil {
- log.Info(err.Error())
- return c.JSON(http.StatusBadRequest, Response{Error: err.Error()})
- }
-
- blk, err := blocklayer.IssuePayload(
- payload.NewGenericDataPayload(request.Data), blocklayer.Tangle())
- if err != nil {
- return c.JSON(http.StatusBadRequest, Response{Error: err.Error()})
- }
- return c.JSON(http.StatusOK, Response{ID: blk.ID().String()})
-}
-```
-
-As an example the JSON body
-
-```json
-{
- "data": "HelloWorld"
-}
-```
-
-can be sent to `http://127.0.0.1:8080/data`, which will issue a data block containing "HelloWor" (note that in this example the data input is size limited.)
diff --git a/docs/maintain/goshimmer/0.9/docs/faq.md b/docs/maintain/goshimmer/0.9/docs/faq.md
deleted file mode 100644
index 6504322bb9d..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/faq.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-description: Frequently Asked Questions. What is GoShimmer?, What Kind of Confirmation Time Can I Expect?, Where Can I See the State of the GoShimmer testnet?,How Many Transactions Per Second(TPS) can GoShimmer Sustain?, How is Spamming Prevented?, What Happens if I Issue a Double Spend?, Who's the Target Audience for Operating a GoShimmer Node?
-image: /img/logo/goshimmer_light.png
-keywords:
- - average network delay
- - testnet
- - analysis
- - dashboard
- - vote
- - frequently asked questions
- - node software
- - double spend
- - transactions
----
-
-# FAQ
-
-## What is GoShimmer?
-
-GoShimmer is a research and engineering project from the IOTA Foundation seeking to evaluate Coordicide concepts by implementing them in a node software.
-
-## What Kind of Confirmation Time Can I Expect?
-
-Since non-conflicting transactions aren't even voted on, they materialize after 2x the average network delay parameter we set. This means that a transaction usually confirms within a time boundary of ~10 seconds.
-
-## Where Can I See the State of the GoShimmer testnet?
-
-You can access the global analysis dashboard in the [Pollen Analyzer](http://analysisentry-01.devnet.shimmer.iota.cafe:28080/) which showcases the network graph and active ongoing votes on conflicts.
-
-## How Many Transactions per Second (TPS) Can GoShimmer Sustain?
-
-The transactions per second metric is irrelevant for the current development state of GoShimmer. We are evaluating components from Coordicide, and aren't currently interested in squeezing out every little ounce of performance. Since the primary goal is to evaluate Coordicide components, we value simplicity over optimization . Even if we would put out a TPS number, it would not reflect an actual metric in a finished production ready node software.
-
-## How is Spamming Prevented?
-
-The Coordicide lays out concepts for spam prevention through the means of rate control and such. However, in the current version, GoShimmer relies on Proof of Work (PoW) to prevent over saturation of the network. Doing the PoW for a block will usually take a couple of seconds on commodity hardware.
-
-## What Happens if I Issue a Double Spend?
-
-If issue simultaneous transactions spending the same funds, there is high certainty that your transaction will be rejected by the network. This rejection will block your funds indefinitely, though this may change in the future.
-
-If you issue a transaction, await the average network delay, and then issue the double spend, then the first issued transaction should usually become confirmed, and the 2nd one rejected.
-
-## Who's the Target Audience for Operating a GoShimmer Node?
-
-Our primary focus is testing out Coordicide components. We are mainly interested in individuals who have a strong IT background, rather than giving people of any knowledge-level the easiest way to operate a node. We welcome people interested in trying out the bleeding edge of IOTA development and providing meaningful feedback or problem reporting in form of [issues](https://github.com/iotaledger/goshimmer/issues/new/choose).
diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/configuration_parameters.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/configuration_parameters.md
deleted file mode 100644
index c2cc76d28c1..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/implementation_design/configuration_parameters.md
+++ /dev/null
@@ -1,117 +0,0 @@
----
-description: You can pass configuration parameters in two ways when running GoShimmer, through a JSON configuration file or through command line arguments. Parameters are grouped into embedded objects containing parameters for a single plugin or functionality. There is no limit on how deep the configuration object may be embedded.
-image: /img/logo/goshimmer_light.png
-keywords:
- - json
- - command line
- - embedded object
- - parameters
----
-
-# Configuration Parameters
-
-## Customizing Configuration
-
-Users can pass configuration parameters in two ways when running GoShimmer. One way is through a JSON configuration file and another way is through command line arguments.
-Settings passed through command line arguments take precedence. The JSON configuration file is structured as a JSON object containing parameters and their values.
-Parameters are grouped into embedded objects containing parameters for a single plugin or functionality. There is no limit on how deep the configuration object may be embedded.
-For example, the config below contains example parameters for the PoW plugin.
-
-```json
-{
- "pow": {
- "difficulty": 2,
- "numThreads": 1,
- "timeout": "10s"
- }
-}
-```
-
-The same arguments can be passed through command line arguments in the following way. Embedded objects' values are described using JSON dot-notation.
-Additionally, the user can pass the path of the JSON config file through a command-line argument as well, as shown in an example below.
-
-```shell
-goshimmer \
---config=/tmp/config.json \
---pow.difficulty=2 \
---pow.numThreads=1 \
---pow.timeout=10s
-```
-
-## Custom Parameter Fields
-
-Currently, in the code there are two ways in which parameters are registered with GoShimmer. However, one is the deprecated way, while the second should be used when adding new parameters.
-
-### New Way
-
-Defining configuration parameters using the new way is really similar, however the parameters are not registered directly with the package reading the configuration,
-but rather with our custom package that contains all the logic required to make it work seamlessly.
-
-In this approach, instead of defining a parameter name, a new type is defined with all necessary parameters, their default values and usage descriptions using Go's struct field tags.
-A variable is then initialized with the defined type.
-
-One difference is that parameter names do not contain the namespace they belong to, the namespace is set when registering the parameters structure with the `configuration` package. One `parameters.go` file can contain definitions and register multiple parameter structures.
-
-```go
-package customPlugin
-
-import "github.com/iotaledger/hive.go/app/configuration"
-
-// Parameters contains the configuration parameters used by the custom plugin.
-type ParametersDefinition struct {
- // ParamName contains some value used within the plugin
- ParamName float64 `default:"0.31" usage:"ParamName used in some calculation"`
-
- // ParamGroup contains an example of embedded configuration definitions.
- ParamGroup struct {
- // DetailedParam1 is the example value
- DetailedParam1 string `default:"defaultValue" usage:"DetailedParam1 used in the plugin"`
- // DetailedParam2 is the example value
- DetailedParam2 string `default:"defaultValue" usage:"DetailedParam2 used in the plugin"`
- }
-}
-
-var Parameters = &ParametersDefinition{}
-
-func init() {
- configuration.BindParameters(Parameters, "customPlugin")
-}
-```
-
-In order to access the parameter value, a user can simply access the structure's field: `Parameters.ParamName` or `Parameters.ParamGroup.DetailedParam1`
-and it will be populated either with the default value or values passed through a JSON config or command-line argument.
-
-This approach makes it more simple to define new parameters as well as makes accessing configuration values more clear.
-
-### Old, Deprecated Way
-
-The old way is described shortly to give a basic understanding of how it works, but it should not be used any longer when adding new parameters.
-
-In a package where the parameters will be used, create a `parameters.go` file, that contains the definition of constants, which define parameter names in JSON dot-notation.
-The constants will be later used in the code to access the parameter value.
-The file should also contain an `init()` function, which registers the parameters with the `flag` library responsible for parsing configuration along with its default value and short description.
-It should include comments describing what the parameter is for. Here is an example `parameters.go` file:
-
-```go
-package customPackage
-
-import (
- flag "github.com/spf13/pflag"
-)
-const (
- // ParamName contains some value used within the plugin
- ParamName = "customPlugin.paramName"
-)
-
-func init() {
- flag.Float64(ParamName, 0.31, "ParamName used in some calculation")
-}
-```
-
-The parameter values can be accessed in the code in the following way through the `config` plugin:
-
-```go
-import "github.com/iotaledger/goshimmer/plugins/config"
-
-config.Node().Int(CfgGossipPort)
-```
diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/event_driven_model.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/event_driven_model.md
deleted file mode 100644
index 78300651d04..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/implementation_design/event_driven_model.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-description: When an event is triggered, an event handler (or a collection of handlers) is executed and the state of the application is updated as necessary. In GoShimmer some of those events can be the arrival of new tangle block, peering request or plugin start.
-image: /img/logo/goshimmer_light.png
-keywords:
- - events
- - plugin
- - handler
- - trigger
- - specific type
----
-
-# Event Driven Model
-
-Event driven model is popular approach often used for example in GUI applications, where a program is waiting for some external event to take place (e.g. mouse click) in order to perform some action.
-In case of GoShimmer there is no GUI, however it applies this architecture approach as it's really flexible and is used to handle communication with other nodes and other internal parts.
-In GoShimmer some of those events can be e.g. arrival of new tangle block, peering request or plugin start.
-When an event is triggered, an event handler (or a collection of handlers) is executed and the state of the application is updated as necessary.
-
-## Glossary
-
-At first let's define some terms used further to avoid misunderstandings:
-
-### Event
-
-Represents the type of event (e.g. new block or peering request) as well as set of handlers and trigger functions. Each type of event is separately defined
-which means that events are independent of each other - each event has its own set of handlers and is triggered separately.
-
-### Event handler (callback)
-
-A function that is executed when an event of given type occurs. An event handler can accept multiple arguments (e.g. block ID or plugin) so that it can perform appropriate actions.
-Every handler must accept the same set of parameters. Each event has a different set of handlers (there can be multiple handlers) that are executed when the event is triggered.
-
-### Trigger
-
-A method that triggers execution of event handlers with given parameter values.
-
-## Creating a New Event With Custom Callbacks
-
-Below are the steps that show the example code necessary to create a custom event, attach a handler and trigger the event.
-
-1. Create a function that will call event handlers (handler caller) for a specific event.
- Each event has only one handler caller. It enforces that all handlers for the event must share the same interface, because the caller will pass a fixed set of arguments of specific types to handler function.
- It's not possible to pass different number of arguments or types to the handler function.
- Callers for all events must also share the same interface - the first argument represents the handler function that will be called represented by a generic argument.
- Further arguments represent parameters that will be passed to the handler during execution. Below are example callers that accept one and two parameters respectively.
- More arguments can be passed in similar manner.
-
-```go
-func singleArgCaller(handler interface{}, params ...interface{}) {
- handler.(func (*Plugin))(params[0].(*Plugin))
-}
-
-func twoArgsCaller(handler interface{}, params ...interface{}) {
- handler.(func(*peer.Peer, error))(params[0].(*peer.Peer), params[1].(error))
-}
-```
-
-`handler.(func (*Plugin))(params[0].(*Plugin))` - this code seems a little complicated, so to make things simpler we will divide into smaller parts and explain each:
-
-- `handler.(func (*Plugin))` (A) - this part does type-cast the handler from generic type onto type of desired, specific function type - in this case it's a function that accepts `*Plugin` as its only parameter.
-- `params[0].(*Plugin)` (B)- similarly to previous part, first element of parameter slice is type-casted onto `*Plugin` type, so that it matches the handler function interface.
-- `handler.(func (*Plugin))(params[0].(*Plugin))` - the whole expression calls the type-casted handler function with the type-casted parameter value. We can also write this as `A(B)` to make things simpler.
-
-The above explanation also allows a better understanding of why all handlers must share the same interface - handler caller passes fixed number of parameters and does type-casting of arguments onto specific types.
-
-2. Next, a new event object needs to be created. We pass the handler caller as an argument, which is saved inside the object to be called when the event is triggered.
-
-```go
-import "github.com/iotaledger/hive.go/runtime/events"
-
-ThisEvent := events.NewEvent(singleArgCaller)
-```
-
-3. After creating the event, handlers (or callbacks) can be attached to it. An event can have multiple callbacks, however they all need to share the same interface.
- One thing to note, is that functions are not passed directly - first they are wrapped into a `events.Closure` object like in the example below.
-
-```go
-ThisEvent.Attach(events.NewClosure(func (arg *Plugin) {
- // do something
-}))
-```
-
-4. In order to trigger the event with some parameters we need to run the `.Trigger` method on the event object with parameters that handler functions will receive:
-
-```go
-somePlugin Plugin
-ThisEvent.Trigger(&somePlugin)
-```
diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/object_storage.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/object_storage.md
deleted file mode 100644
index dda51c0b224..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/implementation_design/object_storage.md
+++ /dev/null
@@ -1,210 +0,0 @@
----
-description: ObjectStorage is used as a base data structure for many data collection elements such as `conflictStorage`, `conflictStorage`, `blockStorage` amongst others.
-image: /img/logo/goshimmer_light.png
-keywords:
- - storage
- - dynamic creation
- - database
- - parameters
- - object types
- - stream of bytes
- - cached
----
-
-# Object Storage
-
-In GoShimmer `ObjectStorage` is used as a base data structure for many data collection elements such as `conflictStorage`, `blockStorage` and others.
-It can be described by the following characteristics, it:
-
-- is a manual cache which keeps objects in memory as long as consumers are using it
-- uses key-value storage type
-- provides mutex options for guarding shared variables and preventing changing the object state by multiple goroutines at the same time
-- takes care of dynamic creation of different object types depending on the key, and the serialized data it receives through the utility `objectstorage.Factory`
-- helps with the creation of multiple `ObjectStorage` instances from the same package and automatic configuration.
-
-In order to create an object storage we need to provide the underlying `kvstore.KVStore` structure backed by the database.
-
-## Database
-
-GoShimmer stores data in the form of an object storage system. The data is stored in one large repository with flat structure. It is a scalable solution that allows for fast data retrieval because of its categorization structure.
-
-Additionally, GoShimmer leaves the possibility to store data only in memory that can be specified with the parameter `CfgDatabaseInMemory` value. In-memory storage is purely based on a Go map, package `mapdb` from hive.go.
-For the persistent storage in a database it uses `RocksDB`. It is a fast key-value database that performs well for both reads and writes simultaneously that was chosen due to its low memory consumption.
-
-Both solutions are implemented in the `database` package, along with prefix definitions that can be used during the creation of new object storage elements.
-
-The database plugin is responsible for creating a `store` instance of the chosen database under the directory specified with `CfgDatabaseDir` parameter. It will manage a proper closure of the database upon receiving a shutdown signal. During the start configuration, the database is marked as unhealthy, and it will be marked as healthy on shutdown. Then the garbage collector is run and the database can be closed.
-
-## ObjectStorage
-
-Assume we need to store data for some newly created object `A`. Then we need to define a new prefix for our package in the `database` package, and prefixes for single storage objects. They will be later used during `ObjectStorage` creation. A package prefix will be combined with a store specific prefix to create a specific realm.
-
-```Go
-package example
-
-type Storage struct {
- A *generic.ObjectStorage
- ...
- shutdownOnce sync.Once
-}
-```
-
-### ObjectStorage Factory
-
-To easily create multiple storage objects instances for one package, the most convenient way is to use the factory function.
-
-```Go
-osFactory := objectstorage.NewFactory(store, database.Prefix)
-```
-
-It needs two parameters:
-
-- `store` - the key value `kvstore` instance
-- `database.Prefix` - a prefix defined in the `database` package for our new `example` package. It will be responsible for automatic configuration of the newly provided `kvstore` instance.
-
-After defining the storage factory for the group of objects, we can use it to create an `*objectstorage.ObjectStorage` instance:
-
-```Go
-AStorage = osFactory.New(objPrefix, FromObjectStorage)
-AStorage = osFactory.New(objPrefix, FromObjectStorage, optionalOptions...)
-```
-
-For the function parameter we should provide:
-
-- `objPrefix` - mentioned before, we provide the object specific prefix.
-- `FromObjectStorage` - a function that allows the dynamic creation of different object types depending on the stored data.
-- `optionalOptions` - an optional parameter provided in the form of options array `[]objectstorage.Option`. All possible options are defined in `objectstorage.Options`. If we do not specify them during creation, the default values will be used, such as enabled persistence or setting cache time to 0.
-
-### StorableObject
-
-`StorableObject` is an interface that allows the dynamic creation of different object types depending on the stored data. We need to make sure that all methods required by the interface are implemented to use the object storage factory.
-
-- `SetModified` - marks the object as modified, which will be written to the disk (if persistence is enabled).
-- `IsModified` - returns true if the object is marked as modified
-- `Delete` - marks the object to be deleted from the persistence layer
-- `IsDeleted` - returns true if the object was marked as deleted
-- `Persist` - enables or disables persistence for this object
-- `ShouldPersist` - returns true if this object is going to be persisted
-- `Update` - updates the object with the values of another object - requires an explicit implementation
-- `ObjectStorageKey` - returns the key that is used to store the object in the database - requires an explicit implementation
-- `ObjectStorageValue` - marshals the object data into a sequence of bytes that are used as the value part in the object storage - requires an explicit implementation
-
-Most of these have their default implementation in `objectstorage` library, except from `Update`, `ObjectStorageKey`, `ObjectStorageValue` which need to be provided.
-
-### StorableObjectFactory Function
-
-The function `ObjectFromObjectStorage` from object storage provides functionality to restore objects from the `ObjectStorage`. By convention the implementation of this function usually follows the schema:
-`ObjectFromObjectStorage` uses `ObjectFromBytes`
-
-```Go
-func ObjectFromObjectStorage(key []byte, data []byte) (result StorableObject, err error) {
- result, err := ObjectFromBytes(marshalutil.New(data))
- ...
- return
-}
-```
-
-`ObjectFromBytes` unmarshals the object sequence of bytes with a help of `marshalutil` library. The returned `consumedBytes` can be used for the testing purposes.
-The created `marshalUtil` instance stores the stream of bytes and keeps track of what has been already read (`readOffset`).
-
-```Go
-func ObjectFromBytes(bytes []byte) (object *ObjectType, consumedBytes int, err error) {
- marshalUtil := marshalutil.New(bytes)
- if object, err = ObjectFromMarshalUtil(marshalUtil); err != nil {
- ...
- consumedBytes = marshalUtil.ReadOffset()
- return
-}
-```
-
-The key logic is implemented in `ObjectFromMarshalUtil` that takes the marshaled object and transforms it into the object of specified type.
-Because the data is stored in a sequence of bytes, it has no information about the form of an object and any data types it had before writing to the database.
-Thus, we need to serialize any data into a stream of bytes in order to write it (marshaling), and deserialize the stream of bytes back into correct data structures when reading it (unmarshaling).
-Let's consider as an example, unmarshaling of the `Child` object.
-
-```Go
-type Child struct {
- childType ChildType // 8 bytes
- referencedBlockID BlockID // 32 bytes
- childBlockID BlockID // 32 bytes
-}
-```
-
-The order in which we read bytes has to reflect the order in which it was written down during marshaling. As in the example, the order: `referencedBlockID`, `childType`, `childBlockID` is the same in both marshalling and unmarshalling.
-
-```Go
-// Unmarshalling
-func ChildFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (result *Child) {
- result = &Child{}
- result.referencedBlockID = BlockIDFromMarshalUtil(marshalUtil)
- result.childType = ChildTypeFromMarshalUtil(marshalUtil)
- result.childBlockID = BlockIDFromMarshalUtil(marshalUtil)
- return
-}
-// Marshalling
-func (a *Child) ObjectStorageChild() []byte {
- return marshalutil.New().
- Write(a.referencedBlockID).
- Write(a.childType).
- Write(a.childBlockID).
- Bytes()
-}
-```
-
-We continue to decompose our object into smaller pieces with help of `MarshalUtil` struct that keeps track of bytes, and a read offset.
-Then we use `marshalutil` build in methods on the appropriate parts of the byte stream with its length defined by the data
-type of the struct field. This way, we are able to parse bytes to the correct Go data structure.
-
-### ObjectStorage Methods
-
-After defining marshalling and unmarshalling mechanism for `objectStorage` bytes conversion,
-we can start using it for its sole purpose, to actually store and read the particular parts of the project elements.
-
-- `Load` allows retrieving the corresponding object based on the provided id. For example, the method on the block `objectStorage`
- is getting the cached object.
-- To convert an object retrieved in the form of a cache to its own corresponding type, we can use `Unwrap`.
- In the code below it will return the block wrapped by the cached object.
-- `Exists` - checks whether the object has been deleted. If so it is released from memory with the `Release` method.
-
- ```Go
- func (s *Storage) Block(blockID BlockID) *CachedBlock {
- return &CachedBlock{CachedObject: s.blockStorage.Load(blockID[:])}
- }
-
- cachedBlock := blocklayer.Tangle().Storage.Block(blkID)
- if !cachedBlock.Exists() {
- blkObject.Release()
- }
- block := cachedBlock.Unwrap()
- ```
-
-- `Consume` will be useful when we want to apply a function on the cached object. `Consume` unwraps the `CachedObject` and passes a type-casted version to the consumer function.
- Right after the object is consumed and when the callback is finished, the object is released.
-
- ```Go
- cachedBlock.Consume(func(block *tangle.Block) {
- doSomething(block)
- })
- ```
-
-- `ForEach` - allows to apply a `Consumer` function for every object residing within the cache and the underlying persistence layer.
- For example, this is how we can count the number of blocks.
-
- ```Go
- blockCount := 0
- blockStorage.ForEach(func(key []byte, cachedObject generic.CachedObject) bool {
- cachedObject.Consume(func(object generic.StorableObject) {
- blockCount++
- })
- }
- ```
-
-- `Store` - storing an object in the objectStorage. An extended version is method `StoreIfAbsent`
- that stores an object only if it was not stored before and returns boolean indication if the object was stored.
- `ComputeIfAbsent` works similarly but does not access the value log.
-
- ```Go
- cachedBlock := blockStorage.Store(newBlock)
- cachedBlock, stored := blockStorage.StoreIfAbsent(newBlock)
- cachedBlock := blockStorage.ComputeIfAbsent(newBlock, remappingFunction)
- ```
diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/packages_plugins.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/packages_plugins.md
deleted file mode 100644
index 70e1a4a3cbd..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/implementation_design/packages_plugins.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-description: GoShimmer uses the adapter design pattern to easily switch between different implementations and internal interfaces just by using a different plugin, without having to rewrite the code using it.
-image: /img/logo/goshimmer_light.png
-keywords:
- - dependency
- - plugins
- - plugin system
- - code
- - internal logic
- - package
- - adapter design pattern
- - adapter
- - circular dependency
----
-
-# Dependency of Packages and Plugins
-
-In GoShimmer, new features are added through the [plugin system](plugin.md).
-When creating a new plugin, it must implement an interface shared with all other plugins, so it's easy to add new
-plugins and change their internal implementation without worrying about compatibility.
-Because of this, to make the code clean and easily manageable the plugin's internal logic has to be implemented in a different package.
-This is an example of an [adapter design pattern](https://en.wikipedia.org/wiki/Adapter_pattern) that is often used in plugin systems.
-It's really useful in a prototype software like GoShimmer, because it's possible to easily switch between different implementations
-and internal interfaces just by using a different plugin, without having to rewrite the code using it.
-
-When creating a new plugin, the logic should be implemented in a separate package stored in the `packages/` directory.
-The package should contain all struct and interface definitions used, as well as the specific logic.
-It should not reference any `plugin` packages from the `plugin/` directory as this could lead to circular dependencies between packages.
-
-There are no special interfaces or requirements that packages in the `packages/` directory are forced to follow. However, they should be independent of other packages if possible,
-to avoid problems due to changing interfaces in other packages.
diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/plugin.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/plugin.md
deleted file mode 100644
index 20243e3e8c7..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/implementation_design/plugin.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-description: The Plugin system allows to quickly and easily add and remove modules that need to be started. In GoShimmer, this is taken to an extreme, everything is run through plugins.
-image: /img/logo/goshimmer_light.png
-keywords:
- - plugin
- - events
- - configure
- - event handlers
- - handler function
----
-
-# Plugin System
-
-GoShimmer is a complex application that is used in a research environment where requirements often changed and new ideas arise.
-The Plugin system allows to quickly and easily add and remove modules that need to be started. However, one thing that might be non-intuitive about the use of plugins is that it's taken to an extreme - everything is run through plugins.
-The only code that is not executed through a plugin system is the code responsible for configuring and starting the plugins.
-All new future features added to the GoShimmer must be added by creating a new plugin.
-
-## Plugin Structure
-
-`Plugin` structure is defined as following.
-
-```go
-type Plugin struct {
- Node *Node
- Name string
- Status int
- Events pluginEvents
- wg *sync.WaitGroup
-}
-```
-
-Below is a brief description of each field:
-
-- `Node` - contains a pointer to `Node` object which contains references to all the plugins and node-level logger. #TODO: figure out why it is there - not really used anywhere
-- `Name` - descriptive name of the plugin.
-- `Status` - flag indicating whether plugin is enabled or disabled.
-- `Events` - structure containing events used to properly deploy the plugin. Details described below.
-- `wg` - a private field containing WaitGroup. #TODO: figure out why it is there - not really used anywhere
-
-## Plugin Events
-
-Each plugin defines 3 events: `Init`, `Configure`, `Run`.
-Those events are triggered during different stages of node startup, but the plugin doesn't have to define handlers for all of those events in order to do what it's been designed for.
-Execution order and purpose of each event is described below:
-
-1. `Init` - is triggered almost immediately after a node is started. It's used in plugins that are critical for GoShimmer such as reading config file or initializing global logger. Most plugins don't need to use this event.
-2. `Configure` - this event is used to configure the plugin before it is started. It is used to define events related to internal plugin logic or initialize objects used by the plugin.
-3. `Run` - this event is triggered as the last one. The event handler function contains the main logic of the plugin.
- For many plugins, the event handler function creates a separate worker that works in the background, so that the handler function for one plugin can finish and allow other plugins to be started.
-
-Each event could potentially have more than one handler, however currently all existing plugins follow a convention where each event has only one handler.
-
-It is important to note that each event is triggered for all plugins sequentially, so that the event `Init` is triggered for all plugins, then `Configure` is triggered for all plugins and finally `Run`.
-Such order is crucial, because some plugins rely on other plugins' initialization or configuration. The order in which plugins are initialized, configured and run is also important and this is described below.
-
-Handler functions for all plugin events share the same interface, so they could potentially be used interchangeably. Sample handler functions look like this:
-
-```go
-func configure(_ *node.Plugin) {
- // configure stuff
-}
-
-func run(*node.Plugin) {
- // run plugin
-}
-```
-
-The handler functions receive one argument of type `*Plugin`. The code responsible for triggering those events passes a pointer to the plugin object itself.
-The object needs to be passed so that the handler function can access plugin fields (e.g. plugin name to configure logger).
-
-## Creating a New Plugin
-
-A plugin object can be created by calling the `node.NewPlugin` method.
-The method creates and returns a new plugin object, as well as registers it so that GoShimmer knows the plugin is available.
-It accepts the following arguments:
-
-- `name string` - plugin name.
-- `status int` - flag indicating whether plugin is enabled or disabled by default. This can be overridden by enabling/disabling the plugin in the external configuration file. Possible values: `node.Enabled`, `node.Disabled`.
-- `callbacks ...Callback` - list of event handler functions. The method will correctly create a plugin when passing up to 2 callbacks. Note: `type Callback = func(plugin *Plugin)`, which is a raw function type without being wrapped in `events.Closure`.
-
-There is a couple of ways that the method can be called, depending on which plugin events need to be configured.
-
-- Define `Configure` and `Run` event handlers. It's the most common usage that plugins currently use.
-
-```go
-plugin = node.NewPlugin(PluginName, node.Enabled, configure, run)
-```
-
-- Define only `Configure` event. It's used for plugins that are used to configure objects used (or managed) by other plugins, such as creating API endpoints.
-
-```go
-plugin = node.NewPlugin(PluginName, node.Enabled, configure)
-```
-
-- Define a plugin without `Configure` or `Run` event handlers. This is used to create plugins that perform some action when the `Init` event is triggered.
-
-```go
-plugin = node.NewPlugin(PluginName, node.Enabled)
-```
-
-However, the `Init` event handler cannot be attached using the `node.NewPlugin` method.
-In order to specify this handler, plugin creator needs to attach it manually to the event, for example inside the package's `init()` method in the file containing the rest of the plugin definition.
-
-```go
-func init() {
- plugin.Events.Init.Attach(events.NewClosure(func(*node.Plugin) {
- // do something
- }))
-}
-```
-
-It's important to note, that the `node.NewPlugin` accepts handler functions in a raw format, that is, without being wrapped by the `events.Closure` object as the method does the wrapping inside.
-However, when attaching the `Init` event handler manually, it must be wrapped by the `events.Closure` object.
-
-It's crucial that each plugin is created only once and `sync.Once` class is used to guarantee that. Contents of a file containing sample plugin definition is presented. All plugins follow this format.
-
-```go
-const PluginName = "SamplePlugin"
-
-var (
- // plugin is the plugin instance of the new plugin plugin.
- plugin *node.Plugin
- pluginOnce sync.Once
-)
-
-// Plugin gets the plugin instance.
-func Plugin() *node.Plugin {
- pluginOnce.Do(func() {
- plugin = node.NewPlugin(PluginName, node.Enabled, configure, run)
- })
- return plugin
-}
-
-// Handler functions
-func init() {
- plugin.Events.Init.Attach(events.NewClosure(func(*node.Plugin) {
- // do something
- }))
-}
-func configure(_ *node.Plugin) {
- // configure stuff
-}
-
-func run(*node.Plugin) {
- // run stuff
-}
-```
-
-## Running a New Plugin
-
-In order to correctly add a new plugin to GoShimmer, apart from defining it, it must also be passed to the `node.Run` method.
-Because there are plenty of plugins, in order to improve readability and make managing plugins easier, they are grouped into separate wrappers passed to the `node.Run` method.
-When adding a new plugin, it must be added into one of those groups, or a new group must be created.
-
-```go
-node.Run(
- plugins.Core,
- plugins.Research,
- plugins.UI,
- plugins.WebAPI,
-)
-```
-
-You can add a plugin simply by calling the `Plugin()` method of the newly created plugin and passing the argument further. An example group definition is presented below. When it's added, the plugin is correctly added and will be run when GoShimmer starts.
-
-```go
-var Core = node.Plugins(
- banner.Plugin(),
- newPlugin.Plugin(),
- // other plugins omitted
-)
-```
-
-## Background workers
-
-In order to run plugins beyond the scope of the short-lived `Run` event handler, possibly multiple `daemon.BackgroundWorker` instances can be started inside the handler function.
-This allows the `Run` event handler to finish quickly, and the plugin logic can continue running concurrently in a separate goroutine.
-
-Background worker can be started by running the `daemon.BackgroundWorker` method, which accepts following arguments:
-
-- `name string` - background worker name
-- `handler WorkerFunc` - long-running function that will be started in its own goroutine. It accepts a single argument of type `<-chan struct{}`. When something is sent to that channel, the worker will shut down. Note: `type WorkerFunc = func(shutdownSignal <-chan struct{})`
-- `order ...int` - value used to define in which shutdown order this particular background worker must be shut down (higher = earlier).
- The parameter can either accept one or zero values, more values will be ignored. When passing zero values, default value of `0` is assumed.
- Values are normalized in the `github.com/iotaledger/goshimmer/packages/shutdown` package, and it should be used instead of passing integers manually.
- Correct shutdown order is as important as correct start order, because different plugins depend on others working correctly, so when one plugin shuts down too soon, other plugins may run into errors, crash and leave an incorrect state.
-
-An example code for creating a background worker:
-
-```go
-func start(shutdownSignal <-chan struct{}) {
- // long-running function
- // possibly start goroutines here
- // wait for shutdown signal
- <-shutdownSignal
-}
-
-if err := daemon.BackgroundWorker(backgroundWorkerName, start, shutdown.PriorityGossip); err != nil {
- log.Panicf("Failed to start as daemon: %s", err)
-}
-```
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/advanced_outputs.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/advanced_outputs.md
deleted file mode 100644
index 04d6f98fc03..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/advanced_outputs.md
+++ /dev/null
@@ -1,337 +0,0 @@
----
-description: IOTA strives to provide output types beyond the basic functionality of a cryptocurrency application such as Smart Contracts.
-image: /img/protocol_specification/bob_alias.png
-keywords:
- - smart contract chain
- - state metadata
- - state controller
- - governance controller
- - alias
- - smart contract
- - transactions
- - NFT
----
-
-# UTXO Output Types
-
-## Motivation
-
-In the previous [section](ledgerstate.md) two basic output types were introduced that enable the use of the UTXO ledger
-as a payment application between addresses. Each `SigLockedSingleOutput` and `SigLockedAssetOutput` encodes a list of
-balances and an address in the output. The output can be unlocked by providing a valid signature for the address, hence
-only the owner of the address can initiate a payment.
-
-While these two output types provide the basic functionality for a cryptocurrency application, IOTA aims to strive
-for more. The first and foremost application the UTXO ledger should support besides payments is the IOTA Smart Contract
-Protocol (ISCP). Due to the lack of total ordering of the Tangle (that is a direct result of the scalable, parallel
-architecture), it is not possible to implement Turing-complete smart contracts directly on layer 1. Therefore,
-IOTA aims to develop a layer 2 protocol called ISCP for smart contracts.
-
-After carefully evaluating the proposed architecture of ISCP and the required properties of the layer 2 protocol, we
-came up with special types of outputs for layer 1 UTXO support: `AliasOutput` and `ExtendedLockedOutput`.
-These output types are experimental: the IOTA 2.0 DevNet serves as their testing ground. Bear in mind that there is no
-guarantee that they will not change as the protocol evolves.
-
-It will be demonstrated later that these outputs can also be used for enhanced cryptocurrency payment application, such
-as conditional payments or time locked sending, but also open up the world of native non-fungible tokens (NFTs).
-
-## Functional Requirements of ISCP
-
-Designing the output types starts with a proper requirement analysis. Below you can read the summary of the functional
-requirements imposed by the layer 2 smart contract protocol. You can read more about ISCP
-[here](https://blog.iota.org/an-introduction-to-iota-smart-contracts-16ea6f247936/),
-[here](https://blog.iota.org/iota-smart-contracts-protocol-alpha-release/)
-or check out this [presentation](https://youtu.be/T1CJFr6gz8I).
-
-- Smart contract chains need a globally unique account in the UTXO ledger, that does not change if the controlling entities changes.
-- An account state is identified by balances and state metadata.
-- Two levels of control: **state controller** and **governance controller**.
-- State controller can change state metadata (state transition) and balance (min required).
-- Governance controller can change state controller and governance controller.
-- An account shall have only one valid state in the ledger.
-- Smart contract chain state transitions are triggered by requests in the ledger.
-- A request is a ledger entity belonging to the account with tokens and data.
-- The account can identify and control requests.
-- Fallback mechanism needs to be in place in case the requests are not picked up.
-- When request is completed in a state transition, it should be atomically removed from the ledger.
-
-## Output Design
-
-### Introducing Alias Account
-
-Previously, the account concept in the ledger was realized with cryptographic entities called addresses, that are backed
-by public and private key pairs. Addresses are present in the ledger through outputs and define who can spend this
-output by providing a digital signature.
-
-Addresses are not able to provide the necessary functionality needed for smart contract chain accounts, because:
-
-- addresses change with the rotation of the controlling body (committee),
-- and there is no notion of separate control levels for an address account.
-
-We define a new account type in the ledger, called **Alias**, to represent smart contract chain accounts. An alias
-account can hold token balances, but also has state metadata, which stores the state of the smart contract chain.
-The alias account defines two to controlling entities: a state controller and a governance controller. The state
-controller can transition the account into a new state, and can manipulate account balances. The governance controller
-can change the state controller or the governance controller.
-
-An alias is not a cryptographic entity, but it is controlled via either regular addresses or other aliases.
-
-### Representing a Smart Contract Chain Account in Ledger
-
-An alias is translated into the ledger as a distinct output type, called **AliasOutput**. The output contains:
-
-- the unique identifier of the alias, called **AliasID**,
-- the **State Controller** entity,
-- **State Metadata**,
-- the **Governance Controller**,
-- **Governance Metadata**,
-- **Immutable Metadata**,
-- and token **balances**.
-
-The state controller and governance controller entities can either be private key backed addresses (cryptographic
-entities) or `AliasAddress`, that is the unique identifier of another alias. Note, that an alias cannot be controlled by
-its own `aliasID`.
-
-An alias output itself can be regarded as a non-fungible token with a unique identifier `aliasID`, metadata and token
-balances. An NFT that can hold tokens, can control its metadata and has a governance model.
-
-Alias output can be created in a transaction that spends the minimum required amount of tokens into a freshly created
-alias output. The new transaction output specifies the state and governance controller next to the balances, but aliasID
-is assigned by the protocol once the transaction is processed. Once the output is booked, aliasID becomes the hash of
-the outputID that created it.
-
-An alias output can only be destroyed by the governance controller by simply consuming it as an input but not creating
-a corresponding output in the transaction.
-
-The alias account is transitioned into a new state by spending its alias output in a transaction and creating an
-updated alias output with the same aliasID. Depending on what unlocking conditions are met, there are certain
-restrictions on how the newly created alias output can look like.
-
-### Consuming an Alias Output
-
-As mentioned above, an alias output can be unlocked by both the state controller and the governance controller.
-
-#### Unlocking via State Controller
-
-When the state controller is an address, the alias output is unlocked by providing a signature of the state controller
-address in the output that signs the essence of the transaction. When state controller is another alias, unlocking is
-done by providing a reference to the state controller unlocked other alias within the transaction.
-
-When an alias output is unlocked as input in a transaction by the state controller, the transaction must contain a
-corresponding alias output. Only the state metadata and the token balances of the alias output are allowed to change,
-and token balances must be at least a protocol defined constant.
-
-#### Unlocking via governance controller
-
-The governance controller is either an address, or another alias. In the former case, unlocking is done via the regular
-signature. In the latter case, unlocking is done by providing a reference to the unlocked governance alias within the
-transaction.
-
-When an alias output is unlocked as input by the governance controller, the transaction doesn't need to have a
-corresponding output. If there is no such output in the transaction, the alias is destroyed. If however the output
-is present, only the state and governance controller fields are allowed to be changed.
-
-A governance controller therefore can:
-
-- destroy the alias all together,
-- assign the state controller of the alias,
-- assign the governance controller of the alias.
-
-## Locking Funds Into Aliases
-
-Address accounts in the ledger can receive funds by the means of signature locking. Outputs specify an address field,
-which essentially gives the control of the funds of the output to the owner of the address account, the holder of the
-corresponding private key.
-
-In order to make alias accounts (smart contract chains) able to receive funds, we need to define a new fund locking
-mechanism, called alias locking. An alias locked output can be unlocked by unlocking the given alias output for
-state transition in the very same transaction.
-
-An alias account (smart contract chain) can receive funds now, but there are additional requirements to be satisfied
-for smart contracts:
-
-- Alias locked outputs represent smart contract requests, and hence, need to contain metadata that is interpreted on
- layer 2.
-- A dormant smart contract chain might never consume alias locked outputs, therefore, there needs to be a fallback
- mechanism for the user to reclaim the funds locked into the request.
-- Requests might be scheduled by the user by specifying a time locking condition on the output. The output can not be
- spent before the time locking period expires.
-
-As we can see, there are couple new concepts regarding outputs that we need to support for the smart contract use case:
-
-- **alias locking**
-- **metadata tied to output**
-- **fallback unlocking mechanism**
-- **time locking**
-
-In the next section, we are going to design an **Extended Output** model that can support these concepts.
-
-## Extended Output
-
-An extended output is an output that supports alias locking, output metadata, fallback unlocking mechanisms and time
-locking. The structure of an extended output is as follows:
-
-Extended Output:
-
-- **AliasID**: the alias account that is allowed to unlock this output.
-- **Token Balances**: tokens locked by the output.
-- **Metadata**: optional, bounded size binary data.
-- **FallbackAccount**: an alias or address that can unlock the output after **FallbackDeadline**.
-- **FallbackDeadline**: a point in time after which the output might be unlocked by **FallbackAccount**.
-- **Timelock** (Optional): a point in time. When present, the output can not be unlocked before.
-
-### Unlocking via AliasID
-
-The extended output can be unlocked by unlocking the alias output with aliasID by the state controller within the same
-transaction. The unlock block of an extended output then references the unlock block of the corresponding alias output.
-
-Aliases abstract away the underlying address of a smart contract committee, so when a committee is rotated, `aliasID`
-stays the same, but the address where the alias points to can be changed.
-
-It is trivial then to define the unique account of a smart contract on layer 1 as the `aliasID`, however, a new locking
-mechanism is needed on the UTXO layer to be able to tie funds to an alias.
-
-Previously, only addresses defined accounts in the protocol. Funds can be locked into addresses, and a signature of the
-respective address has to be provided in the transaction to spend funds the account.
-
-With the help of aliases, it is possible to extend the capabilities of the protocol to support locking funds into
-aliases. This is what we call alias locking. An alias locked output specifies an `aliasID` that can spend the funds
-from this output. The owner of the alias account can spend aforementioned alias locked outputs by unlocking/moving the
-alias in the very same transaction. We will use the term `ExtendedLockedOutput` for outputs that support alias locking.
-
-Let's illustrate this through a simple example. Alice wants to send 10 IOTA to Bob's alias account. Bob then wants to
-spend the 10 IOTA from his alias account to his address account.
-
-1. Bob creates an alias where `aliasID=BobAliasID` with Transaction A.
-
-[![Bob creates an alias](/img/protocol_specification/bob_alias.png 'Bob creates an alias')](/img/protocol_specification/bob_alias.png)
-
-2. Bob shares `BobAliasID` with Alice.
-3. Alice sends 10 IOTA to Bob by sending Transaction B that creates an `ExtendedLockedOutput`, specifying the balance,
- and `aliasID=BobAliasID`.
-
-[![Alice sends 10 IOTA to Bob](/img/protocol_specification/alice_sends_10_mi.png 'Alice sends 10 IOTA to Bob')](/img/protocol_specification/alice_sends_10_mi.png)
-
-4. Bob can spend the outputs created by Alice by creating Transaction C that moves his `BobAlias` (to the very same
- address), and including the `ExtendedLockedOutput` with `aliasID=BobAliasID`.
-
-[![Bob can spend the outputs created by Alice by creating Transaction C](/img/protocol_specification/bob_can_spend_outputs_created_by_alice.png 'Bob can spend the outputs created by Alice by creating Transaction C')](/img/protocol_specification/bob_can_spend_outputs_created_by_alice.png)
-
-In a simple scenario, a user wishing to send a request to a smart contract creates an extended output. The output
-contains the AliasID of the smart contract chain account, the layer 2 request as metadata, and some tokens to pay
-for the request. Once the transaction is confirmed, the smart contract chain account "receives" the output. It
-interprets the request metadata, carries out the requested operation in its chain, and submits a transaction that
-contains the updated smart contract chain state (alias output), and also spends the extended output to increase
-the balance of its alias output.
-
-What happens when the smart contract chain goes offline or dies completely? How do we prevent the extended output to
-be lost forever?
-
-### Unlocking via Fallback
-
-Extended outputs can also define a fallback account and a fallback deadline. After the fallback deadline, only the
-fallback account is authorized to unlock the extended output. Fallback deadline cannot be smaller than a protocol
-wide constant to give enough time to the smart contract chain to pick up the request.
-
-Fallback unlocking can either be done via signature unlocking or alias unlocking, depending on the type of account
-specified.
-
-### Timelock
-
-Timelocking outputs is a desired operation not only for smart contracts, but for other use cases as well. A user might
-for example scheduled a request to a smart contract chain at a later point in time by timelocking the extended output
-for a certain period.
-
-Timelocks can be implemented quite easily if transactions have enforced timestamps: the output can not be unlocked if
-the transaction timestamp is before the timelock specified in the output.
-
-## Notes
-
-One of the most important change that the new output types imply is that checking the validity of an unlock block of a
-certain consumed input has to be done in the context of the transaction. Previously, an unlock block was valid if the
-provided signature was valid. Now, even if the signature is valid for an alias output unlocked for state transition,
-additional constraints also have to be met.
-
-## How Does It Work for ISCP?
-
-- The new output types are completely orthogonal to colored coins, ISCP will not rely on them anymore.
-- The Alias output functions as a chain constraint to allow building a non-forkable chain of transactions in the
- ledger by the state controller. The alias output holds tokens, that are the balance of the smart contract chain.
- The hash of the smart contract chain state is stored in the alias output, registering each state transition as a
- transaction on the ledger.
-- The governance controller of an alias output can change the state controller, meaning that a committee rotation can
- be carried out without changing the smart contract chain account, aliasID.
- - A smart contract chain can be self governed, if the state and governance controllers coincide.
- - A smart contract chain can be governed by an address account, or by another smart contract chain through an
- alias account.
-- Each Extended Output is a request which is “sent” to the alias account. The ISCP can retrieve the backlog of
- requests by retrieving all outputs for the aliasID. Consuming the Extended Output means it is atomically removed
- from the backlog. It can only be done by the state controller, i.e. the committee of the smart contract chain.
-- Fallback parameters prevent from losing funds if the committee is inactive for some timeout. After timeout the
- Extended Output can be unlocked by FallbackAccount, an address or another alias.
-
-## Additional Use Cases
-
-### Delegated Keys
-
-An alias output is controlled by two parties: the state controller and the governance controller. The state controller
-can only change the state metadata and the tokens when spending the output, therefore it only has the right to move the
-alias to the very same account in a transaction. The governance controller however can change the state controller, or
-destroy the alias and hence release the funds locked into it.
-
-This makes it an ideal candidate for mana delegation, that is a crucial part of a mana marketplace. In Coordidice,
-moving funds generate access and consensus mana. Alias outputs make it possible to delegate the right to move funds
-without losing control over them.
-
-1. An account owning funds create an alias output and locks funds into it. The governance controller of the alias output
- shall be `ownAccount`.
-2. An entity in need of mana generated by the locked funds can purchase the right from the governance controller to
- move the alias output, generating mana.
-3. Once purchased, the governance controller updates the alias output by specifying the state controller to be
- `buyerAccount`.
-4. `buyerAccount` now can move the alias output, but only to its own account. Each move generates (access) mana.
-5. Since `ownAccount` is the governance controller, it can revoke `buyerAccount`'s state controlling right at any point
- in time.
-6. `ownAccount` can also destroy the alias and "free" the locked funds.
-
-Notes:
-
-- The state controller can redeem funds from the alias output up to the point where only `minimum allowed amount` is
- present in the alias output. Therefore, without additional mechanism, it would only make sense to lock
- `minimum allowed amount` into an alias by the governance controller. This is obviously a drawback, users should not
- be restricted in how many funds they would like to delegate.
-- A governance controller can destroy the alias output at any time, which is not desired from the buyer perspective.
- The buyer should be able to buy the right to move the funds for a pre-defined amount of time.
-
-To solve above problems, the `AliasOutput` currently implemented in GoShimmer supports the delegation use case by
-introducing two new fields in the output:
-
-- `isDelegated` and
-- `delegationTimelock`.
-
-When an alias is delegated, the state controller cannot modify token balances, and the governor can destroy the
-output with any balance. However, when delegation time lock is present, the governor is not allowed to unlock the
-output until the delegation time expires.
-
-### Non-Fungible Tokens
-
-NFTs are unique tokens that have metadata attached to them. Since an AliasOutput implements a chain constraint in the
-UTXO ledger, it is perfectly suited to represent NFTs. The unique identifier of the NFT is the `aliasID` or `AliasAddress`.
-The `Immutable Data` field of the output can only be defined upon creation and can't be changed afterward, therefore
-it is perfect to store metadata belonging to the NFT.
-
-The ID of an IOTA NFT is also a valid address, therefore the NFT itself can receive and manage funds and other NFTs as
-well. Refer to the [cli-wallet tutorial](../../tutorials/wallet_library.md) for an overview of what you can do with an NFT.
-
-Interestingly, minting an IOTA NFT costs you only the minimum required deposit balance (0.0001 IOTA at the moment), which
-you can take back when you destroy the NFT. This is required so that NFTs are not minted out of thin air, and there are
-some IOTAs backing the output. Otherwise, the ledger database could be easily spammed.
-Transferring NFTs is also feeless, just like any other transaction in IOTA.
-
-## GoShimmer Implementation
-
-If you are interested, you can find the GoShimmer implementation of the new output types in
-[output.go](https://github.com/iotaledger/goshimmer/blob/develop/packages/protocol/engine/ledger/vm/devnetvm/output.go):
-
-- [AliasOutput](https://github.com/iotaledger/goshimmer/blob/develop/packages/protocol/engine/ledger/vm/devnetvm/output.go#L598) and
-- [ExtendedLockedOutput](https://github.com/iotaledger/goshimmer/blob/develop/packages/protocol/engine/ledger/vm/devnetvm/output.go#L1582)
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/autopeering.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/autopeering.md
deleted file mode 100644
index c7a941e69ac..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/autopeering.md
+++ /dev/null
@@ -1,232 +0,0 @@
----
-description: An IOTA node needs to discover and maintain a list of the reachable IP addresses of other peers. Nodes need to be kept up-to-date about the ledger state, thus they exchange information with each other.
-image: /img/protocol_specification/peer_discovery.png
-keywords:
- - node
- - neighbors
- - selection
- - pong
- - ping
- - peer
- - peering
- - discovery
- - request
- - accepted
- - salt update
----
-
-# Autopeering
-
-In order to establish connections, an IOTA node needs to discover and maintain a list of the reachable IP addresses of other peers. Nodes need to be kept up-to-date about the ledger state, thus they exchange information with each other. Each node establishes a communication channel with a small subset of nodes (i.e., neighbors) via a process called `peering`. Such a process must be resilient against eclipse attacks: if all of a node’s neighbors are controlled by an attacker, then the attacker has complete control over the node’s view of the Tangle. Moreover, to prevent or limit sybil-based attacks, the neighbor selection protocol makes use of a scarce resource dubbed Consensus Mana: arbitrary nodes can be created, but it is difficult to produce high mana nodes.
-
-Throughout this section the terms `Node` and `Peer` are used interchangeably to refer to a `Node` device.
-
-The usage of the _Ping_ and _Pong_ mechanism is to be considered as a bidirectional exchange similarly to how described by other standards such as [CoAP](https://core-wg.github.io/coap-sig/) and [WebSocket](https://tools.ietf.org/html/rfc6455#section-5.5.2).
-
-## Node Identities
-
-Every node has a cryptographic identity, a key on the ed25519 elliptic curve. The `blake2b` hash of the public key of the peer serves as its identifier or `node ID`.
-
-## Peer Discovery
-
-The main goal of the _Peer Discovery_ protocol is to expose an interface providing a list of all the verified peers.
-To bootstrap the peer discovery, a node _must_ be able to reach one or more entry nodes. To achieve this, the implementation of the protocol _shall_ provide a hard-coded list of trusted **entry nodes** run by the IF or by trusted community members that answer to peer discovery packets coming from new nodes joining the IOTA network. This approach is a common practice of many distributed networks [[Neudecker 2018]](https://ieeexplore.ieee.org/iel7/9739/8649699/08456488.pdf).
-Public Key-based Cryptography (PKC) _shall_ be used for uniquely [identifying](#Node_identities) peers and for authenticating each packet.
-The usage of the Ping and Pong protocols is that _Ping_ are sent to verify a given peer and, upon reception of a valid _Pong_ as a response from that peer, the peer is verified.
-Once a peer has been verified, it can be queried to discover new peers by sending a _DiscoveryRequest_. As a response, a _DiscoveryResponse_ _shall_ be returned, containing a list of new peers. The new peer nodes in this list _shall_ be verified by the receiving application.
-
-This process is summarized in the following figure and detailed in the following subsections:
-
-[![Peer discovery](/img/protocol_specification/peer_discovery.png 'Peer discovery')](/img/protocol_specification/peer_discovery.png)
-
-### Verification
-
-The verification process aims at both verifying peer identities and checking their online status. Each peer _shall_ maintain a list of all the known peers. This list _shall_ be called `known_peer_list`. Elements of any known peer list _shall_ contain a reference to a [Peer](#Peer) and a time at which it _shall_ be verified/re-verified.
-As such, the `known_peer_list` can be seen as a time-priority queue. A newly discovered peer gets added to the list at the current time. Whenever a peer is verified, its time value on the `known_peer_list` gets updated to the time at which that peer _shall_ be re-verified.
-The intent of this arrangement is to allow the node application to first verify newly discovered (and thus still unverified) peers and then to re-verify older peers (to confirm their online status) by iterating over the `known_peer_list`.
-It is worthwhile to note that the order in which the `known_peer_list` is worked through is important. For example, if the peer is added to the front ('head') of the `known_peer_list`, it is possible for an adversary to front-fill the `known_peer_list` with a selection of its own nodes. This is resolved by the use of the time-priority queue.
-
-The verification process always initiates from a _Ping_. Upon reception of a _Ping_, a peer _shall_ check its validity by:
-
-- verifying that the signature of the _Ping_ is valid and discarding the request otherwise;
-- checking that the `version` and `network_id` fields match its configuration and discarding the _Ping_ otherwise;
-- checking that the `timestamp` field is fresh (i.e., not older than a given time) and discarding the packet otherwise;
-- checking that the `dest_addr` matches its IP address and discarding the _Ping_ otherwise.
-
-Upon successful validation of a received _Ping_, a peer _shall_ respond with a _Pong_. In case the sender of the _Ping_ is a new peer from the perspective of the receiving node, the receiver peer _shall_ add it to its `known_peer_list`. This enables the verification process to also occur in the reverse direction.
-
-Upon reception of a _Pong_, a peer _shall_ check its validity by:
-
-- verifying that the signature of the _Pong_ is valid and discarding it otherwise;
-- checking that the `req_hash` field matches a request (i.e. _Ping_) previously sent and not expired (i.e., the difference between the timestamp of the _Ping_ and _Pong_ is not greater than a given threshold) and discarding the associated _Ping_ or _Pong_ otherwise;
-- checking that the `dest_addr` matches its IP address and discarding the associated _Ping_ or _Pong_ otherwise.
-
-Upon successful validation of a received _Pong_, a peer _shall_:
-
-- add the peer sender of the _Pong_ to a list of verified peers called `verified_peer_list`;
-- move the peer entry of the `known_peer_list` to the tail.
-
-### Removal
-
-While verifying a new peer, if no or an invalid _Pong_ is received after `max_verify_attempts` attempts, that node _shall_ be removed from the `known_peer_list`. Each expected reply should have a timeout such that if no answer is received after that, an attempt is considered concluded and counted as failed.
-
-Each peer on the `verified_peer_list` _shall_ be re-verified after `verification_lifetime` hours; while re-verifying a peer, if no or invalid _Pong_ is received after `max_reverify_attempts` attempts, the peer _shall_ be removed from the `verified_peer_list`.
-
-### Discovery
-
-Each peer entry of the `verified_peer_list` may be used to discover new peers. This process is initiated by sending a _DiscoveryRequest_.
-
-Upon reception of a _DiscoveryRequest_, a peer node _shall_ check its validity by:
-
-- checking that the sender of the _DiscoveryRequest_ is a verified peer (i.e. is stored in the `verified_peer_list`) and discarding the request otherwise;
-- verifying that the signature of the _DiscoveryRequest_ is valid and discarding the request otherwise;
-- checking that the `timestamp` field is fresh (i.e., not older than a given time) and discarding the request otherwise.
-
-Upon successful validation of a received _DiscoveryRequest_, a peer _shall_ reply with a _DiscoveryResponse_.
-
-Upon reception of a _DiscoveryResponse_, a peer _shall_ check its validity by:
-
-- verifying that the signature of the _DiscoveryResponse_ is valid and discarding the response otherwise;
-- checking that the `req_hash` field matches a discovery request (i.e. _DiscoveryRequest_) previously sent and not expired (i.e., the difference between the timestamp of the _DiscoveryRequest_ and _DiscoveryResponse_ is not greater than a given threshold) and discarding the response otherwise.
-
-Upon successful validation of a received _DiscoveryResponse_, a node _shall_ add the nodes contained in the `peers` field to the `known_peer_list`.
-
-## Neighbor Selection
-
-The goal of the neighbor selection is to build a node's neighborhood (to be used by the gossip protocol) while preventing attackers from “tricking” other nodes into becoming neighbors. Neighbors are established when one node sends a peering request to another node, which in turn accepts or rejects the request with a peering response.
-
-To prevent attacks, the protocol makes the peering request _verifiably random_ such that attackers cannot create nodes to which the target node will send requests. At its core, the neighbor selection protocol uses both a screening process called _Consensus Mana rank_ and a _score function_ that takes into account some randomness dubbed _private salt_ and _public salt_.
-Half of the neighbors will be constituted from nodes that accepted the peering request, while half will be constituted of nodes that will request for the peering. The two distinct groups of neighbors are consequently called:
-
-- Chosen neighbors (outbound). The peers that the node proactively selected through the neighbor selection mechanism.
-- Accepted neighbors (inbound). The peers that sent the peering request to the node and were accepted as a neighbor.
-
-### Local Variables
-
-Local variables defined here are included to help in understanding the protocol described in this section. The node application shall handle those variables in some form.
-
-- `saltUpdateInterval`: The time interval at which nodes shall update their salts.
-- `responseTimeout`: The time that node waits for a response during one peering attempt.
-- `requestExpirationTime`: The time used for the request timestamp validation, if the timestamp is older than this threshold the request is dropped
-- `maxPeeringAttempts`: The maximum number of peering requests retries sent to the selected node before the next salt update.
-
-### Mana Rank Interval
-
-Each peer discovered and verified via the _Peer Discovery_ protocol _shall_ have a consensus mana value associated with it. The peer running the _Neighbor Selection_ protocol _shall_ keep this information up-to-date and use it to update a data structure called `manaRank` containing the list of the nodes' identities for each mana value. The aim of this ranking is to select a subset of peers having similar mana to the node preparing the ranking. More specifically, let's define `potentialNeighbors` to be such a subset, that is divided into a `lower` and an `upper` set with respect to a `targetMana` value (i.e., the mana value of the node performing the ranking). By iterating over the `manaRank`, each node _shall_ fill both the `lower` and `upper` sets with nodes' identities having a similar rank to itself, not less/greater than a given threshold `rho` respectively, except when each subset does not reach the minimal size `r`.
-
-The following pseudocode describes a reference implementation of this process:
-
-```
-Inputs:
- manaRank: mapping between mana values and the list of nodes' identities with that mana;
- targetMana: the mana value of the node performing the ranking;
- rho: the ratio determining the length of the rank to consider;
- r: the minimum number of nodes' identities to return for both lower and upper sets;
- Largest(r, targetMana): the set of r largest cMana holders less than targetMana;
- Smallest(r, targetMana): the set of r smallest cMana holders greater than targetMana;
-
-Outputs:
- potentialNeighbors: the set of nodes' identities to consider for neighbor selection;
-```
-
-```vbnet
-FOR mana IN manaRank
- nodeID = manaRank[mana]
- IF mana > targetMana
- IF mana / targetMana < rho
- Append(upperSet, nodeID)
- ELSE IF mana == 0 || mana == targetMana
- BREAK
- ELSE IF targetMana / mana < rho
- Append(lowerSet, nodeID)
-
-IF Len(lowerSet) < r
- // set lowerSet with the r largest mana holders less than targetMana
- lowerSet = Largest(r, targetMana)
-
-IF Len(upperSet) < r
- // set upperSet with the r smallest mana holders greater than targetMana
- upperSet = Smallest(r, targetMana)
-
-potentialNeighbors = Append(upperSet, lowerSet)
-RETURN potentialNeighbors
-
-```
-
-### Selection
-
-The maximum number of neighbors is a parameter of the gossip protocol. This section proposes to use a size of 8 equally divided into 4 chosen (outbound) and 4 accepted (inbound) neighbors. It is crucial to decide on a fixed number of neighbors, as the constant number decreases an eclipse probability exponentially. The chosen _k_ is a compromise between having more connections resulting in lower performance and increased protection from an eclipse attack.
-
-The operations involved during neighbor selection are listed in the following:
-
-1. Get an up-to-date list of verified and known peers from the _Peer Discovery_ protocol.
-2. Use [mana rank](#Mana_rank) to filter the previous list to obtain a list of peers to be potential neighbors.
-3. Use the score function to request/accept neighbors.
-
-The score between two nodes is measured through the score function _s_, defined by:
-
-s(nodeID1, nodeID2, salt) = hash(nodeID1 || nodeID2 || salt), where:
-
-- `nodeID1` and `nodeID2` are the identities of the considered nodes.
-- `salt` is the salt value that can be private or public depending on the peering direction (inbound/outbound).
-- `hash` is the `blake2b` hash function.
-- `||` is the concatanation operation.
-
-Note that the value used as the score is an unsigned integer derived from the first 4 bytes of the byte array after the `hash` function.
-
-In order to connect to new neighbors, each node with ID `ownID` and public salt `pubSalt` keeps a list of potential neighbors derived via [Mana rank](#Mana_rank) that is sorted by their score `d(ownID, ·, pubSalt)`. Then, the node shall send peering requests in _ascending order_, containing its own current public salt and a timestamp representing the issuance time of the request.
-The connecting node shall repeat this process until it has established connections to enough neighbors or it finds closer peers. Those neighbors make up its list of chosen neighbors. This entire process is also illustrated in the following pseudocode:
-
-```
-Inputs:
- k: desired amount of neighbors;
- c: current list of chosen neighbors;
- p: list of potential peers;
- localID: local nodeID
- pubSalt: local public salt;
-```
-
-```vbnet
-pSorted = SortByScoreAsc(P, localID, pubSalt)
-FOR p IN pSorted
- peeringRequest = SendPeeringRequest(p)
- IF peeringRequest.accepted
- Append(c, p)
- IF Len(c) == Ceil(k/2)
- RETURN
-```
-
-More specifically, after sending a peering request a node _shall_:
-
-- wait to get a _Peering Response_ that could be positive or negative.
- - If positive, add the peer to its chosen neighbor list
- - If negative, filter out the peer from future requests until the next salt update or the end of the list of potential neighbors is reached.
- - If after `responseTimeout` no response is received, try again for a fixed `maxPeeringAttempts`. If not successful, filter out the peer from future requests until the next salt update or the end of the list of potential neighbors is reached.
-
-Similar to the previous case, in order to accept neighbors, every node with ID ownID _shall_ generate a private salt `privSalt`.
-
-Upon reception of a _Peering Request_, a peer _shall_ make a decision to accept, reject or discard the request by:
-
-- verifying that the signature of the _Peering Request_ is valid and discard the request otherwise;
-- checking that the `timestamp` field is valid (i.e., not older than a given threshold `requestExpirationTime` specified by the node) and discard the request otherwise;
-- checking that the _mana_ of the requester peer is within the own [Mana rank](#Mana_rank) and send back a _negative_ _Peering Response_ otherwise;
-- checking that the requestor salt matches its hash chain by:
- - taking the difference between the timestamp of the peering request and the time the initial salt was set, and then dividing this number by `saltUpdateInterval`, rounding down;
- - hashing the requester public salt as many times as the number of salt changes;
- - finally, if the result does not match the initial salt, discard the peering request;
-- applying a statistical test to the request defined as _s(remoteID, ownID, ζ_remote) < θ_ for a fixed threshold θ, and discard it otherwise.
- - this test determines the effectiveness of the brute force attack when an attacker tries to establish a connection with a desired peer;
- - with θ set to 0.01 an attacker has only 1% of chance of being successful;
-- accept the peering request by sending back a _positive_ _Peering Response_ if either one of the following conditions is satisfied, and send back a _negative_ _Peering Response_ otherwise:
- - the current size of the accepted neighbors list is smaller than _Floor(k/2)_;
- - the score defined as _s(ownID, remoteID, privSalt)_ is lower than the current highest score among accepted neighbors. In this case, send a _Peering Drop_ to drop the accepted neighbor with the highest score replaced by the requester peer.
-
-### Neighbor Removal
-
-Neighbor removal can occur for several reasons:
-
-- A node is replacing a neighbor with a better (in terms of score function) one;
-- From the gossip layer, the connection with a neighbor is lost;
-- If some form of reputation or bad behavior is being monitored, a neighbor could be dropped in case of misbehavior. For example, a node could respond to the peering request but choose not to gossip received blocks.
-
-Independently from the reason, when a peer drops a neighbor _shall_ send a _Peering Drop_ and remove the neighbor from its requested/accepted neighbor list. Upon reception of a _Peering Drop_, the peer _shall_ remove the dropping neighbor from its requested/accepted neighbor list.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/congestion_control.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/congestion_control.md
deleted file mode 100644
index 9c65052e4a0..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/congestion_control.md
+++ /dev/null
@@ -1,162 +0,0 @@
----
-description: Every network has to deal with its intrinsic limited resources. GoShimmer uses congestion control algorithm to regulate the influx of blocks in the network with the goal of maximizing throughput (blocks/bytes per second) and minimizing delays.
-image: /img/protocol_specification/congestion_control_algorithm_infographic_new.png
-keywords:
- - node
- - congestion control algorithm
- - honest node
- - block
- - access mana
- - malicious nde
- - scheduling
----
-
-# Congestion Control
-
-Every network has to deal with its limited intrinsic resources in bandwidth and node capabilities (CPU and
-storage). In this document, we present a congestion control algorithm to regulate the influx of blocks in the
-network to maximize throughput (blocks/bytes per second) and minimize delays. Furthermore, the
-following requirements must be satisfied:
-
-- **Consistency**: If an honest node writes a block, it should be written by all honest nodes within some
- delay bound.
-- **Fairness**: Nodes can obtain a share of the available throughput depending on their access Mana. Throughput is
- shared in a way that an attempt to increase the allocation of any node necessarily results in the decrease
- in the allocation of some other node with an equal or smaller allocation (max-min fairness).
-- **Security**: Malicious nodes shall be unable to interfere with either of the above requirements.
-
-[![Congestion Control](/img/protocol_specification/congestion_control_algorithm_infographic_new.png)](/img/protocol_specification/congestion_control_algorithm_infographic_new.png)
-
-You can find more information in the following papers:
-
-- [Access Control for Distributed Ledgers in the Internet of Things: A Networking Approach](https://arxiv.org/abs/2005.07778).
-- [Secure Access Control for DAG-based Distributed Ledgers](https://arxiv.org/abs/2107.10238).
-
-## Detailed Design
-
-The algorithm has three core components:
-
-- A scheduling algorithm that ensures fair access for all nodes according to their access Mana.
-- A TCP-inspired algorithm for decentralized rate setting to utilize the available bandwidth efficiently while
- preventing large delays.
-- A buffer management policy to deal with malicious flows.
-
-### Prerequirements
-
-- **Node identity**: The congestion control module requires node accountability. Each block is associated with the node ID of its issuing
- node.
-
-- **Access mana**: The congestion control module knows the access Mana of the network nodes to share the available
- throughput fairly. Without access Mana, the network would be subject to Sybil attacks, which would incentivize actors
- to artificially split (or aggregate) onto multiple identities.
-
-- **Block weight**. The weight of a block is used to prioritize blocks over the others, and it is calculated
- based on the type and length of a block.
-
-### Outbox Buffer Management
-
-Once a block has successfully passed the block parser checks, is solid and booked, it is enqueued into the outbox
-buffer for scheduling. The outbox is split into several queues, each corresponding to a different node issuing
-blocks. The total outbox buffer size is limited, but individual queues do not have a size limit. This section
-describes the operations of block enqueuing and dequeuing into and from the outbox buffer.
-
-The enqueuing mechanism includes the following components:
-
-- **Classification**: The mechanism identifies the queue where the block belongs according to the node ID of
- the block issuer.
-- **Block enqueuing**: The block is actually enqueued, the queue is sorted by block timestamps in increasing order
- and counters are updated (e.g., counters for the total number of blocks in the queue).
-
-The dequeuing mechanism includes the following components:
-
-- **Queue selection**: A queue is selected according to a round-robin scheduling algorithm. In particular, the
- mechanism uses a modified version of the deficit round-robin (DRR) algorithm.
-- **Block dequeuing**. The first (oldest) block of the queue, that satisfies certain conditions is dequeued. A
- block must satisfy the following conditions:
- - The block has a ready flag assigned. A ready flag is assigned to a block when all of its parents are eligible (the parents have been scheduled or confirmed).
- - The block timestamp is not in the future.
-- **Block skipping**. Once a block in the outbox is confirmed by another block approving it, it will get removed from the outbox buffer. Since the block already has children and is supposed to be replicated on enough nodes in the network, it is not gossiped or added to the tip pool, hence "skipped".
-- **Block drop**: Due to the node's bootstrapping, network congestion, or ongoing attacks, the buffer occupancy of the outbox buffer may become large. To keep bounded delays and isolate the attacker's spam, a node shall drop some blocks if the total number of blocks in all queues exceeds the maximum buffer size. Particularly, the node will drop blocks from the queue with the largest mana-scaled length, computed by dividing the number of blocks in the queue by the amount of access Mana of the corresponding node.
- - `Mana-scaled queue size = queue size / node aMana`;
-- **Scheduler management**: The scheduler counters and pointers are updated.
-
-#### False positive drop
-
-During an attack or congestion, a node may drop a block already scheduled by the rest of the network, causing a
-_false positive drop_. This means that the block’s future cone will not be marked as _ready_ as its past cone is not
-eligible. This is not a problem because blocks dropped from the outbox are already booked and confirmation comes
-eventually due to blocks received from the rest of the network which approve the dropped ones.
-
-#### False positive schedule
-
-Another possible problem is that a node schedules a block that the rest of the network drops, causing a _false
-positive_. The block is gossiped and added to the tip pool. However, it will never accumulate enough approval
-weight to be _Confirmed_. Eventually, the node will orphan this part of tangle as the blocks in the future-cone
-will not pass the [Time Since Confirmation check](tangle.md#tip-pool-and-time-since-confirmation-check) during tip
-selection.
-
-### Scheduler
-
-Scheduling is the most critical task in the congestion control component. The scheduling algorithm must guarantee that
-an honest node `node` meets the following requirements:
-
-- **Consistency**: The node's blocks will not accumulate indefinitely at any node, and so, starvation is avoided.
-- **Fairness**: The node's fair share of the network resources are allocated to it according to its access Mana.
-- **Security**: Malicious nodes sending above their allowed rate will not interrupt a node's throughput requirement.
-
-Although nodes in our setting are capable of more complex and customised behaviour than a typical router in a
-packet-switched network, our scheduler must still be lightweight and scalable due to the potentially large number of
-nodes requiring differentiated treatment. It is estimated that over 10,000 nodes operate on the Bitcoin network, and
-we expect that an even greater number of nodes are likely to be present in the IoT setting. For this reason, we
-adopt a scheduler based on [Deficit Round Robin](https://ieeexplore.ieee.org/document/502236) (DRR) (the Linux
-implementation of the [FQ-CoDel packet scheduler](https://tools.ietf.org/html/rfc8290), which is based on DRR,
-supports anywhere up to 65535 separate queues).
-
-The DRR scans all non-empty queues in sequence. When it selects a non-empty queue, the DDR will increment the queue's
-priority counter (_deficit_) by a specific value (_quantum_). Then, the value of the deficit counter is a maximal amount
-of bytes that can be sent this turn. If the deficit counter is greater than the weight of the block at the head of the
-queue, the DRR can schedule this block, and this weight decrements the value of the counter. In our implementation,
-the quantum is proportional to the node's access Mana, and we add a cap on the maximum deficit that a node can achieve
-to keep the network latency low. It is also important to mention that the DRR can assign the weight of the block so
-that specific blocks can be prioritized (low weight) or penalized (large weight); by default, in our mechanism, the
-weight is proportional to the block size measured in bytes. The weight of a block is set by the
-function `WorkCalculator()`.
-
-:::note
-
-The network manager sets up the desired maximum (fixed) rate `SCHEDULING_RATE` at which it will schedule blocks,
-computed in weight per second. This implies that every block is scheduled after a delay which is equal to the weight (
-size as default) of the latest scheduled block times the parameter
-`SCHEDULING_RATE`. This rate mainly depends on the degree of decentralization you desire: a larger rate leads to
-higher throughput but will leave behind slower devices that will fall out of sync.
-
-:::
-
-### Rate Setting
-
-If nodes were continuously willing to issue new blocks,rate-setting would not be a problem. Nodes could simply operate
-at a fixed, assured rate and share the total throughput according to the percentage of access Mana they own. The
-scheduling algorithm would ensure that this rate is enforceable, and only misbehaving nodes would experience increasing
-delays or dropped blocks. However, it is unrealistic to expect all nodes always to have blocks to issue. We would
-like nodes to better utilize network resources without causing excessive congestion and violating any requirement.
-
-We propose a rate-setting algorithm inspired by TCP — each node employs [additive increase, multiplicative decrease]
-(https://https://epubs.siam.org/doi/book/10.1137/1.9781611974225) (AIMD) rules to update their issuance rate in response
-to congestion events. In the case of distributed ledgers, all block traffic passes through all nodes, contrary to the
-case of traffic typically found in packet-switched networks and other traditional network architectures. Under these
-conditions, local congestion at a node is all that is required to indicate congestion elsewhere in the network. This
-observation is crucial as it presents an opportunity for a congestion control algorithm based entirely on local traffic.
-
-Our rate-setting algorithm outlines the AIMD rules employed by each node to set their issuance rate. Rate updates for a
-node occur each time a new block is scheduled if the node has a non-empty set of its own blocks that are not yet
-scheduled. The node sets its own local additive-increase variable `localIncrease(node)` based on its access Mana and a
-global increase rate parameter `RATE_SETTING_INCREASE`. An appropriate choice of
-`RATE_SETTING_INCREASE` ensures a conservative global increase rate that does not cause problems even when many nodes
-simultaneously increase their rate.
-
-Nodes wait `RATE_SETTING_PAUSE` seconds after a global multiplicative decrease parameter `RATE_SETTING_DECREASE`, during
-which no further updates are made, to allow the reduced rate to take effect and prevent multiple successive decreases.
-At each update, the node checks how many of its own blocks are in its outbox queue and responds with a multiplicative
-decrease if this number is above a threshold,
-`backoff(node)`, which is proportional to the node's access Mana. If the number of the node's blocks in the outbox is
-below the threshold, the node's issuance rate is incremented by its local increase variable, `localIncrease(node)`.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/consensus_mechanism.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/consensus_mechanism.md
deleted file mode 100644
index dfc6bede7bf..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/consensus_mechanism.md
+++ /dev/null
@@ -1,259 +0,0 @@
----
-description: The consensus mechanism is necessary to achieve agreement among the nodes of the network. Since the Tangle is only partially ordered we have designed an open and leaderless consensus mechanism which combines FPC and Approval Weight.
-
-keywords:
- - node
- - approval weight
- - conflict
- - opinion
- - block
- - high probability
- - active consensus mana
----
-
-# Consensus Mechanism
-
-The consensus mechanism is necessary to achieve agreement among the nodes of the network. In case of a double spend, one way to decide which transaction should be considered valid would be to order them and pick the oldest one. However, the Tangle is only partially ordered. To tackle this problem in the context of the Tangle, we have designed an open and leaderless consensus mechanism that utilizes the Tangle as a medium to exchange votes. Any node can add a block to the Tangle, and each block added to the Tangle represents a virtual vote (i.e. there is no additional overhead to communicate votes) to its entire past.
-
-The consensus mechanism can broadly be divided into consensus on two separate entities. On the one hand, we need to resolve any conflicts on the underlying UTXO ledger to prevent double spends. On the other hand, we need to make sure that blocks within the Tangle are not orphaned. Both are simply derived by observing the Tangle and objectively keeping track of [Approval Weight (AW)](#approval-weight-aw) with cMana (more specifically [active cMana](#active-cmana)) as a Sybil protection. Once a [conflict](ledgerstate.md#conflicts) (or block) reaches a certain AW threshold, an application can consider it as _confirmed_. To simplify this notion we introduce [Grades of Finality (GoF)](#grades-of-finality-gof) where a higher GoF represents a higher confidence.
-
-| Name | Component | Initial/local opinion | Consensus | Comparable blockchain mechanism for voting/finality |
-| ------------------- | ----------- | --------------------------------- | -------------- | --------------------------------------------------- |
-| voting on conflicts | UTXO ledger | OTV/FPCS | conflict/tx AW | longest chain rule |
-| finality of blocks | Tangle | inclusion score via tip selection | block AW | x block rule |
-
-On an abstract level, a node can be seen as a replicated state machine, just following whatever it receives through the Tangle, and, in case of blocks containing transactions, modifying the UTXO ledger. Only when a node wants to issue a block (read as: _cast a vote_) it needs to evaluate its own local opinion via [modular conflict selection function](#modular-conflict-selection-function). This decoupling of coming to consensus and setting the initial opinion allows for great flexibility and separation of concerns.
-
-## Approval Weight (AW)
-
-Approval weight represents the [weight](#active-consensus-mana) of conflicts (and blocks), similar to the longest chain rule in Nakamoto consensus. However, instead of selecting a leader based on a puzzle (PoW) or stake (PoS), it allows every node to express its opinion by simply issuing any block and attaching it in a part of the Tangle it _likes_ (based on its initial opinion on blocks and possibly utilizing the [like switch](#like-switch) to express its opinion on conflicts).
-
-It is important to note that tracking of AW for conflicts and markers/blocks is orthogonal. Thus, a block can reach a high AW whereas its contained payload, e.g., a transaction being a double spend, does not reach any AW on conflict/UTXO level.
-
-### Detailed Design
-
-Approval weight AW increases because of voters (nodes) that cast votes for conflicts and blocks by means of making statements. This is necessary due to the changing nature of cMana over time, which prevents simply counting the AW per conflict or block. Additionally, whenever a node changes its opinion on a conflict, the previous vote needs to be invalidated.
-
-#### Definitions
-
-- **Statement**: A statement is any block issued by a _node_, expressing its opinion and casting a (virtual) vote. It can be objectively ordered by its timestamp, and, if equal, its block ID.
-- **Conflict voter**: A conflict voter is a _node_ that issued a statement attaching to a conflict, and, thus, voting for it.
-- **Marker/block voter**: A marker/block's voter is a _node_ that issued a statement directly or indirectly referencing a marker/block, including its issuer.
-
-#### Conflicts
-
-Tracking voters of [conflicts](ledgerstate.md#conflicts) is an effective way of objective virtual voting. It allows nodes to express their opinion simply by attaching a statement to a conflict they like (see [like switch](#like-switch)). This statement needs to propagate down the conflict DAG, adding support to each of the conflict's parents. In case a voter changes their opinion, support needs to be revoked from all conflicting conflicts and their children. Thus, a node can only support one conflict of a conflict set.
-
-To make this more clear consider the following example:
-
-[![Conflict Voter](/img/protocol_specification/conflicts.png)](/img/protocol_specification/conflicts.png)
-
-The green node issued **statement 1** and attached it to the aggregated conflict `Conflict 1.1 + Conflict 4.1.1`. Thus, the green node is a voter of all the aggregated conflict's parent conflicts, which are (from top to bottom) `Conflict 4.1.1`, `Conflict 1.1`, `Conflict 4.1`, `Conflict 1`, and `Conflict 4`.
-
-Then, the green node issued **statement 2** and attached it to `Conflict 4.1.2`. This makes the green node a voter of `Conflict 4.1.2`; however, `Conflict 4.1.1` is conflicting with it, and thus support for `Conflict 4.1.1` has to be revoked.
-
-`Conflict 4.1`, `Conflict 4` are parent conflicts of `Conflict 4.1.2`, which the green node is still supporting. Since `Conflict 1.1`, `Conflict 1` are not conflicting to either of `Conflict 4.1.2`'s parents, the green node remains their voter.
-
-Finally, the green nodes issued **statement 3**, which is in `Conflict 2`. Now the green node is a voter of `Conflict 2`, and no longer a voter of `Conflict 1`, since `Conflict 1` is conflicting to `Conflict 2`. Note that, this voter removal will propagate to child conflicts. Thus, the green node is removed from `Conflict 1.1` as well.
-`Conflict 3`, `4` and both of their child conflicts have nothing to do with this attachment, so their voter status remains unchanged.
-
-It is important to notice that the arrival order of the statements does not make a difference on the final outcome. Due to the fact that statements can be ordered objectively, every node in the network eventually comes to the same conclusion as to who is supporting which conflict, even when nodes change their opinions.
-
-##### Calculation of Approval Weight
-
-The approval weight itself is calculated every time a new voter is added/removed to a conflict. The AW for a conflict _B_ is calculated as follows:
-
-```
-AW(B) = 'active cMana of voters(B)' / 'total active cMana'
-```
-
-#### Markers
-
-It would be computationally expensive to track the AW for each block individually. Instead, we approximate the AW with the help of [markers](markers.md). Once a marker fulfills a GoF, the corresponding GoF value is propagated into its past cone until all blocks have an equal or higher GoF.
-
-Recall that markers are not part of the core protocol. As such, this description is merely an optimization from an implementation standpoint.
-
-Rather than keeping a list of voters for each marker and collecting voters for each marker (which would also be expensive), we keep a list of voters along with its approved marker index for each marker sequence. This approach provides a simple and fast look-up for marker voters making use of the Tangle structure as mapped by the markers.
-
-For each marker sequence, we keep a map of voter to marker index, meaning a voter supports a marker index `i`. This implies that the voter supports all markers with index `<= i`.
-
-Take the figure below as an example:
-![MarkersApprovalWeight SequenceVoters](/img/protocol_specification/MarkersApprovalWeight.png)
-
-The purple circles represent markers of the same sequence, the numbers are marker indices.
-
-Four nodes (A to D) issue statements with past markers of the purple sequence. Node A and D issue blocks having past marker with index 6, thus node A and D are the voters of marker 6 and all markers before, which is 1 to 5. On the other hand, node B issues a block having past marker with index 3, which implies node B is a voter for marker 1 and 2 as well.
-
-This is a fast look-up and avoids walking through a marker's future cone when it comes to retrieving voters for approval weight calculation.
-
-For example, to find all voters of marker 2, we iterate through the map and keep those that support a marker with `index >= 2`. In this case, all nodes are its voters. As for marker 5, it has voters node A and D, which fulfill the check: `index >= 5`.
-
-Here is another more complicated example with parent sequences:
-![MarkersApprovalWeight SequenceVoters](/img/protocol_specification/MarkersApprovalWeightSequenceVoters.png)
-
-The voter will be propagated to the parent sequence.
-
-Node A issues block A2 having past markers `[1,4], [3,4]`, which implies node A is a voter for marker `[1,1]` to `[1,4]`, `[2,1]` to `[2,3]`, and `[3,4]` as well as the block with marker `[3,5]` itself.
-
-##### Calculation of Approval Weight
-
-The approval weight itself is calculated every time a new voter is added to a marker. The AW for a marker _M_ is calculated as follows:
-
-```
-AW(M) = 'active cMana of voters(M)' / 'total active cMana'
-```
-
-### Grades of Finality (GoF)
-
-The tracking of AW itself is objective as long as the Tangle converges on all nodes. However, delays, network splits and ongoing attacks might lead to differences in perception so that a finality can only be expressed probabilistically. The higher the AW, the less likely a decision is going to be reversed. To abstract and simplify this concept we introduce the GoF. Currently, they are simply a translation of AW thresholds to a GoF, but one can imagine other factors as well.
-
-**Block / non-conflicting transaction**
-GoF | AW
--- | --
-0 | < 0.25
-1 | >= 0.25
-2 | >= 0.45
-3 | >= 0.67
-
-**Conflict / conflicting transaction**
-GoF | AW
--- | --
-0 | < 0.25
-1 | >= 0.25
-2 | >= 0.45
-3 | >= 0.67
-
-These thresholds play a crucial role in the safety vs. liveness of the protocol, together with the exact workings of [active cMana](#active-cmana). We are currently investigating them with in-depth simulations.
-
-- The higher the AW threshold the more voters a conflict or block will need to reach a certain GoF -> more secure but higher confirmation time.
-- As a consequence of the above point, TangleTime will be tougher to advance; making the cMana window more likely to get stuck and confirmations to halt forever.
-
-An application needs to decide when to consider a block and (conflicting) transaction as _confirmed_ based on its safety requirements. Conversely, a block or conflict that does not gain enough AW stays pending forever (and is orphaned/removed on snapshotting time).
-
-## Modular Conflict Selection Function
-
-The modular conflict selection function is an abstraction on how a node sets an initial opinion on conflicts. By decoupling the objective perception of AW and a node's initial opinion, we gain flexibility and it becomes effortless to change the way we set initial opinions without modifying anything related to the AW.
-
-### Pure On Tangle Voting (OTV)
-
-The idea of pure OTV is simple: set the initial opinion based on the currently heavier conflict as perceived by AW. However, building a coherent overall opinion means that conflicting realities (possible outcomes of overlapping conflict sets) can not be liked at the same time, which makes finding the heaviest conflict to like not as trivial as it may seem.
-
-In the examples below, a snapshot at a certain time of a UTXO-DAG with its conflicts is shown. The gray boxes labelled with `O:X` represent an output, and an arrow from an output to a transaction means that the transaction is consuming this output. An arrow from a transaction to an output creates this output. An output being consumed multiple times is a conflict and the transactions create a conflict, respectively. The number assigned to a conflict, e.g., `Conflict A = 0.2`, defines the currently perceived Approval Weight of the conflict. A conflict highlighted in **bold** is the outcome of applying the pure OTV rules, i.e., the conflicts that are liked from the perspective of the node.
-
-**Example 1**
-The example below shows how applying the heavier conflict rule recursively results in the end result of `A`, `C`, `E`, and thus the aggregated conflict `C+E` being liked. Looking at the individual conflict weights this result might be surprising: conflict `B` has a weight of `0.3` which is bigger than its conflicting conflict `A = 0.2`. However, `B` is also in conflict with `C` which has an even higher weight `0.4`. Thus, `C` is liked, `B` cannot be liked, and `A` suddenly can become liked again.
-
-`E = 0.35` is heavier than `D = 0.15` and is therefore liked. An (aggregated) conflict can only be liked if all its parents are liked which is the case with `C+E`.
-
-![OTV example 1](/img/protocol_specification/otv-example-1.png)
-
-**Example 2**
-This example is exactly the same as example 1, except that conflict `C` has a weight of `0.25` instead of `0.4`. Now the end result is conflicts `B` and `E` liked. Conflict `B` is heavier than conflict `C` and `A` (winning in all its conflict sets) and becomes liked. Therefore, neither `A` nor `C` can be liked.
-
-Again, `E = 0.35` is heavier than `D = 0.15` and is therefore liked. An (aggregated) conflict can only be liked if all its parents are liked which is not the case with `C+E` in this example.
-
-![OTV example 2](/img/protocol_specification/otv-example-2.png)
-
-### Metastability: OTV and FPCS
-
-Pure OTV is susceptible to metastability attacks: If a powerful attacker can keep any conflict of a conflict set from reaching a high enough approval weight, the attacker can prevent the network from tipping to a side and thus theoretically halt a decision on the given conflicts indefinitely. Only the decision on the targeted conflicts is affected but the rest of the consensus can continue working. By forcing a conflict to stay unresolved, an attacker can, at most, prevent a node from pruning resources related to the pending decision.
-
-In order to prevent such attacks from happening we are planning to implement FPCS with OTV as a conflict selection function. A more detailed description can be found [here](https://iota.cafe/t/on-tangle-voting-with-fpcs/1218).
-
-## Like Switch
-
-Without the like switch, blocks vote for conflicts simply by attaching in their underlying conflict's future cone. While this principle is simple, it has one major flaw: the part of the Tangle of the losing conflict is abandoned so that only the _valid_ part remains. This might lead to mass orphanage of "unlucky" blocks that happened to first vote for the losing conflict. With the help of weak parents these blocks might be _rescued_ without a reattachment, but the nature of weak parents makes it necessary that every block is picked up individually. Next to the fact that keeping such a weak tip pool is computationally expensive, it also opens up orphanage attack scenarios by keeping conflicts undecided (metastability attack turns into orphanage attack).
-
-The like switch is a special type of parent reference that enables keeping everything in the Tangle, even conflicting transactions that are not included into the valid ledger state by means of consensus. Therefore, it prevents mass orphanage and enables a decoupling of **voting on conflicts (UTXO ledger)** and **finality of blocks / voting on blocks (Tangle)**. It makes the overall protocol (and its implementation) not only more efficient but also easier to reason about and allows for lazy evaluation of a node's opinion, namely only when a node wants to issue a block (read as: _cast a vote_).
-
-From a high-level perspective, the like switch can be seen as a set of rules that influence the way a block inherits its conflicts. Using only strong parents, a block inherits all its parents' conflicts. A like parent retains all the properties of the strong parent (i.e., inherit the conflict of said parent) but additionally it means to exclude all conflicts that are conflicting with the liked conflict.
-Through this mechanism, it becomes possible to attach a block anywhere in the Tangle but still only vote for the conflicts that are liked. Thus, decoupling of block AW and conflict AW.
-
-**Examples**
-To make this more clear, let's consider the following examples. The conflicts `A` and `B`, as well as `C` and `D` form an independent conflict set, respectively. The `Conflict Weights` are the weights as perceived by the node that currently wants to create a block.
-
-**Block creation**
-A node performs random tip selection (e.g. URTS) and in this example selects blocks `5` and `11` as strong parents. Now the node needs to determine whether it currently _likes_ all the conflicts of the selected blocks (`red, yellow, green`) in order to apply the like switch (if necessary) and vote for its liked conflicts.
-
-![Like switch: block creation undecided](/img/protocol_specification/like-switch-block-creation-1.png)
-
-When performing the conflict selection function with pure OTV it will yield the result:
-
-- `red` is disliked, instead like `purple`
-- `green` is disliked, instead like `yellow`
-
-Therefore, the block needs to set two like references to the blocks that introduced the conflict (first attachment of the transaction). The final result is the following:
-
-- conflicts `purple` and `yellow` are supported
-- block `5` (and its entire past cone, `3`, `2`, `1`) is supported
-- block `11` (and its entire past cone, `6`, `4`, `2`, `1`) is supported
-- block `6` (and its entire past cone, `4`, `2`, `1`)
-- block `7` (and its entire past cone, `1`)
-
-![Like switch: block creation](/img/protocol_specification/like-switch-block-creation-2.png)
-
-**Block booking**
-On the flip side of block creation (casting a vote) is applying a vote when booking a block, where the process is essentially just reversed. Assuming a node receives block `17`. First, it determines the conflicts of the strong parents (`red`, `yellow`, `purple`) and like parents (`red`). Now, it removes all conflicts that are conflicting with the like parents' conflicts (i.e., `purple`) and is left with the conflicts `red` and `yellow` (and adds the like conflicts to it, which in this case is without effect). If the resulting conflict is `invalid`, (e.g., because it combines conflicting conflicts) then the block itself is considered invalid because the issuer did not follow the protocol.
-
-In this example the final result is the following (block `17` supports):
-
-- conflicts `red` and `yellow`
-- block `16` and its entire past cone
-- block `11` and its entire past cone
-- block `4` and its entire past cone
-
-![Like switch: block booking](/img/protocol_specification/like-switch-block-booking.png)
-
-## Active cMana
-
-The consensus mechanism weighs votes on conflicts (blocks in future cone with like switch) or block inclusion (all blocks in future cone) by the limited resource cMana, which is thus our Sybil-protection mechanism. cMana can be pledged to any nodeID, including offline or non-existing nodes, when transferring funds (in the proportion of the funds) and is instantly available (current implementation without EMA). Funds might get lost/locked over time, and, therefore, the total accessible cMana declines as well. **Consequently, a fixed cMana threshold cannot be used to determine a voting outcome.**
-
-Finalization of voting outcomes should happen once a conflict is _sufficiently_ decided and/or a block is _sufficiently_ deep in the Tangle. However, measuring AW in terms of cMana alone does not yield enough information. Therefore, a degree/grade of finalization and of voting outcome in relation to _something_, e.g., **recently active nodes**, is preferred. This measure should have the following properties:
-
-- security/resilience against various attacks
-- lose influence (shortly) after losing access to funds
-- no possibility of long-range attacks
-- no too quick fluctuations
-- real incentives
-
-### Current Implementation
-
-Active cMana in GoShimmer basically combines two components in an active cMana WeightProvider: the TangleTime and the current state of cMana. A node is considered to be active if it has issued any block in the last `activeTimeThreshold=30min` with respect to the TangleTime. The total active consensus mana is, therefore, the sum of all the consensus mana of each active node.
-
-#### TangleTime
-
-The TangleTime is the issuing time of the last confirmed block. It cannot be attacked without controlling enough mana to accept incorrect timestamps, making it a reliable, attack-resistant quantity.
-
-![Tangle Time](/img/protocol_specification/tangle_time.jpg)
-
-#### cMana
-
-The current state of cMana is simply the current cMana vector, at the time the active cMana is requested.
-
-#### Putting it together
-
-Every node keeps track of a list of active nodes locally. Whenever a node issues a block it is added to the list of active nodes (nodeID -> issuing time of latest block). When the active cMana is requested all relevant node weights are returned. Relevant here means the following:
-
-- the node has more than `minimumManaThreshold=0` cMana to prevent bloating attacks with too little cMana
-- there is a block that fulfills the condition `issuing time <= TangleTime && TangleTime - issuing time <= activeTimeThreshold` where `activeTimeThreshold=30min` (see the following example, blocks `1` and `3` are not within the window)
-
-![Active cMana window](/img/protocol_specification/active-cMana-window.png)
-
-### Example
-
-When syncing (`TT=t0`) and booking a block from time `t1`, active cMana is considered from `t0-activeTimeThreshold`. Once this block gets confirmed, the TangleTime advances to `TT=t1`. For the next block at `t2`, `TT=t1-activeTimeThreshold` will be considered. Using active cMana in this way, we basically get a sliding window of how the Tangle emerged and _replay_ it from the past to the present.
-
-### Pros
-
-- replaying the Tangle as it emerged
-- always use cMana from the current perspective
-- relatively simple concept
-
-### Cons
-
-- active cMana does not yield sufficient information (e.g. when eclipsed), it might look like something is 100% confirmed even though only 2% of the total cMana are considered active.
-- active cMana might change quickly if nodes with high mana suddenly become active
-- if nodes are only able to issue blocks when "in sync" and no block gets confirmed within that time, nobody might be able to issue blocks anymore
-- if a majority/all active cMana nodes go offline _within the active cMana window_, consensus will halt forever because the TangleTime can never advance unless a majority of these nodes move the TangleTime forward
-
-This reflects the current implementation and we are currently investigating active cMana with in-depth simulations to improve the mechanism.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/ledgerstate.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/ledgerstate.md
deleted file mode 100644
index 8c8b9eda6d6..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/ledgerstate.md
+++ /dev/null
@@ -1,323 +0,0 @@
----
-description: The unspent transaction output (UTXO) model defines a ledger state where balances are not directly associated with addresses but with the outputs of transactions. Transactions specify the outputs of previous transactions as inputs, which are consumed in order to create new outputs.
-image: /img/protocol_specification/utxo_fund_flow.png
-keywords:
- - transactions
- - ledger state
- - unlock block
- - essence
- - utxo
- - input
- - signature unlock block
- - reference unlock block
- - conflict conflict
- - aggregate conflict
----
-
-## UTXO model
-
-The unspent transaction output (UTXO) model defines a ledger state where balances are not directly associated with addresses but with the outputs of transactions. In this model, transactions specify the outputs of previous transactions as inputs, which are consumed in order to create new outputs.
-A transaction must consume the entirety of the specified inputs. The section unlocking the inputs is called an _unlock block_. An unlock block may contain a signature proving ownership of a given input's address and/or other unlock criteria.
-
-The following image depicts the flow of funds using UTXO:
-
-[![Flow of funds using UTXO](/img/protocol_specification/utxo_fund_flow.png 'Flow of funds using UTXO')](/img/protocol_specification/utxo_fund_flow.png)
-
-## Transaction Layout
-
-A _Transaction_ payload is made up of two parts:
-
-1. The _Transaction Essence_ part contains: version, timestamp, nodeID of the aMana pledge, nodeID of the cMana pledge, inputs, outputs and an optional data payload.
-2. The _Unlock Blocks_ which unlock the _Transaction Essence_'s inputs. In case the unlock block contains a signature, it signs the entire _Transaction Essence_ part.
-
-All values are serialized in little-endian encoding (it stores the most significant byte of a word at the largest address and the smallest byte at the smallest address). The serialized form of the transaction is deterministic, meaning the same logical transaction always results in the same serialized byte sequence.
-
-### Transaction Essence
-
-The _Transaction Essence_ of a _Transaction_ carries a version, timestamp, nodeID of the aMana pledge, nodeID of the cMana pledge, inputs, outputs and an optional data payload.
-
-### Inputs
-
-The _Inputs_ part holds the inputs to consume, that in turn fund the outputs of the _Transaction Essence_. There is only one supported type of input as of now, the _UTXO Input_. In the future, more types of inputs may be specified as part of protocol upgrades.
-
-Each defined input must be accompanied by a corresponding _Unlock Block_ at the same index in the _Unlock Blocks_ part of the _Transaction_.
-If multiple inputs may be unlocked through the same _Unlock Block_, the given _Unlock Block_ only needs to be specified at the index of the first input that gets unlocked by it.
-Subsequent inputs that are unlocked through the same data must have a _Reference Unlock Block_ pointing to the previous _Unlock Block_.
-This ensures that no duplicate data needs to occur in the same transaction.
-
-#### UTXO Input
-
-| Name | Type | Description |
-| ------------------------ | ------------- | ----------------------------------------------------------------------- |
-| Input Type | uint8 | Set to value 0 to denote an _UTXO Input_. |
-| Transaction ID | ByteArray[32] | The BLAKE2b-256 hash of the transaction from which the UTXO comes from. |
-| Transaction Output Index | uint16 | The index of the output on the referenced transaction to consume. |
-
-A _UTXO Input_ is an input which references an output of a previous transaction by using the given transaction's BLAKE2b-256 hash + the index of the output on that transaction.
-A _UTXO Input_ must be accompanied by an _Unlock Block_ for the corresponding type of output the _UTXO Input_ is referencing.
-
-Example: If the input references outputs to an Ed25519 address, then the corresponding unlock block must be of type _Signature Unlock Block_ holding an Ed25519 signature.
-
-### Outputs
-
-The _Outputs_ part holds the outputs to create with this _Transaction Payload_. There are different types of output:
-
-- _SigLockedSingleOutput_
-- _SigLockedAssetOutput_
-
-#### SigLockedSingleOutput
-
-| Name | Type | Description |
-| --------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
-| Output Type | uint8 | Set to value 0 to denote a _SigLockedSingleOutput_. |
-| Address `oneOf` | [Ed25519 Address](#ed25519-address) \| [BLS Address](#bls-address) | The raw bytes of the Ed25519/BLS address which is a BLAKE2b-256 hash of the Ed25519/BLS public key |
-| Balance | uint64 | The balance of IOTA tokens to deposit with this _SigLockedSingleOutput_ output. |
-
-##### Ed25519 Address
-
-| Name | Type | Description |
-| ------------ | ------------- | ------------------------------------------------------------------------------------------- |
-| Address Type | uint8 | Set to value 0 to denote an _Ed25519 Address_. |
-| Address | ByteArray[32] | The raw bytes of the Ed25519 address which is a BLAKE2b-256 hash of the Ed25519 public key. |
-
-#### BLS Address
-
-| Name | Type | Description |
-| ------------ | ------------- | ----------------------------------------------------------------------------------- |
-| Address Type | uint8 | Set to value 1 to denote a _BLS Address_. |
-| Address | ByteArray[49] | The raw bytes of the BLS address which is a BLAKE2b-256 hash of the BLS public key. |
-
-The _SigLockedSingleOutput_ defines an output holding an IOTA balance linked to a single address; it is unlocked via a valid signature proving ownership over the given address. Such output may hold an address of different types.
-
-#### SigLockedAssetOutput
-
-| Name | Type | Description |
-| -------------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
-| Output Type | uint8 | Set to value 1 to denote a _SigLockedAssetOutput_. |
-| Address `oneOf` | [Ed25519 Address](#ed25519-address) \| [BLS Address](#bls-address) | The raw bytes of the Ed25519/BLS address which is a BLAKE2b-256 hash of the Ed25519/BLS public key |
-| Balances count | uint32 | The number of individual balances. |
-| AssetBalance `anyOf` | [Asset Balance](#asset-balance) | The balance of the tokenized asset. |
-
-##### Asset Balance
-
-The balance of the tokenized asset.
-
-| Name | Type | Description |
-| ------- | ------------- | ----------------------------------- |
-| AssetID | ByteArray[32] | The ID of the tokenized asset |
-| Balance | uint64 | The balance of the tokenized asset. |
-
-The _SigLockedAssetOutput_ defines an output holding a balance for each specified tokenized asset linked to a single address; it is unlocked via a valid signature proving ownership over the given address. Such output may hold an address of different types.
-The ID of any tokenized asset is defined by the BLAKE2b-256 hash of the OutputID that created the asset.
-
-### Payload
-
-The payload part of a _Transaction Essence_ may hold an optional payload. This payload does not affect the validity of the _Transaction Essence_. If the transaction is not valid, then the payload _shall_ be discarded.
-
-### Unlock Blocks
-
-The _Unlock Blocks_ part holds the unlock blocks unlocking inputs within a _Transaction Essence_.
-
-There are different types of _Unlock Blocks_:
-| Name | Unlock Type | Description |
-| ---------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
-| Signature Unlock Block | 0 | An unlock block holding one or more signatures unlocking one or more inputs. |
-| Reference Unlock Block | 1 | An unlock block which must reference a previous unlock block which unlocks also the input at the same index as this _Reference Unlock Block_. |
-
-#### Signature Unlock Block
-
-| Name | Type | Description |
-| ----------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------- |
-| Unlock Type | uint8 | Set to value 0 to denote a _Signature Unlock Block_. |
-| Signature `oneOf` | [Ed25519 Address](#ed25519-address) \| [BLS Address](#bls-address) | The raw bytes of the Ed25519/BLS address which is a BLAKE2b-256 hash of the Ed25519/BLS public key |
-
-A _Signature Unlock Block_ defines an _Unlock Block_ which holds one or more signatures unlocking one or more inputs.
-Such a block signs the entire _Transaction Essence_ part of a _Transaction Payload_ including the optional payload.
-
-#### Reference Unlock block
-
-| Name | Type | Description |
-| ----------- | ------ | ---------------------------------------------------- |
-| Unlock Type | uint8 | Set to value 1 to denote a _Reference Unlock Block_. |
-| Reference | uint16 | Represents the index of a previous unlock block. |
-
-A _Reference Unlock Block_ defines an _Unlock Block_ that references a previous _Unlock Block_ (that must not be another _Reference Unlock Block_). It must be used if multiple inputs can be unlocked through the same origin _Unlock Block_.
-
-Example:
-Consider a _Transaction Essence_ containing _UTXO Inputs_ A, B and C, where A and C are both spending the UTXOs originating from the same Ed25519 address. The _Unlock Block_ part must thereby have the following structure:
-
-| Index | Must Contain |
-| ----- | ---------------------------------------------------------------------------------------------------------- |
-| 0 | A _Signature Unlock Block_ holding the corresponding Ed25519 signature to unlock A and C. |
-| 1 | A _Signature Unlock Block_ that unlocks B. |
-| 2 | A _Reference Unlock Block_ that references index 0, since C also gets unlocked by the same signature as A. |
-
-## Validation
-
-A _Transaction_ payload has different validation stages since some validation steps can only be executed at the point when certain information has (or has not) been received. We, therefore, distinguish between syntactical and semantic validation.
-
-### Transaction Syntactical Validation
-
-This validation can commence as soon as the transaction data has been received in its entirety. It validates the structure but not the signatures of the transaction. A transaction must be discarded right away if it does not pass this stage.
-
-The following criteria define whether the transaction passes the syntactical validation:
-
-- Transaction Essence:
- - `Transaction Essence Version` value must be 0.
- - The `timestamp` of the _Transaction Essence_ must be older than (or equal to) the `timestamp` of the block
- containing the transaction by at most 10 minutes.
- - A _Transaction Essence_ must contain at least one input and output.
-- Inputs:
- - `Inputs Count` must be 0 < x < 128.
- - At least one input must be specified.
- - `Input Type` value must be 0, denoting an `UTXO Input`.
- - `UTXO Input`:
- - `Transaction Output Index` must be 0 ≤ x < 128.
- - Every combination of `Transaction ID` + `Transaction Output Index` must be unique in the inputs set.
- - Inputs must be in lexicographical order of their serialized form.1
-- Outputs:
- - `Outputs Count` must be 0 < x < 128.
- - At least one output must be specified.
- - `Output Type` must be 0, denoting a `SigLockedSingleOutput`.
- - `SigLockedSingleOutput`:
- - `Address Type` must either be 0 or 1, denoting an `Ed25519` - or `BLS` address .
- - The `Address` must be unique in the set of `SigLockedSingleOutputs`.
- - `Amount` must be > 0.
- - Outputs must be in lexicographical order by their serialized form. This ensures that serialization of the transaction becomes deterministic, meaning that libraries always produce the same bytes given the logical transaction.
- - Accumulated output balance must not exceed the total supply of tokens `2,779,530,283,277,761`.
-- `Payload Length` must be 0 (to indicate that there's no payload) or be valid for the specified payload type.
-- `Payload Type` must be one of the supported payload types if `Payload Length` is not 0.
-- `Unlock Blocks Count` must match the number of inputs. Must be 0 < x < 128.
-- `Unlock Block Type` must either be 0 or 1, denoting a `Signature Unlock Block` or `Reference Unlock block`.
-- `Signature Unlock Blocks` must define either an `Ed25519`- or `BLS Signature`.
-- A `Signature Unlock Block` unlocking multiple inputs must only appear once (be unique) and be positioned at the same index of the first input it unlocks. All other inputs unlocked by the same `Signature Unlock Block` must have a companion `Reference Unlock Block` at the same index as the corresponding input that points to the origin `Signature Unlock Block`.
-- `Reference Unlock Blocks` must specify a previous `Unlock Block` that is not of type `Reference Unlock Block`. The referenced index must therefore be smaller than the index of the `Reference Unlock Block`.
-- Given the type and length information, the _Transaction_ must consume the entire byte array the `Payload Length` field in the _Block_ defines.
-
-### Transaction Semantic Validation
-
-The following criteria define whether the transaction passes the semantic validation:
-
-1. All the UTXOs the transaction references are known (booked) and unspent.
-1. The transaction is spending the entirety of the funds of the referenced UTXOs to the outputs.
-1. The address type of the referenced UTXO must match the signature type contained in the corresponding _Signature Unlock Block_.
-1. The _Signature Unlock Blocks_ are valid, i.e. the signatures prove ownership over the addresses of the referenced UTXOs.
-
-If a transaction passes the semantic validation, its referenced UTXOs _shall_ be marked as spent and the corresponding new outputs _shall_ be booked/specified in the ledger.
-
-Transactions that do not pass semantic validation _shall_ be discarded. Their UTXOs are not marked as spent and neither are their outputs booked into the ledger. Moreover, their blocks _shall_ be considered invalid.
-
-# Ledger State
-
-The introduction of a voting-based consensus requires a fast and easy way to determine a node's initial opinion for every received transaction. This includes the ability to both detect double spends and transactions that try to spend non-existing funds.
-These conditions are fulfilled by the introduction of an Unspent Transaction Output (UTXO) model for record-keeping, which enables the validation of transactions in real time.
-
-The concept of UTXO style transactions is directly linked to the creation of a directed acyclic graph (DAG), in which the vertices are transactions and the links between these are determined by the outputs and inputs of transactions.
-
-To deal with double spends and leverage on certain properties of UTXO, we introduce the Realities Ledger State.
-
-## Realities Ledger State
-
-In the Realities Ledger State, we model the different perceptions of the ledger state that exist in the Tangle. In each “reality” on its own there are zero conflicting transactions.
-Each reality thus forms an in itself consistent UTXO sub-DAG, where every transaction references any other transaction correctly.
-
-Since outputs of transactions can only be consumed once, a transaction that double spends outputs creates a persistent conflict in a corresponding UTXO DAG. Each conflict receives a unique identifier `conflictID`. These conflicts cannot be merged by any vertices (transactions).
-A transaction that attempts to merge incompatible conflicts fails to pass a validity check and is marked as invalid.
-
-The composition of all realities defines the Realities Ledger State.
-
-From this composition nodes are able to know which possible outcomes for the Tangle exist, where they split, how they relate to each other, if they can be merged and which blocks are valid tips. All of this information can be retrieved in a fast and efficient way without having to walk the Tangle.
-
-Ultimately, for a set of competing realities, only one reality can survive. It is then up to the consensus protocol to determine which conflict is part of the eventually accepted reality.
-
-In total the ledger state thus involves three different layers:
-
-- the UTXO DAG,
-- its extension to the corresponding conflict DAG,
-- the Tangle which maps the parent relations between blocks and thus also transactions.
-
-## The UTXO DAG
-
-The UTXO DAG models the relationship between transactions, by tracking which outputs have been spent by what transaction. Since outputs can only be spent once, we use this property to detect double spends.
-
-Instead of permitting immediately only one transaction into to the ledger state, we allow for different versions of the ledger to coexist temporarily.
-This is enabled by extending the UTXO DAG by the introduction of conflicts, see the following section. We can then determine which conflicting versions of the ledger state exist in the presence of conflicts.
-
-### Conflict Sets and Detection of Double Spends
-
-We maintain a list of consumers `consumerList` associated with every output, that keeps track of which transactions have spent that particular output. Outputs without consumers are considered to be unspent outputs. Transactions that consume an output that have more than one consumer are considered to be double spends.
-
-If there is more than one consumer in the consumer list we _shall_ create a conflict set list `conflictSet`, which is identical to the consumer list. The `conflictSet` is uniquely identified by the unique identifier `conflictSetID`. Since the `outputID` is directly and uniquely linked to the conflict set, we set `conflictSetID=outputID`.
-
-## Conflicts
-
-The UTXO model and the concept of solidification, makes all non-conflicting transactions converge to the same ledger state no matter in which order the transactions are received. Blocks containing these transactions could always reference each other in the Tangle without limitations.
-
-However, every double spend creates a new possible version of the ledger state that will no longer converge. Whenever a double spend is detected, see the previous section, we track the outputs created by the conflicting transactions and all of the transactions that spend these outputs, by creating a container for them in the ledger which we call a conflict.
-
-More specifically a container `conflict` _shall_ be created for each transaction that double spends one or several outputs, or if transactions aggregated those conflicts.
-Every transaction that spends directly or indirectly from a transaction in a given `conflict`, i.e. is in the future cone in the UTXO DAG of the double-spending transaction that created `conflict`, is also contained in this `conflict` or one of the child conflicts.
-A conflict that was created by a transaction that spends multiple outputs can be part of multiple conflict sets.
-
-Every conflict _shall_ be identified by the unique identifier `conflictID`. We consider two kinds of conflicts: conflict conflicts and aggregated conflicts, which are explained in the following sections.
-
-### Conflict Conflicts
-
-A conflict conflict is created by a corresponding double spend transaction. Since the transaction identifier is unique, we choose the transaction id `transactionID` of the double spending transaction as the `conflictID`.
-
-Outputs inside a conflict can be double spent again, recursively forming sub-conflicts.
-
-On solidification of a block, we _shall_ store the corresponding conflict identifier together with every output, as well as the transaction metadata to enable instant lookups of this information. Thus, on solidification, a transaction can be immediately associated with a conflict.
-
-### Aggregated Conflicts
-
-A transaction that does not create a double spend inherits the conflicts of the input's conflicts. In the simplest case, where there is only one input conflict the transaction inherits that conflict.
-
-If outputs from multiple non-conflicting conflicts are spent in the same transaction, then the transaction and its resulting outputs are part of an aggregated conflict. This type of conflict is not part of any conflict set. Rather it simply combines the perception that the individual conflict conflicts associated to the transaction's inputs are the ones that will be accepted by the network. Each aggregated conflict _shall_ have a unique identifier `conflictID`, which is the same type as for conflict conflicts. Furthermore the container for an aggregated conflict is also of type `conflict`.
-
-To calculate the unique identifier of a new aggregated conflict, we take the identifiers of the conflicts that were aggregated, sort them lexicographically and hash the concatenated identifiers once
-
-An aggregated conflict can't aggregate other aggregated conflicts. However, it can aggregate the conflict conflicts that are part of the referenced aggregated conflict.
-Thus aggregated conflicts have no further conflicts as their children and they remain tips in the conflict DAG. Furthermore, the sortation of the `conflictID`s in the function `AggregatedConflictID()` ensures that even though blocks can attach at different points in the Tangle and aggregate different aggregated conflicts they are treated as if they are in the same aggregated conflict **if** the referenced conflict conflicts are the same.
-
-These properties allow for an efficient reduction of a set of conflicts. In the following we will require the following fields as part of the conflict data:
-
-- `isConflictConflict` is a boolean flat that is `TRUE` if the conflict is a conflict conflict or `FALSE` if its an aggregated conflict.
-- `parentConflicts` contains the list of parent conflict conflicts of the conflict, i.e. the conflict conflicts that are directly referenced by this conflict.
-
-Then the following function takes a list of conflicts (which can be either conflict or aggregated conflicts) and returns a unique set of conflict conflicts that these conflicts represent. This is done by replacing duplicates and extracting the parent conflict conflicts from aggregated conflicts.
-
-```vbnet
-FUNCTION reducedConflicts = ReduceConflicts(conflicts)
- FOR conflict IN conflicts
- IF conflict.isConflictConflict
- Append(reducedConflicts,conflict)
- ELSE
- FOR parentConflict IN conflict.parentConflicts
- IF NOT (parentConflict IN reducedConflicts)
- Append(reducedConflicts,parentConflict)
-
- RETURN reducedConflicts
-```
-
-### The Conflict DAG
-
-A new conflict is created for each transaction that is part of a conflict set, or if a transaction aggregates conflicts.
-In the conflict DAG, conflicts constitute the vertices of the DAG. A conflict that is created by a transaction that is spending outputs from other conflicts has edges pointing to those conflicts.
-The conflict DAG maps the UTXO DAG to a simpler structure that ignores details about relations between transactions inside the conflicts and instead retains only details about the interrelations of conflicts.
-The set of all non-conflicting transactions form the master conflict. Thus, at its root the conflict DAG has the master conflict, which consists of non-conflicting transaction and resolved transactions. From this root of the conflict DAG the various conflicts emerge.
-In other words the conflict conflicts and the aggregated conflicts appear as the children of the master conflict.
-
-### Detecting Conflicting Conflicts
-
-Conflicts are conflicting if they, or any of their ancestors, are part of the same conflict set.
-The conflict DAG can be used to check if conflicts are conflicting, by applying an operation called normalization, to a set of input conflicts.
-From this information we can identify blocks or transactions that are trying to combine conflicts belonging to conflicting double spends, and thus introduce an invalid perception of the ledger state.
-
-Since conflicts represent the ledger state associated with a double spend and sub-conflicts implicitly share the perception of their parents, we define an operation to normalize a list of conflicts that gets rid of all conflicts that are referenced by other conflicts in that list. The function returns `NULL` if the conflicts are conflicting and can not be merged.
-
-### Merging of Conflicts Into the Master Conflict
-
-A conflict gains approval weight when blocks from (previously non-attached) `nodeID`s attach to blocks in the future cone of that conflict. Once the approval weight exceeds a certain threshold we consider the conflict as confirmed.
-Once a conflict conflict is confirmed, it can be merged back into the master conflict. Since the approval weight is monotonically increasing for conflicts from the past to the future, conflicts are only merged into the master conflict.
-The loosing conflicts and all their children conflicts are booked into the container `rejectedConflict` that has the identifier `rejectedConflictID`.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/mana.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/mana.md
deleted file mode 100644
index 87a093272cb..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/mana.md
+++ /dev/null
@@ -1,617 +0,0 @@
----
-description: Mana is a reputation system for nodes within the IOTA network. Reputation is gained by contributing to the network. As time passes, part of the earned mana of a node decays to encourage keeping up the good behavior.
-image: /img/protocol_specification/mana.png
-keywords:
- - mana
- - node
- - calculation
- - transactions
- - base mana
- - vectors
- - access mana
- - consensus mana
- - effective base mana
- - ledger state
----
-
-# Mana Implementation
-
-This document provides a high level overview of how mana is implemented in GoShimmer.
-
-## Introduction
-
-Mana is a reputation system for nodes within the IOTA network.
-
-Reputation is gained by contributing to the network, i.e. creating value transfers.
-As time passes, part of the earned mana of a node decays to encourage keeping up the good behavior.
-
-## Scope
-
-The scope of the first implementation of mana into GoShimmer is to verify that mana calculations work,
-study base mana calculations 1 & 2, and mana distribution in the test network, furthermore to verify that nodes have
-similar view on the network.
-
-## Mana Calculation
-
-Mana is essentially the reputation score of a node in the IOTA network. Mana is calculated locally in each node, as a
-function that takes value transactions as input and produces the Base Mana Vector as output.
-
-Each transaction has an `accessMana` and `consensusMana` field that determine which node to pledge these two types
-of mana to. Both of these fields denote a `nodeID`, the receiver of mana. `accessMana` and `consensusMana` do not have
-to be pledged to the same node, but for simplicity, in the first implementation, they will be.
-
-In addition to the mana fields, a `timestamp` field is also added to the transactions that will be utilized for calculating
-decay and effective mana.
-
-From the pledged mana of a transaction, a node can calculate locally the `Base Mana Vector` for both `Access Mana` and
-`Consensus Mana`.
-
-A `Base Mana Vector` consists of Base Mana 1 and Base Mana 2 and their respective `Effective Base Mana`.
-Given a value transaction, Base Mana 1 and Base Mana 2 are determined as follows:
-
-1. Base Mana 1 is revoked from the node that created the output(s) used as input(s) in the transaction, and is pledged to
- the node creating the new output(s). The amount of `Base Mana 1` revoked and pledged is equal to the balance of the
- input.
-2. Base Mana 2 is freshly created at the issuance time of the transaction, awarded to the node, but decays with time.
- The amount of `Base Mana 2` pledged is determined with `Pending Mana` concept: funds sitting at an address generate
- `pending mana` that grows over time, but bounded.
- - `Mana_pending = (alpha*S)/gamma*(1-e^(-gamma*t))`, where `alpha` and `gamma` are chosen parameters, `S` is the amount
- of funds an output transfers to the address, and `t` is the time since the funds are on that address.
-
-An example `Base Mana Vector` for `Access Mana` could look like this:
-
-| | Node 1 | Node 2 | ... | Node k |
-| --------------------- | ------ | ------ | --- | ------ |
-| Base Mana 1 | 0 | 1 | ... | 100.54 |
-| Effective Base Mana 1 | 0 | 0.5 | ... | 120.7 |
-| Base Mana 2 | 0 | 1.2 | ... | 0.01 |
-| Effective Base Mana 2 | 0 | 0.6 | ... | 0.015 |
-
-`Base Mana` is pledged or revoked at discrete times, which results in `Base Mana` being discontinuous function over time.
-In order to make mana "smoother" and continuous, an exponential moving average is applied to the `Base Mana` values,
-resulting in `Effective Base Mana 1` and `Effective Base Mana 2`.
-
-It is important to note, that consuming a new transaction and pledging its mana happens when the transaction is
-confirmed on the node. At the same time, entries of the nodes whose mana is being modified during pledging in the
-`Base Mana Vector(s)` are updated with respect to time. In general, updates due to time happen whenever a node's mana is
-being accessed. Except for the aforementioned case, this could be for example a mana related query from an external
-module (AutoPeering, DRNG, Rate Control, tools, etc.).
-
-Following figure summarizes how `Access Mana` and `Consensus Mana` is derived from a transaction:
-
-[![Mana](/img/protocol_specification/mana.png 'Mana')](/img/protocol_specification/mana.png)
-
-The reason for having two separate `Base Mana Vectors` is the fact, that `accessMana` and `consensusMana` can be pledged
-to different nodes.
-
-The exact mathematical formulas, and their respective parameters will be determined later.
-
-## Challenges
-
-### Dependency on Tangle
-
-Since mana is awarded to nodes submitting value transfers, the tangle is needed as input for mana calculation.
-Each node calculates mana locally, therefore, it is essential to determine when to consider transactions in the
-tangle "final enough" (so that they will not be orphaned).
-
-When a transaction is `confirmed`, it is a sufficient indicator that it will not be orphaned. However, in current
-GoShimmer implementation, confirmation is not yet a properly defined concept. This issue will be addressed in a separate
-module.
-
-The Mana module assumes, that the (value) tangle's `TransactionConfirmed` event is the trigger condition to update the
-mana state machine (base mana vectors for access and consensus mana). Once the concept of transaction finality is
-introduced for the tangle, the trigger conditions for access and consensus mana calculations can be adjusted.
-
-### Transaction Layout
-
-A new field should be added to `Transaction` denoting `PledgedNodeID` for `Access Mana` and `Consensus Mana`.
-This is also beneficial to implement mana donation feature, that is, to donate the mana of a certain transaction to an
-arbitrary node.
-
-## Limitations
-
-The first implementation of mana in GoShimmer will:
-
-- not have voted timestamps on value transactions,
-- lack proper `TransactionConfirmed` mechanism to trigger mana update,
-- lack integration into rate control/autopeering/etc.
-
-## Detailed Design
-
-In this section, detailed GoShimmer implementation design considerations will be outlined about the mana module.
-In short, changes can be classified into 3 categories:
-
-1. Transaction related changes,
-2. Mana module functionality,
-3. and related tools/utilities, such as API, visualization, analytics.
-
-### Transaction
-
-As described above, 3 new fields will be added to the transaction layout:
-
-1. `Timestamp` time.time
-2. `AccessManaNodeID` []bytes
-3. `ConsensusManaNodeID` []bytes
-
-By adding these fields to the signed transaction, `valuetransfers/packages/transaction` should be modified.
-
-- The three new fields should be added to the transaction essence.
-- Marshalling and unmarshalling of a transaction should be modified.
-- For calculating `Base Mana 1` values, `mana module` should be able to derive from a transaction the nodes which received
- pledged `Base Mana 1` as a consequence of the consumed inputs of the transaction. Therefore, a lookup function should
- be exposed from the value tangle that given an `input`, returns the `pledgedNodeID` of the transaction creating the input.
-
-`Timestamp` is part of the signed transaction, therefore, a client sending a transaction to the node should already
-define it. In this case, this `Timestamp` will not be the same as the timestamp of the block containing the
-transaction and value payload, since the block is created on the node.
-A solution to this is that upon receiving a `transaction` from a client, the node checks if the timestamp is within
-a predefined time window, for example `t_current - delta`, where `delta` could be couple seconds. If true, then the node
-constructs the block, which must have a greater timestamp, than the transaction.
-
-`AccessManaNodeID` and `ConsensusManaNodeID` are also part of the signed transaction, so a client should fill them out.
-Node owners are free to choose to whom they pledge mana to with the transaction, so there should be a mechanism that
-lets the client know, what `AccessManaNodeID` and `ConsensusManaNodeID` are allowed. This could be a new API endpoint
-that works like this:
-
-1. Client asks node what nodeIDs can be included for pledging a certain type (access, consensus) mana.
-2. Node answers with either:
-
-- Don't care. Any node IDs are valid.
-- List of nodeIDs that are allowed for each type.
-
-3. If a client sends back the transaction with invalid or empty mana fields, the transaction is considered invalid.
-
-This way node owners can decide who their transactions are pledging mana to. It could be only their node, or they could
-provide mana pledging as a service. They could delegate access mana to others, but hold own to consensus mana, or the
-other way around.
-
-### Initialization
-
-Mana state machine is an extension of the ledger state, hence its calculation depends on the ledger state perception
-of the node. Snapshotting is the mechanism that saves the ledger states and prunes unnecessary transactions. Together
-with the ledger state, base mana vectors are also saved, since a certain ledger state reflects a certain mana distribution
-in the network. In future, when snapshotting is implemented in GoShimmer, nodes joining the network will be able to query
-for snapshot files that will contain initial base mana vectors as well.
-
-Until this functionality is implemented, mana calculation solely relies on transactions getting confirmed. That is, when
-a node joins the network and starts gathering blocks and transactions from peers, it builds its own ledger state through
-solidification process. Essentially, the node requests all blocks down to the genesis from the current tips of its neighbors.
-Once the genesis is found, blocks are solidified bottom up. For the value tangle, this means that for each solidified
-and liked transaction, `TransactionConfirmed` event is triggered, updating the base mana vectors.
-
-In case of a large database, initial synching and solidification is a computationally heavy task due to the sheer amount
-of blocks in the tangle. Mana calculation only adds to this burden. It will be determined through testing if additional
-"weight lifting" mechanism is needed (for example delaying mana calculation).
-
-In the GoShimmer test network, all funds are initially held by the faucet node, therefore all mana present at bootstrap belong
-to this node. Whenever a transaction is requested from the faucet, it pledges mana to the requesting node, helping other
-nodes to increase their mana.
-
-### Mana Package
-
-The functionality of the mana module should be implemented in a `mana` package. Then, a `mana plugin` can use the package
-structs and methods to connect the dots, for example execute `BookMana` when `TransactionConfirmed` event is triggered
-in the value tangle.
-
-`BaseMana` is a struct that holds the different mana values for a given node.
-Note that except for `Base Mana 1` calculation, we need the time when `BaseMana` values were updated, so we store it in the struct:
-
-```go
-type BaseMana struct {
- BaseMana1 float
- EffectiveBaseMana1 float
- BaseMana2 float
- EffectiveBaseMana2 float
- LastUpdated time
-}
-```
-
-`BaseManaVector` is a data structure that maps `nodeID`s to `BaseMana`. It also has a `Type` that denotes the type
-of mana this vector deals with (Access, Consensus, etc.).
-
-```go
-type BaseManaVector struct {
- vector map[identity.ID]*BaseMana
- vectorType Type
-}
-```
-
-#### Methods
-
-`BaseManaVector` should have the following methods:
-
-- `BookMana(transaction)`: Book mana of a transaction. Trigger `ManaBooked` event. Note, that this method updates
- `BaseMana` with respect to time and to new `Base Mana 1` and `Base Mana 2` values.
-- `GetWeightedMana(nodeID, weight) mana`: Return `weight` \* `Effective Base Mana 1` + (1-`weight`)+`Effective Base Mana 2`.
- `weight` is a number in [0,1] interval. Notice, that `weight` = 1 results in only returning `Effective Base Mana 1`,
- and the other way around. Note, that this method also updates `BaseMana` of the node with respect to time.
-- `GetMana(nodeID) mana`: Return 0.5*`Effective Base Mana 1` + 0.5*`Effective Base Mana 2` of a particular node. Note, that
- this method also updates `BaseMana` of the node with respect to time.
-- `update(nodeID, time)`: update `Base Mana 2`, `Effective Base Mana 1` and `Effective Base Mana 2` of a node with respect `time`.
-- `updateAll(time)`: update `Base Mana 2`, `Effective Base Mana 1` and `Effective Base Mana 2` of all nodes with respect to `time`.
-
-`BaseMana` should have the following methods:
-
-- `pledgeAndUpdate(transaction)`: update `BaseMana` fields and pledge mana with respect to `transaction`.
-- `revokeBaseMana1(amount, time)`: update `BaseMana` values with respect to `time` and revoke `amount` `BaseMana1`.
-- `update(time)`: update all `BaseMana` fields with respect to `time`.
-- `updateEBM1(time)`: update `Effective Base Mana 1` wrt to `time`.
-- `updateBM2(time)`: update `Base Mana 2` wrt to `time`.
-- `updateEBM2(time)`: update `Effective Base Mana 2` wrt to `time`.
-
-#### Base Mana Calculation
-
-There are two cases when the values within `Base Mana Vector` are updated:
-
-1. A confirmed transaction pledges mana.
-2. Any module accesses the `Base Mana Vector`, and hence its values are updated with respect to `access time`.
-
-First, let's explore the former.
-
-##### A confirmed transaction pledges mana
-
-For simplicity, we only describe mana calculation for one of the Base Mana Vectors, namely, the Base Access Mana Vector.
-
-First, a `TransactionConfirmed` event is triggered, therefore `BaseManaVector.BookMana(transaction)` is executed:
-
-```go
-func (bmv *BaseManaVector) BookMana(tx *transaction) {
- pledgedNodeID := tx.accessMana
-
- for input := range tx.inputs {
- // search for the nodeID that the input's tx pledged its mana to
- inputNodeID := loadPledgedNodeIDFromInput(input)
- // save it for proper event trigger
- oldMana := bmv[inputNodeID]
- // revoke BM1
- bmv[inputNodeID].revokeBaseMana1(input.balance, tx.timestamp)
-
- // trigger events
- Events.ManaRevoked.Trigger(&ManaRevokedEvent{inputNodeID, input.balance, tx.timestamp, AccessManaType})
- Events.ManaUpdated.Tigger(&ManaUpdatedEvent{inputNodeID, oldMana, bmv[inputNodeID], AccessManaType})
- }
-
- // save it for proper event trigger
- oldMana := bmv[pledgedNodeID]
- // actually pledge and update
- bm1Pledged, bm2Pledged := bmv[pledgedNodeID].pledgeAndUpdate(tx)
-
- // trigger events
- Events.ManaPledged.Trigger(&ManaPledgedEvent{pledgedNodeID, bm1Pledged, bm2Pledged, tx.timestamp, AccessManaType})
- Events.ManaUpdated.Trigger(&ManaUpdatedEvent{pledgedNodeID, oldMana, bmv[pledgedNodeID], AccessManaType})
-}
-```
-
-`Base Mana 1` is being revoked from the nodes that pledged mana for inputs that the current transaction consumes.
-Then, the appropriate node is located in `Base Mana Vector`, and mana is pledged to its `BaseMana`.
-`Events` are essential to study what happens within the module from the outside.
-
-Note, that `revokeBaseMana1` accesses the mana entry of the nodes within `Base Mana Vector`, therefore all values are
-updated with respect to `t`. Notice the two conflicts after the condition. When `Base Mana` values had been updated before
-the transaction's timestamp, a regular update is carried out. However, if `t` is older, than the transaction timestamp,
-an update in the "past" is carried out and values are updated up to `LastUpdated`.
-
-```go
-func (bm *BaseMana) revokeBaseMana1(amount float64, t time.Time) {
- if t.After(bm.LastUpdated) {
- // regular update
- n := t.Sub(bm.LastUpdated)
- // first, update EBM1, BM2 and EBM2 until `t`
- bm.updateEBM1(n)
- bm.updateBM2(n)
- bm.updateEBM2(n)
-
- bm.LastUpdated = t
- // revoke BM1 at `t`
- bm.BaseMana1 -= amount
- } else {
- // update in past
- n := bm.LastUpdated.Sub(t)
- // revoke BM1 at `t`
- bm.BaseMana1 -= amount
- // update EBM1 to `bm.LastUpdated`
- bm.EffectiveBaseMana1 -= amount*(1-math.Pow(math.E,-EMA_coeff_1*n))
- }
-}
-```
-
-The same regular and past update scheme is applied to pledging mana too:
-
-```go
-func (bm *BaseMana) pledgeAndUpdate(tx *transaction) (bm1Pledged int, bm2Pledged int){
- t := tx.timestamp
- bm1Pledged = sum_balance(tx.inputs)
-
- if t.After(bm.LastUpdated) {
- // regular update
- n := t.Sub(bm.LastUpdated)
- // first, update EBM1, BM2 and EBM2 until `t`
- bm.updateEBM1(n)
- bm.updateBM2(n)
- bm.updateEBM2(n)
- bm.LastUpdated = t
- bm.BaseMana1 += bm1Pledged
- // pending mana awarded, need to see how long funds sat
- for input := range tx.inputs {
- // search for the timestamp of the UTXO that generated the input
- t_inp := LoadTxTimestampFromOutputID(input)
- bm2Add := input.balance * (1 - math.Pow(math.E, -decay*(t-t_inp)))
- bm.BaseMana2 += bm2Add
- bm2Pledged += bm2Add
- }
- } else {
- // past update
- n := bm.LastUpdated.Sub(t)
- // update BM1 and BM2 at `t`
- bm.BaseMana1 += bm1Pledged
- oldMana2 = bm.BaseMana2
- for input := range tx.inputs {
- // search for the timestamp of the UTXO that generated the input
- t_inp := LoadTxTimestampFromOutputID(input)
- bm2Add := input.balance * (1-math.Pow( math.E,-decay*(t-t_inp) ) ) * math.Pow(math.E, -decay*n)
- bm.BaseMana2 += bm2Add
- bm2Pledged += bm2Add
- }
- // update EBM1 and EBM2 to `bm.LastUpdated`
- bm.EffectiveBaseMana1 += amount*(1-math.Pow(math.E,-EMA_coeff_1*n))
- if EMA_coeff_2 != decay {
- bm.EffectiveBaseMana2 += (bm.BaseMana2 - oldMana2) *EMA_coeff_2*(math.Pow(math.E,-decay*n)-
- math.Pow(math.E,-EMA_coeff_2*n))/(EMA_coeff_2-decay) / math.Pow(math.E, -decay*n)
- } else {
- bm.EffectiveBaseMana2 += (bm.BaseMana2 - oldMana2) * decay * n
- }
-}
- return bm1Pledged, bm2Pledged
-}
-```
-
-Notice, that in case of `EMA_coeff_2 = decay`, a simplified formula can be used to calculate `EffectiveBaseMana2`.
-The same approach is applied in `updateEBM2()`.
-
-```go
-func (bm *BaseMana) updateEBM1(n time.Duration) {
- bm.EffectiveBaseMana1 = math.Pow(math.E, -EMA_coeff_1 * n) * bm.EffectiveBaseMana1 +
- (1-math.Pow(math.E, -EMA_coeff_1 * n)) * bm.BaseMana1
-}
-```
-
-```go
-func (bm *BaseMana) updateBM2(n time.Duration) {
- bm.BaseMana2 = bm.BaseMana2 * math.Pow(math.E, -decay*n)
-}
-```
-
-```go
-func (bm *BaseMana) updateEBM2(n time.Duration) {
- if EMA_coeff_2 != decay {
- bm.EffectiveBaseMana2 = math.Pow(math.E, -emaCoeff2 * n) * bm.EffectiveBaseMana2 +
- (math.Pow(math.E, -decay * n) - math.Pow(math.E, -EMA_coeff_2 * n)) /
- (EMA_coeff_2 - decay) * EMA_coeff_2 / math.Pow(math.E, -decay * n)*bm.BaseMana2
- } else {
- bm.EffectiveBaseMana2 = math.Pow(math.E, -decay * n)*bm.EffectiveBaseMana2 +
- decay * n * bm.BaseMana2
- }
-}
-```
-
-##### Any module accesses the Base Mana Vector
-
-In this case, the accessed entries within `Base Mana Vector` are updated via the method:
-
-```go
-func (bmv *BaseManaVector) update(nodeID ID, t time.Time ) {
- oldMana := bmv[nodeID]
- bmv[nodeID].update(t)
- Events.ManaUpdated.Trigger(&ManaUpdatedEvent{nodeID, oldMana, bmv[nodeID], AccessManaType})
-}
-```
-
-where `t` is the access time.
-
-```go
-func (bm *BaseMana) update(t time.Time ) {
- n := t - bm.LastUpdated
- bm.updateEBM1(n)
- bm.updateBM2(n)
- bm.updateEBM2(n)
-
- bm.LastUpdated = t
-}
-```
-
-#### Events
-
-The mana package should have the following events:
-
-- `Pledged` when mana (`BM1` and `BM2`) was pledged for a node due to new transactions being confirmed.
-
-```go
-type PledgedEvent struct {
- NodeID []bytes
- AmountBM1 int
- AmountBM2 int
- Time time.Time
- Type ManaType // access or consensus
-}
-```
-
-- `Revoked` when mana (`BM1`) was revoked from a node.
-
-```go
-type RevokedEvent struct {
- NodeID []bytes
- AmountBM1 int
- Time time.Time
- Type ManaType // access or consensus
-}
-```
-
-- `Updated` when mana was updated for a node due to it being accessed.
-
-```go
-type UpdatedEvent struct {
- NodeID []bytes
- OldMana BaseMana
- NewMana BaseMana
- Type ManaType // access or consensus
-}
-```
-
-#### Testing
-
-- Write unit tests for all methods.
-- Test all events and if they are correctly triggered.
-- Benchmark calculations in tests to see how heavy it is to calculate EMAs and decays.
-
-### Mana Plugin
-
-The `mana plugin` is responsible for:
-
-- calculating mana from value transactions,
-- keeping a log of the different mana values of all nodes,
-- updating mana values,
-- responding to mana related queries from other modules,
-- saving base mana vectors in database when shutting down the node,
-- trying to load base mana vectors from database when starting the node.
-
-The proposed mana plugin should keep track of the different mana values of nodes and handle calculation
-updates. Mana values are mapped to `nodeID`s and stored in a `map` data structure. The vector also stores information on
-what `Type` of mana it handles.
-
-```go
-type BaseManaVector struct {
- vector map[identity.ID]*BaseMana
- vectorType Type
-}
-```
-
-`Access Mana` and `Consensus Mana` should have their own respective `BaseManaVector`.
-
-```go
-accessManaVector := BaseManaVector{vectorType: AccesMana}
-consensusManaVector := BaseManaVector{vectorType: ConsensusMana}
-```
-
-In the future, it should be possible to combine `Effective Base Mana 1` and `Effective Base Mana 2` from a `BaseManaVector`
-in arbitrary proportions to arrive at a final mana value that other modules use. The `mana package` has these methods
-in place. Additionally, a parameter could be passed to the `getMana` type of exposed functions to set the proportions.
-
-#### Methods
-
-The mana plugin should expose utility functions to other modules:
-
-- `GetHighestManaNodes(type, n) [n]NodeIdManaTuple`: return the `n` highest `type` mana nodes (`nodeID`,`manaValue`) in
- ascending order. Should also update their mana value.
-- `GetManaMap(type) map[nodeID]manaValue`: return `type` mana perception of the node.
-- `GetAccessMana(nodeID) mana`: access `Base Mana Vector` of `Access Mana`, update its values with respect to time,
- and return the amount of `Access Mana` (either `Effective Base Mana 1`, `Effective Base Mana 2`, or some combination
- of the two). Trigger `ManaUpdated` event.
-- `GetConsensusMana(nodeID) mana`: access `Base Mana Vector` of `Consensus Mana`, update its values with respect to time,
- and returns the amount of `Consensus Mana` (either `Effective Base Mana 1`, `Effective Base Mana 2`, or some combination
- of the two). Trigger `ManaUpdated` event.
-- `GetNeighborsMana(type)`: returns the `type` mana of the nodes neighbors
-- `GetAllManaVectors()` Obtaining the full mana maps for comparison with the perception of other nodes.
-- `GetWeightedRandomNodes(n)`: returns a weighted random selection of `n` nodes. `Consensus Mana` is used for the weights.
-- Obtaining a list of currently known peers + their mana, sorted. Useful for knowing which high mana nodes are online.
-- `OverrideMana(nodeID, baseManaVector)`: Sets the nodes mana to a specific value. Can be useful for debugging, setting faucet mana, initialization, etc.. Triggers `ManaUpdated`
-
-Such utility functions could be used for example to visualize mana distribution in node dashboard, or send neighbor
-mana data to the analysis server for further processing.
-
-#### Booking Mana
-
-Mana is booked when a transaction is confirmed.
-
-```go
-on TransactionConfirmed (tx):
- bookAccessMana()
- bookConsensusMana()
-```
-
-#### Synchronization and Mana Calculation
-
-The mana plugin is responsible to determine when to start calculating mana locally.
-Since mana state is an extension to ledger state, it can only depict realistic mana values once the node is in sync.
-During syncing, ledger state is constructed from blocks coming from neighbors as described further above.
-
-In this first iteration, mana plugin relies on `TransactionConfirmed` event of the value transfers plugin, and has no
-explicit rules on when to start and stop mana calculation.
-
-In future, initial mana state (together with the initial ledger state) will be derived from a snapshot file.
-
-### Mana Toolkit
-
-In this section, all tools and utility functions for mana will be outlined.
-
-#### Mana Related API endpoints
-
-- `/info`: Add own mana in node info response.
-- `value/allowedManaPledge`: Endpoint that clients can query to determine which nodeIDs are allowed as part of
- `accessMana` and `consensusMana` fields in a transaction.
-- `value/sendTransactionByJson`: Add `accessMana`, `consensusMana` and `timestamp` fields to the JSON request.
-
-Add a new `mana` endpoint route:
-
-- `/mana`: Return access and consensus mana of the node.
-- `/mana/all`: Return whole mana map (mana perception of the node).
-- `/mana/access/nhighest`: Return `n` highest access mana holder `nodeIDs` and their access mana values.
-- `/mana/consensus/nhighest`: Return `n` highest consensus mana holder `nodeIDs` and their consensus mana values.
-- `/mana/percentile`: Return the top percentile the node belongs to relative to the network. For example, if there are 100 nodes in the
- network owning mana, and a node is the 13th richest, it means that is part of the top 13% of mana holders, but not the
- top 12%.
-
-#### Metrics collection
-
-To study the mana module, following metrics could be gathered:
-
-- Amount of consensus and access mana present in the network. (amount varies because of `Base Mana 2`).
-- Amount of mana each node holds.
-- Number of (and amount of mana) a node was pledged with mana in the last `t` interval.
-- Mana development of a particular node over time.
-- Mana percentile development of a node over time.
-- Average pledge amount of a node. (how much mana it receives on average with one pledge)
-- Mean and median mana holdings of nodes in the network. Shows how even mana distribution is.
-- Average mana of neighbors.
-
-#### Visualization
-
-Each node calculates mana locally, not only for themselves, but for all nodes in the network that it knows. As a result,
-mana perception of nodes may not be exactly the same at all times (due to network delay, processing capabilities), but
-should converge to the same state. A big question for visualization is which node's viewpoint to base mana visualization on?
-
-When running a node, operators will be shown the mana perception of their own node, but it also makes sense to
-display the perception of high mana nodes as the global mana perception. First, let's look at how local mana perception
-is visualized for a node:
-
-##### Local Perception
-
-There are two ways to visualize mana in GoShimmer:
-
-1. Node Local Dashboard
-2. Grafana Dashboard
-
-While `Local Dashboard` gives flexibility in what and how to visualize, `Grafana Dashboard` is better at storing historic
-data but can only visualize time series. Therefore, both of these ways will be utilized, depending on which suits the best.
-
-`Local Dashboard` visualization:
-
-- Histogram of mana distribution within the network.
-- List of `n` richest mana nodes, ordered.
-- Mana rank of node.
-
-`Grafana Dashboard` visualization:
-
-- Mana of a particular node with respect to time.
-- Amount of mana in the network.
-- Average pledge amount of a node.
-- Mean and median mana holdings of nodes.
-- Mana rank of the node over time.
-- Average mana of neighbors.
-
-##### Global Perception
-
-Additionally, the GoShimmer Analyzer (analysis server) could be updated:
-
-- Autopeering node graph, where size of a node corresponds to its mana value.
-- Some previously described metrics could be visualized here as well, to give the chance to people without
- a node to take a look. As an input, a high mana node's perception should be used.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/markers.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/markers.md
deleted file mode 100644
index cf2f6ac2817..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/markers.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-description: Markers is a tool to efficiently estimate the approval weight of a block and that reduces the portion of the Tangle that needs to be traversed, and which finally results in the confirmation state.
-image: /img/protocol_specification/example_1.png
-keywords:
- - approval weight
- - marker
- - block
- - sequence
- - future marker
- - new marker
- - part marker
- - past cone
----
-
-# Markers
-
-## Summary
-
-Operations that involve traversing the Tangle are very performance intensive and, thus, we need to minimize the amount of traversing to keep algorithms fast. Markers are a tool to infer structural knowledge about the Tangle without actually traversing it.
-
-:::info Note
-
-**Markers** are not a core module of the Coordicide project.
-
-:::
-
-## Motivation
-
-_Markers_ are a tool to infer knowledge about the structure of the Tangle, therefore, we use them to keep algorithms fast. Specifically, markers are used for:
-
-- past/future cone membership;
-- approximate approval weight of any block;
-- tagging sections of the Tangle (e.g., conflicts) without having to traverse each block individually.
-
-## Definitions
-
-Let's define the terms related to markers:
-
-- **Sequence:** A sequence is a chain of markers where each progressing marker contains all preceding markers of the sequence in its past cone.
-- **Sequence Identifier (`SID`):** A Sequence Identifier is the unique identifier of a Sequence.
-- **Marker Index (`MI`):** A Marker Index is the marker rank in the marker DAG. Throughout the code the marker rank will be called index.
-- **marker:** A marker is a pair of numbers: `SID` and `MI` associated to a given block. Markers carrying the same `SID` belong to the same Sequence.
-- **future marker (`FM`):** A future marker of a block is the first marker in its future cone from different sequences.
-- **past marker (`PM`):** A past marker of a block is a marker in its past cone (can be multiple markers of distinct sequences). For a given sequence it is set to the newest past marker of its parents, that is the one that has the largest `MI`. The past marker of a marker is set to itself.
-
-## Design
-
-On a high level, markers provide structural knowledge of the Tangle and each individual block without the need to traverse (aka walking the Tangle). Markers are a form of meta-information (for each block) that each node locally creates when processing blocks. They can be seen as specific, uniquely tainted blocks that, taken together, again build a DAG within the Tangle. We can then utilize this marker DAG to determine structural details.
-
-![](https://i.imgur.com/3x7H68t.png)
-
-The above example shows a Tangle with the red blocks being markers in the same sequence (more details on sequences later). A marker is uniquely identified by `sequenceID,index`, where the index is ever-increasing. Any block can be "selected" as a marker if it fulfills a certain set of rules:
-
-- every n-th block (in the example, each block is tried to be set as a marker)
-- latest marker of sequence is in its past cone.
-
-The markers build a chain/DAG and because of the rules it becomes clear that `marker 0,1` is in the past cone of `marker 0,5`. Since markers represent meta-information for the underlying blocks and each block keeps the latest marker in its past cone as _structural information_, we can infer that `block B` (`FM 0,2`) is in the past cone of `block I` (`PM 0,3`) Similarly, it is evident that `block D` is in the past cone of `block J`.
-
-### Sequences
-
-A sequence is a chain of markers where each progressing marker contains all preceding markers of the sequence in its past cone. However, this very definition entails a problem: what if there are certain parts of the Tangle that are disparate to each other. Assuming only a single sequence, this would mean that a certain part of the Tangle can't get any markers. In turn, certain operations within this part of the Tangle would involve walking.
-
-For this reason, we keep track of the _marker distance_, which signals the distance of blocks in the Tangle in a certain past cone where no marker could be assigned. If this distance gets too big, a new sequence is created as is shown in the example below (marker distance to spawn a new sequence = 3).
-
-![](https://i.imgur.com/Q44XZgk.png)
-
-The example above shows a side chain starting from `block L` to `block P` where it merges back with the "main Tangle". There can be no new marker assigned as none of the `blocks L-O` have the latest marker of `sequence 0` in their past cone. The marker distance grows and eventually a marker is created at `block N`. Following, a marker can be assigned to `block O` and `block P`. The latter is special because it combines two sequences. This is to be expected as disparate parts of the Tangle should be merged eventually. In case a block has markers from multiple sequences in its past cones the following rules apply:
-
-- Assign a marker in the highest sequence if possible. If not possible, try to assign a marker in the next lower sequence.
-- The index is `max(marker1.Index,marker2.Index,...)`
-
-With these rules in mind, it becomes clear why `block P` has the `marker 1,6` and `block R` has `marker 1,7`. In case of `block Q`, no marker can be assigned to `sequence 1`, and, thus, a new marker in `sequence 0` is created.
-
-Always continuing the highest sequence should result in smaller sequences being discontinued once disparate parts of the Tangle merge and overall a relatively small number of sequences (optimally just one) is expected to be active at any given moment in time.
-
-### Sequence Graph
-
-The information that markers yield about past and future cone is only valid for any given sequence individually. However, to relate markers of separate sequences, we need to track dependencies between sequences.
-Therefore, sequences build a graph between each other, where relationships between the sequences can be seen.
-
-Each sequence keeps track of **referenced sequences** and **referencing sequences** at a specific marker index so that bidirectional traversing into both the future and the past of a sequence is possible.
-
-Specifically, in our example there are 3 bidirectional references between `sequence 0` and `sequence 1`.
-Sequence 0:
-
-- `0,1`<->`1,2`
-- `0,5`<->`1,6`
-- `0,6`<->`1,7`
-
-Sequence 1:
-
-- `1,2`<->`0,1`
-- `1,6`<->`0,5`
-- `1,7`<->`0,6`
-
-![](https://i.imgur.com/EhbJohc.png)
-
-## Usage
-
-### Markers Application: Approval Weight Estimation
-
-To approximate the approval weight of a block, we simply retrieve the approval weight of its `FM` list. Since the block is in the past cone of its `FM`s, the approval weight and the finality will be at least the same as its `FM`s. This will of course be a lower bound (which is the “safe” bound), but if the markers are set frequently enough, it should be a good approximation.
-In practice, we propagate the GoF finality to blocks in a marker's past cone until we reach another marker.
-
-For details of managing approval weight of each marker and approval weight calculation thereof please refer to [Approval Weight](consensus_mechanism.md#approval-weight-aw).
-
-### Conflict Mapping
-
-Conflicts are introduced to the Tangle when double spends occur and are carried forward (inherited) by blocks until a conflict is resolved (merge to master). As such, each block needs to carry conflict information and if a conflict arises deep within the Tangle, each block would need to be traversed individually, which makes this operation very expensive and thus attackable.
-
-Therefore, we utilize markers to store conflict information for blocks and store only a **difference** of conflicts (subtracted/added) on each block individually. In that way, propagation of conflicts can happen via structural marker information and not every block needs to be updated. When querying conflict information of a block, first all conflicts of the block's past markers are retrieved and then combined with the diff of the block itself to result in the block's overall conflict.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/overview.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/overview.md
deleted file mode 100644
index 7fd95fed302..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/overview.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-description: High-level description of the interaction between components of the currently implemented GoShimmer protocol. The protocol can be divided into three main elements. A P2P overlay network, an immutable data structure, and a consensus mechanism.
-image: /img/protocol_specification/layers.png
-keywords:
- - network layer
- - node
- - block
- - ledger state
- - data flow
- - past cone
- - future cone
- - timestamp
- - opinion setting
- - strong tip
- - tip pool
----
-
-# Components of the Protocol
-
-This section provides a high-level description of the interaction between components of the currently implemented GoShimmer protocol. The protocol can be divided into three main elements: a P2P overlay network, an immutable data structure, and a consensus mechanism. We abstract these three elements into layers, where—similarly to other architectures—upper layers build on the functionality provided by the layers below them. The definition of the different layers is merely about the convenience of creating a clear separation of concerns.
-
-[![Components of the Protocol](/img/protocol_specification/layers.png 'Components of the Protocol')](/img/protocol_specification/layers.png)
-
-## Network Layer
-
-The network is maintained by the network layer modules, which can be characterized as a pure P2P overlay network, meaning that it is a system that runs on top of another network (e.g., the internet), and where all nodes have the same roles and perform the same actions (in contrast to client-server systems). GoShimmer's Network Layer consists of three basic modules: the [peer discovery](autopeering.md#peer-discovery) module (which provides a list of nodes actively using the network), and the [neighbor selection](autopeering.md#neighbor-selection) module (also known as autopeering), which actually selects peers. Finally, the P2P Communication manages a node's neighbors, either selected via [autopeering](autopeering.md) or [manual peering](../../tutorials/manual_peering.md).
-
-## Communication Layer
-
-The communication layer concerns the information propagated through the network layer, which is contained in objects called blocks. This layer forms a DAG with blocks as vertices called the [Tangle](tangle.md): a replicated, shared and distributed data structure that emerges—through a combination of deterministic rules, cooperation, and virtual voting.
-Since nodes have finite capabilities, the number of blocks that the network can process is limited. Thus, the network might become overloaded, either simply because of honest heavy usage or because of malicious (spam) attacks. To protect the network from halting or even from getting inconsistent, the rate control (currently a static PoW) and [congestion control](congestion_control.md) modules control when and how many blocks can be gossiped.
-
-## (Decentralized) Application Layer
-
-On top of the communication layer lives the application layer. Anybody can develop applications that run on this layer, and nodes can choose which applications to run. Of course, these applications can also be dependent on each other.
-There are several core applications that must be run by all nodes, as the value transfer applications, which maintains the [ledger state](ledgerstate.md) (including advanced [output types](advanced_outputs.md)), and a quantity called [Mana](mana.md), that serves as a scarce resource as our Sybil protection mechanism.
-Additionally, all nodes must run what we call the consensus applications, which regulate timestamps in the blocks and resolve conflicts.
-The consensus mechanism implemented in GoShimmer is leaderless and consists out of multiple components:
-
-1. [Approval Weight](consensus_mechanism.md#approval-weight-aw) is an objective measure to determine the grade of finality of blocks and conflicts based on [active cMana](consensus_mechanism.md#Active-cMana).
-2. The [Modular Conflict Selection Function](consensus_mechanism.md#modular-conflict-selection-function) is an abstraction on how a node sets an initial opinion on conflicts.
-
-## Data Flow - Overview
-
-The diagram below represents the interaction between the different modules in the protocol ([event driven](../../implementation_design/event_driven_model.md)). Each blue box represents a component of the [Tangle codebase](https://github.com/iotaledger/goshimmer/tree/develop/packages/tangle), which has events (in yellow boxes) that belong to it. Those events will trigger methods (the green boxes), that can also trigger other methods. This triggering is represented by the arrows in the diagram. Finally, the purple boxes represent events that do not belong to the component that triggered them.
-
-As an example, take the Parser component. The function `ProcessGossipBlock` will trigger the method `Parse`, which is the only entry to the component. There are three possible outcomes to the `Parser`: triggering a `ParsingFailed` event, a `BlockRejected` event, or a `BlockParsed` event. In the last case, the event will trigger the `StoreBlock` method (which is the entry to the Storage component), whereas the first two events do not trigger any other component.
-
-[![Data Flow - Overview](/img/protocol_specification/data-flow.png 'Data Flow - Overview')](/img/protocol_specification/data-flow.png)
-
-We call this the data flow, i.e., the [life cycle of a block](../protocol.md), from block reception (meaning that we focus here on the point of view of a node receiving a block issued by another node) up until acceptance in the Tangle. Notice that any block, either created locally by the node or received from a neighbor needs to pass through the data flow.
-
-### Block Factory
-
-The IssuePayload function creates a valid payload which is provided to the `CreateBlock` method, along with a set of parents chosen with the Tip Selection Algorithm. Then, the Block Factory component is responsible to find a nonce compatible with the PoW requirements defined by the rate control module. Finally, the block is signed. Notice that the block generation should follow the rates imposed by the rate setter, as defined in [rate setting](congestion_control.md#rate-setting).
-
-### Parser
-
-The first step after the arrival of the block to the block inbox is the parsing, which consists of the following different filtering processes (meaning that the blocks that don't pass these steps will not be stored):
-
-**Bytes filter**:
-
-1. Recently Seen Bytes: it compares the incoming blocks with a pool of recently seen bytes to filter duplicates.
-2. PoW check: it checks if the PoW requirements are met, currently set to the block hash starting with 22 zeroes.
-
-Followed by the bytes filters, the received bytes are parsed into a block and its corresponding payload and [syntactically validated](tangle.md#syntactical-validation). From now on, the filters operate on block objects rather than just bytes.
-
-**Block filter**:
-
-1. Signature check: it checks if the block signature is valid.
-2. [Timestamp Difference Check for transactions](tangle.md#block-timestamp-vs-transaction-timestamp): it checks if the timestamps of the payload, and the block are consistent with each other
-
-### Storage
-
-Only blocks that pass the Parser are stored, along with their metadata. Additionally, new blocks are stored as children of their parents, i.e., a reverse mapping that enables us to walk the Tangle into the future cone of a block.
-
-### Solidifier
-
-[Solidification](tangle.md#Solidification) is the process of requesting missing blocks. In this step, the node checks if all the past cone of the block is known; in the case that the node realizes that a block in the past cone is missing, it sends a request to its neighbors asking for that missing block. This process is recursively repeated until all of a block's past cone up to the genesis (or snapshot) becomes known to the node.
-This way, the protocol enables any node to retrieve the entire block history, even for nodes that have just joined the network.
-
-### Scheduler
-
-The scheduler makes sure that the network as a whole can operate with maximum throughput and minimum delays while providing consistency, fairness (according to aMana), and security. It, therefore, regulates the allowed influx of blocks to the network as a [congestion-control mechanism](congestion_control.md).
-
-### Booker
-
-After scheduling, the block goes to the booker. This step is different between blocks that contain a transaction payload and blocks that do not contain it.
-
-In the case of a non-transaction payload, booking into the Tangle occurs after the conflicting parents conflicts check, i.e., after checking if the parents' conflicts contain sets of (two or more) transactions that belong to the same conflict set. In the case of this check not being successful, the block is marked as `invalid` and not booked.
-
-In the case of a transaction as payload, initially, the following check is done:
-
-1. UTXO check: it checks if the inputs of the transaction were already booked. If the block does not pass this check, the block is not booked. If it passes the check, it goes to the next step.
-2. Balances check: it checks if the sum of the values of the generated outputs equals the sum of the values of the consumed inputs. If the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step.
-3. Unlock conditions: checks if the unlock conditions are valid. If the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step.
-4. Inputs' conflicts validity check: it checks if all the consumed inputs belong to a valid conflict. If the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step.
-
-After the objective checks, the following subjective checks are done:
-
-5. Inputs' conflicts rejection check: it checks if all the consumed inputs belong to a non-rejected conflict. Notice that this is not an objective check, so the node is susceptible (even if with a small probability) to have its opinion about rejected conflicts changed by a reorganization. For that reason, if the block does not pass this check, the block is booked into the Tangle and ledger state (even though the balances are not altered by this block, since it will be booked to a rejected conflict). This is what we call "lazy booking", which is done to avoid huge re-calculations in case of a reorganization of the ledger. If it passes the check, it goes to the next step.
-6. Double spend check: it checks if any of the inputs is conflicting with a transaction that was already confirmed. As in the last step, this check is not objective and, thus, if the block does not pass this check, it is lazy booked into the Tangle and ledger state, into an invalid conflict. If it passes the check, it goes to the next step.
-
-At this point, the missing steps are the most computationally expensive:
-
-7. Inputs' conflicting conflicts check: it checks if the conflicts of the inputs are conflicting. As in the last step, if the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step.
-8. Conflict check: it checks if the inputs are conflicting with an unconfirmed transaction. In this step, the conflict to which the block belongs is computed. In both cases (passing the check or not), the transaction is booked into the ledger state and the block is booked into the Tangle, but its conflict ID will be different depending on the outcome of the check.
-
-[![Booker](/img/protocol_specification/booker.png 'Booker')](/img/protocol_specification/booker.png)
-
-Finally, after a block is booked, it might become a [marker](markers.md) (depending on the marker policy) and can be gossiped.
-
-### Consensus Mechanism
-
-A detailed description can be found [here](consensus_mechanism.md).
-
-### Tip Manager
-
-The first check done in the tip manager is the eligibility check (i.e., subjective timestamp is ok), after passing it, a block is said to be `eligible` for tip selection (otherwise, it's `not eligible`).
-If a block is eligible for [tip selection](tangle.md#tsa) and its payload is `liked`, along with all its weak past cone, the block is added to the strong tip pool and its parents are removed from the strong tip pool. If a block is eligible for tip selection, its payload is `liked` but its conflict is not liked it is added to the weak tip pool.
diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/tangle.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/tangle.md
deleted file mode 100644
index ee66e56c518..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/tangle.md
+++ /dev/null
@@ -1,520 +0,0 @@
----
-description: The Tangle represents a growing partially-ordered set of blocks, linked with each other through cryptographic primitives, and replicated to all nodes in the peer-to-peer network. It enables the ledger state (i.e., the UTXO-DAG formed by transactions contained in blocks), and the possibility to store data.
-image: /img/protocol_specification/tangle.png
-keywords:
- - block
- - strong parents
- - node
- - transactions
- - level of knowledge
- - cone
- - past
- - future
- - strong block
- - weak block
- - approval weight
----
-
-# Tangle
-
-## Data Types
-
-| Name | Description |
-| ------------ | -------------------------------------------------------------------------------------------------------------- |
-| uint8 | An unsigned 8 bit integer encoded in Little Endian. |
-| uint16 | An unsigned 16 bit integer encoded in Little Endian. |
-| uint32 | An unsigned 32 bit integer encoded in Little Endian. |
-| uint64 | An unsigned 64 bit integer encoded in Little Endian. |
-| ByteArray[N] | A static size array of size N. |
-| ByteArray | A dynamically sized array. A uint32 denotes its length. |
-| string | A dynamically sized array of an UTF-8 encoded string. A uint16 denotes its length. |
-| time | Unix time in nanoseconds stored as `int64`, i.e., the number of nanoseconds elapsed since January 1, 1970 UTC. |
-
-## Subschema Notation
-
-| Name | Description |
-| :------------- | :-------------------------------------------------------- |
-| oneOf | One of the listed subschemas. |
-| optOneOf | Optionally one of the listed subschemas. |
-| anyOf | Any (one or more) of the listed subschemas. |
-| `between(x,y)` | Between (but including) x and y of the listed subschemas. |
-
-## Parameters
-
-- `MAX_MESSAGE_SIZE=64 KB` The maximum allowed block size.
-- `MAX_PAYLOAD_SIZE=65157 B` The maximum allowed payload size.
-- `MIN_STRONG_PARENTS=1` The minimum amount of strong parents a block needs to reference.
-- `MAX_PARENTS=8` The maximum amount of parents a block can reference.
-
-## General Concept
-
-[![The Tangle](/img/protocol_specification/tangle.png)](/img/protocol_specification/tangle.png)
-
-The Tangle represents a growing partially-ordered set of blocks, linked with each other through cryptographic primitives, and replicated to all nodes in the peer-to-peer network. The Tangle enables the ledger state (i.e., the UTXO-DAG formed by transactions contained in blocks), and the possibility to store data.
-
-### Terminology
-
-- **Genesis**: The genesis block is used to bootstrap the Tangle and creates the entire token supply and no other tokens will ever be created. It is the first block and does not have parents. It is marked as solid, eligible and confirmed.
-- **Past cone**: All blocks that are directly or indirectly referenced by a block are called its past cone.
-- **Future cone**: All blocks that directly or indirectly reference a block are called its future cone.
-- **Solidity**: A block is marked as solid if its entire past cone until the Genesis (or the latest snapshot) is known.
-- **Parents**: A block directly references between 1-8 previous blocks that we call its **parents**. A parent can be either **strong** or **weak** (see [approval switch](#orphanage--approval-switch)).
-- **Children**: Parents are approved by their referencing blocks called **children**. It is thus a reverse mapping of parents. As in the parents' definition, a child might be either **strong** or **weak**.
-- **Conflict**: A version of the ledger that temporarily coexists with other versions, each spawned by conflicting transactions.
-
-## Blocks
-
-Blocks are created and signed by nodes. Next to several fields of metadata, they carry a **payload**. The maximum block size is `MAX_MESSAGE_SIZE`.
-
-### Block ID
-
-BLAKE2b-256 hash of the byte contents of the block. It should be used by the nodes to index the blocks and by external APIs.
-
-### Block structure
-
-Name | -Type | -Description | -||||||||||||||||||||||||||||||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Version | -uint8 | -The block version. The schema specified in this RFC is for version 1 only. | -||||||||||||||||||||||||||||||||||||||||||||||||
Parents blocks count | -uint8 | -The amount of parents block preceding the current block. | -||||||||||||||||||||||||||||||||||||||||||||||||
Parents Blocks anyOf |
-
-
-
- Strong Parents Block-- Defines a parents block containing strong parents references. --
-
- Weak Parents Block-- Defines a parents block containing weak parents references. --
-
- Dislike Parents Block-- Defines a parents block containing dislike parents references. --
-
- Like Parents Block-- Defines a parents block containing like parents references. --
|
- |||||||||||||||||||||||||||||||||||||||||||||||||
Issuer public key (Ed25519) | -ByteArray[32] | -The public key of the node issuing the block. | -||||||||||||||||||||||||||||||||||||||||||||||||
Issuing time | -time | -The time the block was issued. | -||||||||||||||||||||||||||||||||||||||||||||||||
Sequence number | -uint64 | -The always increasing number of issued blocks of the issuing node. | -||||||||||||||||||||||||||||||||||||||||||||||||
Payload length | -uint32 | -The length of the Payload. Since its type may be unknown to the node, it must be declared in advance. 0 length means no payload will be attached. | -||||||||||||||||||||||||||||||||||||||||||||||||
- Payload - | -
-
-
- Generic Payload-- An outline of a general payload --
|
- |||||||||||||||||||||||||||||||||||||||||||||||||
Nonce | -uint64 | -The nonce which lets this block fulfill the adaptive Proof-of-Work requirement. | -||||||||||||||||||||||||||||||||||||||||||||||||
Signature (Ed25519) | -ByteArray[64] | -Signature of the issuing node's private key signing the entire block bytes. | -