diff --git a/articleRedirects.js b/articleRedirects.js index 813d2965e9d..f2efc146abf 100644 --- a/articleRedirects.js +++ b/articleRedirects.js @@ -480,10 +480,6 @@ exports.articleRedirects = [ from: '/shimmer/smart-contracts/contribute', to: '/learn/smart-contracts/introduction', }, - { - from: '/shimmer/smart-contracts/guide/example_projects/fair_roulette', - to: '/wasp-wasm/tutorials/fair_roulette', - }, { from: '/shimmer/smart-contracts/metrics', to: '/wasp/metrics', diff --git a/cli/package.json b/cli/package.json index a6d1a14ffb3..b6819d3d94d 100644 --- a/cli/package.json +++ b/cli/package.json @@ -22,7 +22,7 @@ }, "dependencies": { "@babel/generator": "^7.21.5", - "@babel/parser": "^7.21.2", + "@babel/parser": "^7.23.0", "@babel/types": "^7.21.5", "@iota-wiki/core": "workspace:^", "@yarnpkg/shell": "^3.2.0", diff --git a/common/community/research/iota-devnet-wallet.md b/common/community/research/iota-devnet-wallet.md deleted file mode 100644 index 4210a2a8358..00000000000 --- a/common/community/research/iota-devnet-wallet.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: IOTA 2.0 DevNet Wallet -description: A Wallet for the current IOTA 2.0 Development - and Testnet. Offers - a lot of Functions that will become available in the IOTA 2.0 Mainnet. No real - IOTA Tokens are used here. ---- - -# IOTA 2.0 DevNet Wallet - -With a wallet for the IOTA 2.0 DevNet you can request tokens from the faucet, transfer them and create or manage NFTs and digital assets. Currently, there exist two wallets for the IOTA 2.0 DevNet. A Command Line Wallet (cli-wallet) and a GUI Wallet (IOTA 2.0 DevNet GUI Wallet) - -## cli-wallet - -The cli wallet is the most feature complete wallet. You can find a guide for it [here](/goshimmer/tutorials/wallet_library). -The cli wallet is located directly in the Goshimmer repo and pre-build binaries are released with each new GoShimmer version [here](https://github.com/iotaledger/goshimmer/releases). 
- -## IOTA 2.0 DevNet GUI Wallet - -![DevNet-Wallet](https://github.com/iotaledger/IOTA-2.0-DevNet-wallet/blob/master/images/devnet-wallet.png?raw=true) - -With the GUI wallet you can transfer token and manage your digital assets. It has its own [repo](https://github.com/iotaledger/IOTA-2.0-DevNet-wallet) and you can get prebuild binaries from the [releases](https://github.com/iotaledger/IOTA-2.0-DevNet-wallet/releases). diff --git a/common/community/the-community/how-to-support.md b/common/community/the-community/how-to-support.md index d8a49da8982..838e46076ed 100644 --- a/common/community/the-community/how-to-support.md +++ b/common/community/the-community/how-to-support.md @@ -95,7 +95,6 @@ After you have fulfilled the requirements, open the following repositories and _ ![Add a star to IOTA's repositories on Github](/img/participate/how-to-support/github_iota_star.png) - [Firefly](https://github.com/iotaledger/firefly) -- [GoShimmer](https://github.com/iotaledger/goshimmer) - [Hornet](https://github.com/iotaledger/hornet) - [Identity](https://github.com/iotaledger/identity.rs) - [iota.rs](https://github.com/iotaledger/iota.rs) diff --git a/docs/build/getting-started/networks-endpoints.mdx b/docs/build/getting-started/networks-endpoints.mdx index fdcda1851c4..bf48c1bf4cf 100644 --- a/docs/build/getting-started/networks-endpoints.mdx +++ b/docs/build/getting-started/networks-endpoints.mdx @@ -45,7 +45,7 @@ token. 
Stardust - + https://api.stardust-mainnet.iotaledger.net diff --git a/docs/build/identity.rs/1.0-rc.1/docs/concepts/decentralized_identifiers/alias.mdx b/docs/build/identity.rs/1.0-rc.1/docs/concepts/decentralized_identifiers/alias.mdx index 4c315bc5b04..3b5f1ace629 100644 --- a/docs/build/identity.rs/1.0-rc.1/docs/concepts/decentralized_identifiers/alias.mdx +++ b/docs/build/identity.rs/1.0-rc.1/docs/concepts/decentralized_identifiers/alias.mdx @@ -14,7 +14,7 @@ keywords: # Alias Output -The IOTA method uses the IOTA ledger which uses the [unspent transaction output (UTXO) model](/goshimmer/protocol_specification/components/ledgerstate). Also, the features of the [Stardust](/introduction/stardust/explanations/what_is_stardust) upgrade are fundamental to the IOTA DID method. +The IOTA method uses the IOTA ledger which uses the [unspent transaction output (UTXO) model](/learn/protocols/chrysalis/core-concepts/switch-to-UTXO/). Also, the features of the [Stardust](/introduction/stardust/explanations/what_is_stardust) upgrade are fundamental to the IOTA DID method. The Alias Output is used for storing the DID Document on the ledger. It is a specific implementation of the UTXO state machine that can hold arbitrary data in its `State Metadata`. The Alias Output has two kinds of controllers, a state controller and a governor. A state controller can execute a state transition which allows updating the data in the `State Metadata`. The governor, on the contrary, can't update the `State Metadata` but can change both controllers and destroy the Alias Output. A controller can be either Ed25519 Address, Alias Address or an NFT Address and at most one of each can be set for an Alias Output. 
diff --git a/docs/build/identity.rs/1.0-rc.1/docs/libraries/wasm/api_reference.mdx b/docs/build/identity.rs/1.0-rc.1/docs/libraries/wasm/api_reference.mdx index c9cf75b33bf..49759b579e5 100644 --- a/docs/build/identity.rs/1.0-rc.1/docs/libraries/wasm/api_reference.mdx +++ b/docs/build/identity.rs/1.0-rc.1/docs/libraries/wasm/api_reference.mdx @@ -90,6 +90,15 @@ keywords:

A span of time.

+
+ EdDSAJwsVerifier +
+
+

+ An implementor of `IJwsVerifier` that can handle the + EdDSA algorithm. +

+
IotaDID
@@ -308,14 +317,6 @@ keywords: ## Members
-
- StateMetadataEncoding -
-
-
- MethodRelationship -
-
StatusCheck
@@ -427,11 +428,41 @@ keywords:

Return after the first error occurs.

+
+ StateMetadataEncoding +
+
+
+ MethodRelationship +
+
## Functions
+
+ + verifyEd25519(alg, signingInput, decodedSignature, publicKey) + +
+
+

+ Verify a JWS signature secured with the EdDSA algorithm and + curve Ed25519. +

+

+ This function is useful when one is composing a `IJwsVerifier` that + delegates + EdDSA verification with curve Ed25519 to this function. +

+

Warning

+

+ This function does not check whether alg = EdDSA in the + protected header. Callers are expected to assert this prior to calling the + function. +

+
start()
@@ -450,28 +481,6 @@ keywords:

Decode the given url-safe base64-encoded slice into its raw bytes.

-
- - verifyEdDSA(alg, signingInput, decodedSignature, publicKey) - -
-
-

- Verify a JWS signature secured with the JwsAlgorithm::EdDSA{' '} - algorithm. Only the EdCurve::Ed25519 variant is supported for - now. -

-

- This function is useful when one is building an IJwsVerifier{' '} - that extends the default provided by the IOTA Identity Framework. -

-

Warning

-

- This function does not check whether alg = EdDSA in the - protected header. Callers are expected to assert this prior to calling the - function. -

-
@@ -735,7 +744,7 @@ A method-agnostic DID Document. - [.generateMethod(storage, keyType, alg, fragment, scope)](#CoreDocument+generateMethod) ⇒ Promise.<string> - [.purgeMethod(storage, id)](#CoreDocument+purgeMethod) ⇒ Promise.<void> - [.createJws(storage, fragment, payload, options)](#CoreDocument+createJws) ⇒ [Promise.<Jws>](#Jws) - - [.createCredentialJwt(storage, fragment, credential, options)](#CoreDocument+createCredentialJwt) ⇒ [Promise.<Jwt>](#Jwt) + - [.createCredentialJwt(storage, fragment, credential, options, custom_claims)](#CoreDocument+createCredentialJwt) ⇒ [Promise.<Jwt>](#Jwt) - [.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options)](#CoreDocument+createPresentationJwt) ⇒ [Promise.<Jwt>](#Jwt) - _static_ - [.fromJSON(json)](#CoreDocument.fromJSON) ⇒ [CoreDocument](#CoreDocument) @@ -1046,7 +1055,8 @@ Regardless of which options are passed the following conditions must be met in o take place. - The JWS must be encoded according to the JWS compact serialization. -- The `kid` value in the protected header must be an identifier of a verification method in this DID document. +- The `kid` value in the protected header must be an identifier of a verification method in this DID document, + or set explicitly in the `options`. **Kind**: instance method of [CoreDocument](#CoreDocument) @@ -1054,7 +1064,7 @@ take place. | ----------------- | -------------------------------------------------------------- | | jws | [Jws](#Jws) | | options | [JwsVerificationOptions](#JwsVerificationOptions) | -| signatureVerifier | IJwsVerifier \| undefined | +| signatureVerifier | IJwsVerifier | | detachedPayload | string \| undefined | @@ -1179,32 +1189,37 @@ See [RFC7515 section 3.1](https://www.rfc-editor.org/rfc/rfc7515#section-3.1). 
-### coreDocument.createCredentialJwt(storage, fragment, credential, options) ⇒ [Promise.<Jwt>](#Jwt) +### coreDocument.createCredentialJwt(storage, fragment, credential, options, custom_claims) ⇒ [Promise.<Jwt>](#Jwt) Produces a JWT where the payload is produced from the given `credential` -in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1). +in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token). -The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be -produced by the corresponding private key backed by the `storage` in accordance with the passed `options`. +Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id` +of the method identified by `fragment` and the JWS signature will be produced by the corresponding +private key backed by the `storage` in accordance with the passed `options`. + +The `custom_claims` can be used to set additional claims on the resulting JWT. **Kind**: instance method of [CoreDocument](#CoreDocument) -| Param | Type | -| ---------- | -------------------------------------------------------- | -| storage | [Storage](#Storage) | -| fragment | string | -| credential | [Credential](#Credential) | -| options | [JwsSignatureOptions](#JwsSignatureOptions) | +| Param | Type | +| ------------- | ----------------------------------------------------------------- | +| storage | [Storage](#Storage) | +| fragment | string | +| credential | [Credential](#Credential) | +| options | [JwsSignatureOptions](#JwsSignatureOptions) | +| custom_claims | Record.<string, any> \| undefined | ### coreDocument.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options) ⇒ [Promise.<Jwt>](#Jwt) Produces a JWT where the payload is produced from the given presentation. -in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1). 
+in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token). -The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be -produced by the corresponding private key backed by the `storage` in accordance with the passed `options`. +Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id` +of the method identified by `fragment` and the JWS signature will be produced by the corresponding +private key backed by the `storage` in accordance with the passed `options`. **Kind**: instance method of [CoreDocument](#CoreDocument) @@ -1708,6 +1723,7 @@ It does not imply anything about a potentially present proof property on the cre - [DecodedJwtCredential](#DecodedJwtCredential) - [.credential()](#DecodedJwtCredential+credential) ⇒ [Credential](#Credential) - [.protectedHeader()](#DecodedJwtCredential+protectedHeader) ⇒ [JwsHeader](#JwsHeader) + - [.customClaims()](#DecodedJwtCredential+customClaims) ⇒ Record.<string, any> \| undefined - [.intoCredential()](#DecodedJwtCredential+intoCredential) ⇒ [Credential](#Credential) @@ -1726,6 +1742,14 @@ Returns a copy of the protected header parsed from the decoded JWS. **Kind**: instance method of [DecodedJwtCredential](#DecodedJwtCredential) + + +### decodedJwtCredential.customClaims() ⇒ Record.<string, any> \| undefined + +The custom claims parsed from the JWT. 
+ +**Kind**: instance method of [DecodedJwtCredential](#DecodedJwtCredential) + ### decodedJwtCredential.intoCredential() ⇒ [Credential](#Credential) @@ -1756,6 +1780,7 @@ It does not imply anything about a potentially present proof property on the pre - [.expirationDate()](#DecodedJwtPresentation+expirationDate) ⇒ [Timestamp](#Timestamp) \| undefined - [.issuanceDate()](#DecodedJwtPresentation+issuanceDate) ⇒ [Timestamp](#Timestamp) \| undefined - [.audience()](#DecodedJwtPresentation+audience) ⇒ string \| undefined + - [.customClaims()](#DecodedJwtPresentation+customClaims) ⇒ Record.<string, any> \| undefined @@ -1807,6 +1832,14 @@ The `aud` property parsed from JWT claims. **Kind**: instance method of [DecodedJwtPresentation](#DecodedJwtPresentation) + + +### decodedJwtPresentation.customClaims() ⇒ Record.<string, any> \| undefined + +The custom claims parsed from the JWT. + +**Kind**: instance method of [DecodedJwtPresentation](#DecodedJwtPresentation) + ## DomainLinkageConfiguration @@ -2054,6 +2087,49 @@ Deserializes an instance from a JSON object. | ----- | ---------------- | | json | any | + + +## EdDSAJwsVerifier + +An implementor of `IJwsVerifier` that can handle the +`EdDSA` algorithm. + +**Kind**: global class + +- [EdDSAJwsVerifier](#EdDSAJwsVerifier) + - [new EdDSAJwsVerifier()](#new_EdDSAJwsVerifier_new) + - [.verify(alg, signingInput, decodedSignature, publicKey)](#EdDSAJwsVerifier+verify) + + + +### new EdDSAJwsVerifier() + +Constructs an EdDSAJwsVerifier. + + + +### edDSAJwsVerifier.verify(alg, signingInput, decodedSignature, publicKey) + +Verify a JWS signature secured with the `EdDSA` algorithm. +Only the `Ed25519` curve is supported for now. + +This function is useful when one is building an `IJwsVerifier` that extends the default provided by +the IOTA Identity Framework. + +# Warning + +This function does not check whether `alg = EdDSA` in the protected header. Callers are expected to assert this +prior to calling the function. 
+ +**Kind**: instance method of [EdDSAJwsVerifier](#EdDSAJwsVerifier) + +| Param | Type | +| ---------------- | ------------------------- | +| alg | JwsAlgorithm | +| signingInput | Uint8Array | +| decodedSignature | Uint8Array | +| publicKey | [Jwk](#Jwk) | + ## IotaDID @@ -2353,7 +2429,7 @@ Deserializes an instance from a JSON object. - [.generateMethod(storage, keyType, alg, fragment, scope)](#IotaDocument+generateMethod) ⇒ Promise.<string> - [.purgeMethod(storage, id)](#IotaDocument+purgeMethod) ⇒ Promise.<void> - [.createJwt(storage, fragment, payload, options)](#IotaDocument+createJwt) ⇒ [Promise.<Jws>](#Jws) - - [.createCredentialJwt(storage, fragment, credential, options)](#IotaDocument+createCredentialJwt) ⇒ [Promise.<Jwt>](#Jwt) + - [.createCredentialJwt(storage, fragment, credential, options, custom_claims)](#IotaDocument+createCredentialJwt) ⇒ [Promise.<Jwt>](#Jwt) - [.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options)](#IotaDocument+createPresentationJwt) ⇒ [Promise.<Jwt>](#Jwt) - _static_ - [.newWithId(id)](#IotaDocument.newWithId) ⇒ [IotaDocument](#IotaDocument) @@ -2590,7 +2666,7 @@ take place. | ----------------- | -------------------------------------------------------------- | | jws | [Jws](#Jws) | | options | [JwsVerificationOptions](#JwsVerificationOptions) | -| signatureVerifier | IJwsVerifier \| undefined | +| signatureVerifier | IJwsVerifier | | detachedPayload | string \| undefined | @@ -2845,32 +2921,37 @@ See [RFC7515 section 3.1](https://www.rfc-editor.org/rfc/rfc7515#section-3.1). -### iotaDocument.createCredentialJwt(storage, fragment, credential, options) ⇒ [Promise.<Jwt>](#Jwt) +### iotaDocument.createCredentialJwt(storage, fragment, credential, options, custom_claims) ⇒ [Promise.<Jwt>](#Jwt) Produces a JWS where the payload is produced from the given `credential` -in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1). 
+in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token). -The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be -produced by the corresponding private key backed by the `storage` in accordance with the passed `options`. +Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id` +of the method identified by `fragment` and the JWS signature will be produced by the corresponding +private key backed by the `storage` in accordance with the passed `options`. + +The `custom_claims` can be used to set additional claims on the resulting JWT. **Kind**: instance method of [IotaDocument](#IotaDocument) -| Param | Type | -| ---------- | -------------------------------------------------------- | -| storage | [Storage](#Storage) | -| fragment | string | -| credential | [Credential](#Credential) | -| options | [JwsSignatureOptions](#JwsSignatureOptions) | +| Param | Type | +| ------------- | ----------------------------------------------------------------- | +| storage | [Storage](#Storage) | +| fragment | string | +| credential | [Credential](#Credential) | +| options | [JwsSignatureOptions](#JwsSignatureOptions) | +| custom_claims | Record.<string, any> \| undefined | ### iotaDocument.createPresentationJwt(storage, fragment, presentation, signature_options, presentation_options) ⇒ [Promise.<Jwt>](#Jwt) Produces a JWT where the payload is produced from the given presentation. -in accordance with [VC-JWT version 1.1](https://w3c.github.io/vc-jwt/#version-1.1). +in accordance with [VC Data Model v1.1](https://www.w3.org/TR/vc-data-model/#json-web-token). -The `kid` in the protected header is the `id` of the method identified by `fragment` and the JWS signature will be -produced by the corresponding private key backed by the `storage` in accordance with the passed `options`. 
+Unless the `kid` is explicitly set in the options, the `kid` in the protected header is the `id` +of the method identified by `fragment` and the JWS signature will be produced by the corresponding +private key backed by the `storage` in accordance with the passed `options`. **Kind**: instance method of [IotaDocument](#IotaDocument) @@ -3451,6 +3532,7 @@ Returns a clone of the JWS string. - [.setAlg(value)](#JwsHeader+setAlg) - [.b64()](#JwsHeader+b64) ⇒ boolean \| undefined - [.setB64(value)](#JwsHeader+setB64) + - [.custom()](#JwsHeader+custom) ⇒ Record.<string, any> \| undefined - [.has(claim)](#JwsHeader+has) ⇒ boolean - [.isDisjoint(other)](#JwsHeader+isDisjoint) ⇒ boolean - [.jku()](#JwsHeader+jku) ⇒ string \| undefined @@ -3528,6 +3610,14 @@ Sets a value for the base64url-encode payload claim (b64). | ----- | -------------------- | | value | boolean | + + +### jwsHeader.custom() ⇒ Record.<string, any> \| undefined + +Additional header parameters. + +**Kind**: instance method of [JwsHeader](#JwsHeader) + ### jwsHeader.has(claim) ⇒ boolean @@ -3835,7 +3925,9 @@ Deserializes an instance from a JSON object. - [.setCty(value)](#JwsSignatureOptions+setCty) - [.serUrl(value)](#JwsSignatureOptions+serUrl) - [.setNonce(value)](#JwsSignatureOptions+setNonce) + - [.setKid(value)](#JwsSignatureOptions+setKid) - [.setDetachedPayload(value)](#JwsSignatureOptions+setDetachedPayload) + - [.setCustomHeaderParameters(value)](#JwsSignatureOptions+setCustomHeaderParameters) - [.toJSON()](#JwsSignatureOptions+toJSON) ⇒ any - [.clone()](#JwsSignatureOptions+clone) ⇒ [JwsSignatureOptions](#JwsSignatureOptions) - _static_ @@ -3921,6 +4013,18 @@ Replace the value of the `nonce` field. | ----- | ------------------- | | value | string | + + +### jwsSignatureOptions.setKid(value) + +Replace the value of the `kid` field. 
+ +**Kind**: instance method of [JwsSignatureOptions](#JwsSignatureOptions) + +| Param | Type | +| ----- | ------------------- | +| value | string | + ### jwsSignatureOptions.setDetachedPayload(value) @@ -3933,6 +4037,18 @@ Replace the value of the `detached_payload` field. | ----- | -------------------- | | value | boolean | + + +### jwsSignatureOptions.setCustomHeaderParameters(value) + +Add additional header parameters. + +**Kind**: instance method of [JwsSignatureOptions](#JwsSignatureOptions) + +| Param | Type | +| ----- | --------------------------------------- | +| value | Record.<string, any> | + ### jwsSignatureOptions.toJSON() ⇒ any @@ -3971,7 +4087,8 @@ Deserializes an instance from a JSON object. - [new JwsVerificationOptions(options)](#new_JwsVerificationOptions_new) - _instance_ - [.setNonce(value)](#JwsVerificationOptions+setNonce) - - [.setScope(value)](#JwsVerificationOptions+setScope) + - [.setMethodScope(value)](#JwsVerificationOptions+setMethodScope) + - [.setMethodId(value)](#JwsVerificationOptions+setMethodId) - [.toJSON()](#JwsVerificationOptions+toJSON) ⇒ any - [.clone()](#JwsVerificationOptions+clone) ⇒ [JwsVerificationOptions](#JwsVerificationOptions) - _static_ @@ -3999,9 +4116,9 @@ Set the expected value for the `nonce` parameter of the protected header. | ----- | ------------------- | | value | string | - + -### jwsVerificationOptions.setScope(value) +### jwsVerificationOptions.setMethodScope(value) Set the scope of the verification methods that may be used to verify the given JWS. @@ -4011,6 +4128,18 @@ Set the scope of the verification methods that may be used to verify the given J | ----- | ---------------------------------------- | | value | [MethodScope](#MethodScope) | + + +### jwsVerificationOptions.setMethodId(value) + +Set the DID URl of the method, whose JWK should be used to verify the JWS. 
+ +**Kind**: instance method of [JwsVerificationOptions](#JwsVerificationOptions) + +| Param | Type | +| ----- | ------------------------------ | +| value | [DIDUrl](#DIDUrl) | + ### jwsVerificationOptions.toJSON() ⇒ any @@ -4183,9 +4312,9 @@ Creates a new [JwtCredentialValidator](#JwtCredentialValidator). If a `signature verifying decoded JWS signatures, otherwise the default which is only capable of handling the `EdDSA` algorithm will be used. -| Param | Type | -| ----------------- | --------------------------------------------------- | -| signatureVerifier | IJwsVerifier \| undefined | +| Param | Type | +| ----------------- | ------------------------- | +| signatureVerifier | IJwsVerifier | @@ -4372,9 +4501,9 @@ Creates a new [JwtDomainLinkageValidator](#JwtDomainLinkageValidator). If a `sig verifying decoded JWS signatures, otherwise the default which is only capable of handling the `EdDSA` algorithm will be used. -| Param | Type | -| ----------------- | --------------------------------------------------- | -| signatureVerifier | IJwsVerifier \| undefined | +| Param | Type | +| ----------------- | ------------------------- | +| signatureVerifier | IJwsVerifier | @@ -4555,9 +4684,9 @@ Creates a new [JwtPresentationValidator](#JwtPresentationValidator). If a `signa verifying decoded JWS signatures, otherwise the default which is only capable of handling the `EdDSA` algorithm will be used. -| Param | Type | -| ----------------- | --------------------------------------------------- | -| signatureVerifier | IJwsVerifier \| undefined | +| Param | Type | +| ----------------- | ------------------------- | +| signatureVerifier | IJwsVerifier | @@ -5610,6 +5739,7 @@ Obtain the wrapped `JwkStorage`. 
**Kind**: global class - [Timestamp](#Timestamp) + - [new Timestamp()](#new_Timestamp_new) - _instance_ - [.toRFC3339()](#Timestamp+toRFC3339) ⇒ string - [.checkedAdd(duration)](#Timestamp+checkedAdd) ⇒ [Timestamp](#Timestamp) \| undefined @@ -5620,6 +5750,12 @@ Obtain the wrapped `JwkStorage`. - [.nowUTC()](#Timestamp.nowUTC) ⇒ [Timestamp](#Timestamp) - [.fromJSON(json)](#Timestamp.fromJSON) ⇒ [Timestamp](#Timestamp) + + +### new Timestamp() + +Creates a new [Timestamp](#Timestamp) with the current date and time. + ### timestamp.toRFC3339() ⇒ string @@ -5949,18 +6085,6 @@ Deserializes an instance from a JSON object. | ----- | ---------------- | | json | any | - - -## StateMetadataEncoding - -**Kind**: global variable - - - -## MethodRelationship - -**Kind**: global variable - ## StatusCheck @@ -6059,6 +6183,41 @@ Return after the first error occurs. **Kind**: global variable + + +## StateMetadataEncoding + +**Kind**: global variable + + + +## MethodRelationship + +**Kind**: global variable + + + +## verifyEd25519(alg, signingInput, decodedSignature, publicKey) + +Verify a JWS signature secured with the `EdDSA` algorithm and curve `Ed25519`. + +This function is useful when one is composing a `IJwsVerifier` that delegates +`EdDSA` verification with curve `Ed25519` to this function. + +# Warning + +This function does not check whether `alg = EdDSA` in the protected header. Callers are expected to assert this +prior to calling the function. + +**Kind**: global function + +| Param | Type | +| ---------------- | ------------------------- | +| alg | JwsAlgorithm | +| signingInput | Uint8Array | +| decodedSignature | Uint8Array | +| publicKey | [Jwk](#Jwk) | + ## start() @@ -6090,27 +6249,3 @@ Decode the given url-safe base64-encoded slice into its raw bytes. 
| Param | Type | | ----- | ----------------------- | | data | Uint8Array | - - - -## verifyEdDSA(alg, signingInput, decodedSignature, publicKey) - -Verify a JWS signature secured with the `JwsAlgorithm::EdDSA` algorithm. -Only the `EdCurve::Ed25519` variant is supported for now. - -This function is useful when one is building an `IJwsVerifier` that extends the default provided by -the IOTA Identity Framework. - -# Warning - -This function does not check whether `alg = EdDSA` in the protected header. Callers are expected to assert this -prior to calling the function. - -**Kind**: global function - -| Param | Type | -| ---------------- | ------------------------- | -| alg | JwsAlgorithm | -| signingInput | Uint8Array | -| decodedSignature | Uint8Array | -| publicKey | [Jwk](#Jwk) | diff --git a/docs/build/wasp-wasm/0.7/docs/tutorials/fair_roulette.md b/docs/build/wasp-wasm/0.7/docs/tutorials/fair_roulette.md deleted file mode 100644 index 871b60eb09d..00000000000 --- a/docs/build/wasp-wasm/0.7/docs/tutorials/fair_roulette.md +++ /dev/null @@ -1,329 +0,0 @@ ---- -description: An example game project with frontend and contract, demonstrating the development, setup, and interaction with a smart contract. -image: /img/logo/WASP_logo_dark.png -keywords: - - Smart Contracts - - Rust - - poc - - proof of concept - - node - - nvm - - JavaScript - - TypeScript - - Wasm - - tutorial ---- - -# Fair Roulette - -Fair roulette is an example reference implementation which demonstrates the development, setup, and interaction with a smart contract. - -## Introduction - -The Fair roulette example project is a simple betting game in which players can bet on a number within a certain range. - -The game consists of many rounds in which the player will try to bet on the right number to win a share of the bet funds. - -A round is running for a certain amount of time. In the example its 60 seconds. In this timeframe, incoming bets will be added to a list of bets. 
After 60 seconds have passed, a winning number will be randomly generated and all players who made the right guess will receive their share of the pot. - -If no round is _active_ when a bet gets placed, the round gets initiated immediately. - -The random number is generated by the native randomness of the IOTA Smart Contracts consensus. -It is unpredictable by anybody, including an individual validator node. -Therefore the roulette is Fair. - -## Mandatory Setup - -The mandatory setup consists out of: - -- 1 [GoShimmer](/goshimmer/welcome) node >= 0.7.5v ([25c827e8326a](https://github.com/iotaledger/goshimmer/commit/25c827e8326a)) -- 1 Beta [Wasp node](/wasp/running-a-node). -- 1 Static file server (nginx, Apache, fasthttp) - -## Technicalities - -Before you dive into the contents of the project, you should take a look at important fundamentals. - -### Fundamentals - -Wasp is part of the IOTA ecosystem that enables the execution of smart contracts. These contracts run logic and are allowed to do state (change) requests towards the Tangle. You will need a GoShimmer node to be able to store state. It receives state change requests and, if valid, saves them onto the Tangle. - -There are two ways to interact with smart contracts. - -#### On Ledger Requests - -See: [On-ledger Requests](/learn/smart-contracts/invocation#on-ledger) - -On-ledger requests are sent to GoShimmer nodes. Wasp periodically requests new On-ledger requests from GoShimmer nodes, and handles them accordingly. These messages are validated through the network and take some time to be processed. - -#### Off Ledger Requests - -See: [Off-ledger Requests](/learn/smart-contracts/invocation#off-ledger) - -Off-ledger requests are directly sent to Wasp nodes and do not require validation through GoShimmer nodes. They are therefore faster. However, they require an initial deposit of funds to a chain account as this account will initiate required On-ledger requests on behalf of the desired contract or player. 
- -:::note -This example uses On-ledger requests to initiate a betting request. A method to invoke Off-ledger requests is implemented inside the frontend. - -See: [placeBetOffLedger](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L133) -::: - -#### Funds - -As these requests cost some fees, and to be able to bet with real tokens, the player will need a source of funds. - -As the game runs on a testnet, you can request funds from the GoShimmer faucets inside the network. - -See: [How to Obtain Tokens From the Faucet](/goshimmer/tutorials/obtain_tokens) - -After you have acquired some funds, they will reside inside an address that is handled by a wallet. - -For this PoC, we have implemented a very narrowed-down wallet that runs inside the browser itself, mostly hidden from the player. - -In the future, we want to provide a solution that enables the use of [Firefly](https://firefly.iota.org/) or MetaMask as a secure external wallet. - -#### Conclusion - -To interact with a smart contract, you will need: - -- A Wasp node that hosts the contract -- A GoShimmer node to interact with the tangle -- Funds from a GoShimmer faucet -- A client that invokes the contract by either an On Ledger request or Off Ledger request. In this example, the Frontend acts as the client. - -### Implementation - -The PoC consists of two projects residing in `contracts/wasm/fairroulette`. - -One is the smart contract itself. Its boilerplate was generated using the new [Schema tool](/wasp-wasm/introduction/) which is shipped with this beta release. -The contract logic is written in Rust, but the same implementation can be achieved -interchangeably with Golang and Assemblyscript which is demonstrated in the root folder -and `./src`. - -The second project is an interactive frontend written in TypeScript, made reactive with the light Svelte framework. 
You can find it in the sub-folder `./frontend`. -This frontend sends On-ledger requests to place bets towards the fair roulette smart contract and makes use of the GoShimmer faucet to request funds. - -### The Smart Contract - -See: [Anatomy of a Smart Contract](/learn/smart-contracts/smart-contract-anatomy) - -As the smart contract is the only actor that is allowed to modify state in the context of the game, it needs to handle a few tasks such as: - -- Validating and accepting placed bets -- Starting and ending a betting round -- Generating a **random** winning number -- Sending payouts to the winners -- Emitting status updates through the event system - -Any incoming bet will be validated. This includes the amount of tokens which have been bet and also the number on which the player bet on. For example, any number over 8 or under 1 will be rejected. - -If the bet is valid and no round is active, the round state will be changed to `1`, marking an active round. The bet will be the first of a list of bets. - -A delayed function call will be activated which executes **after 60 seconds**. - -This function is the payout function that generates a random winning number, and pays out the winners of the round. After this, the round state will be set to `0` indicating the end of the round. - -If a round is already active, the bet will be appended to the list of bets and await processing. - -All state changes such as the `round started` ,`round ended`, `placed bets`, and the `payout of the winners` are published as events. Events are published as messages through a public web socket. - -#### Dependencies - -- [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/quickstart.html) - -#### Building the Contract - -```shell -cd contracts/wasm/fairroulette -wasm-pack build -``` - -### The Frontend - -The frontend has two main tasks. - -1. **Visualize the contract's state**: This includes the list of all placed bets, if a round is currently active and how long it's still going. 
Any payouts will be shown as well, including a fancy animation in case the player has won. The player can also see his current available funds, his seed, and his current address. - -:::danger -The seed is the key to your funds. We display the seed for demonstration purposes only in this PoC. -**Never share your seed with anyone under any circumstance.** -::: - -2. **Enable the player to request funds and participate in the game by placing bets**: This is done by showing the player a list of eight numbers, a selection of the amount of funds to bet, and a bet placing button. - -As faucet requests require minimal proof of work, the calculation happens inside a web worker to prevent freezing the browser UI. - -To provide the frontend with the required events, it subscribes to the public web socket of Wasp to receive state changes. - -These state change events look like this: - -`vmmsg kUUCRkhjD4EGAxv3kM77ay3KMbo51UhaEoCr14TeVHc5 df79d138: fairroulette.bet.placed 2sYqEZ5GM1BnqkZ88yJgPH3CdD9wKqfgGKY1j8FYDSZb3ao5wu 531819 2` - -This event displays a placed bet from the address `12sYqEZ5GM1BnqkZ88yJgPH3CdD9wKqfgGKY1j8FYDSZb3ao5wu`, a bet of `531819i` on the number `2`. Originating from the smart contract ID `df79d138`. - -However, there is a bit more to the concept than to simply subscribe to a web socket and "perform requests". - -### The Communication Layer - -On and Off Ledger requests have a predefined structure. They need to get encoded strictly and include a list of transactions provided by Goshimmer. They also need to get signed by the client using the private key originating from a seed. 
- -Wasp uses the [ExtendedLockedOutput](/goshimmer/protocol_specification/components/advanced_outputs) message type, which enables certain additional properties such as: - -- A fallback address and a fallback timeout -- Unlockable by AliasUnlockBlock (if address is of Misaddress type) -- A time lock (execution after deadline) -- A data payload for arbitrary metadata (size limits apply) - -This data payload is required to act on smart contracts as it contains: - -- The smart contract ID to be selected -- The function ID to be executed -- A list of arguments to be passed into the function - -As we do not expect contract and frontend developers to write their own implementation, we have separated the communication layer into two parts: - -- [The fairroulette_service](#the-fairroulette-service) -- [The wasp_client](#the-wasp-client) - -#### The Wasp Client - -The wasp client is an example implementation of the communication protocol. - -It provides: - -- A basic wallet functionality -- Hashing algorithms -- A web worker to provide proof of work -- Construction of On/Off Ledger requests -- Construction of smart contract arguments and payloads -- Generation of seeds (including their private keys and addresses) -- Serialization of data into binary messages -- Deserialization of smart contract state - -This wasp_client can be seen as a soon-to-be external library. For now, this is a PoC client library shipped with the project. However, in the future , we want to provide a library you can simply include in your project. - -#### The Fairroulette Service - -This service is meant to be a high-level implementation of the actual app. In other words: it's the service that app or frontend developers would concentrate on. - -It does not construct message types, nor does it interact with GoShimmer directly. Besides subscribing to the web socket event system of Wasp, it does not interact directly with Wasp either. 
Such communications are handled by the [`wasp_client`](#the-wasp-client). - -The fairroulette service is a mere wrapper around smart contract invocation calls. It accesses the smart contract state through the `wasp_client` and does minimal decoding of data. - -Let's take a look into three parts of this service to make this more clear. - -This service comprises two parts: - -- [PlaceBetOnLedger](#placebetonledger) -- [CallView](#callview) - -##### PlaceBetOnLedger - -The [placeBetOnLedger](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L149) function is responsible for sending On-Ledger bet requests. It constructs a simple OnLedger object containing: - -- The smart contract ID: `fairroulette` -- The function to invoke: `placeBet` -- An argument: `-number` - - this is the number the player would bet on, the winning number - -This transaction also requires an address to send the request to, and also a variable amount of funds over `0i`. - -:::note -For Wasp, the address to send funds to is the chainId. -::: - -See: [Invoking](/wasp-wasm/how-tos/solo/invoking-sc) - -##### CallView - -The [callView](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L165) function is responsible for calling smart contract view functions. - -See: [Calling a view](/wasp-wasm/how-tos/solo/view-sc) - -To give access to the smart contracts state, you can use view functions to return selected parts of the state. - -In this use case, you can poll the state of the contract at the initial page load of the frontend. -State changes that happen afterwards are published through the websocket event system. 
- -You can find examples to guide you in building similar functions in: - -- Frontend: [getRoundStatus](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/frontend/src/lib/fairroulette_client/fair_roulette_service.ts#L181) - -- Smart Contract: [view_round_status](https://github.com/iotaledger/wasp/blob/7b3ddc54891ccf021c7aaa32db35d88361fade16/contracts/wasm/fairroulette/src/fairroulette.rs#L312) - -Since data returned by the views is encoded in Base64, the frontend needs to decode this by using simple `Buffer` methods. -The `view_round_status` view returns an `UInt16` which has a state of either `0` or `1`. - -This means that to get a proper value from a view call, you should use `readUInt16LE` to decode the matching value. - -#### Dependencies - -- [NodeJS >= 14](https://nodejs.org/en/download/) - If you use a different version of node, you can use [nvm](https://github.com/nvm-sh/nvm) to switch node versions. -- [NPM](https://www.npmjs.com/) - -#### Install Dependencies - -1. Go to your frontend directory ( contracts/wasm/fairroulette/frontend for example) - - ```shell - cd contracts/wasm/fairroulette/frontend - ``` - -2. Install dependencies running: - - ```shell - npm install - ``` - -#### Configuration - -The frontend requires that you create a config file. You can copy the template from `contracts/wasm/fairroulette/frontend/config.dev.sample.js`, and rename it to `config.dev.js` inside the same folder. - -```shell -cp config.dev.sample.js config.dev.js -``` - -Make sure to update the config values according to your setup. - -The `chainId` is the chainId which gets defined after [deploying a chain](/wasp-cli/how-tos/setting-up-a-chain/#deploy-the-isc-chain). You can get your chain id from your dashboard, or list all chains by running: - -```shell -wasp-cli chain list -``` - -`waspWebSocketUrl`, `waspApiUrl`, and `goShimmerApiUrl` are dependent on the location of your Wasp and GoShimmer nodes. 
Make sure to keep the path of the `waspWeb SocketUrl` (`/chain/%chainId/ws`) at the end. - -`seed` can be either `undefined` or a predefined 44 length seed. If `seed` is set to `undefined` a new seed will be generated as soon a user opens the page. A predefined seed will be set for all users. This can be useful for development purposes. - -#### Building The Frontend - -You can build the frontend by running the following commands: - -```shell -cd contracts/wasm/fairroulette/frontend -npm run build_worker -``` - -After this, you can run `npm run dev` which will run a development server that exposes the transpiled frontend on [`http://localhost:5000`](http://localhost:5000). - -If you want to expose the dev server to the public, it might be required to bind the server to any endpoint like `HOST=0.0.0.0 PORT=5000 npm run dev`. - -## Deployment - -You should follow the [Deployment](/wasp-cli/how-tos/setting-up-a-chain/#deploy-the-isc-chain) documentation until you reach the `deploy-contract` command. - -The deployment of a contract requires funds to be deposited to the **chain**. -You can do this by executing the following command from the directory where your Wasp node was configured: - -```shell -wasp-cli chain deposit IOTA:10000 -``` - -Make sure to [Build](#building-the-contract) the contract before deploying it. - -Now, you can deploy the contract with a wasmtime configuration. 
- -```shell -wasp-cli chain deploy-contract wasmtime fairroulette "fairroulette" contracts/wasm/fairroulette/pkg/fairroulette_bg.wasm -``` diff --git a/docs/build/wasp-wasm/0.7/sidebars.js b/docs/build/wasp-wasm/0.7/sidebars.js index 77e9b238512..5fa60186648 100644 --- a/docs/build/wasp-wasm/0.7/sidebars.js +++ b/docs/build/wasp-wasm/0.7/sidebars.js @@ -279,16 +279,5 @@ module.exports = { }, ], }, - { - type: 'category', - label: 'Tutorials', - items: [ - { - type: 'doc', - label: 'Fair Roulette', - id: 'tutorials/fair_roulette', - }, - ], - }, ], }; diff --git a/docs/get-started/community-links.md b/docs/get-started/community-links.md index 8b33e9ef074..6dfa1dfc22d 100644 --- a/docs/get-started/community-links.md +++ b/docs/get-started/community-links.md @@ -63,7 +63,6 @@ Starring repositories on GitHub helps indicate the popularity and quality of a p repositories related to IOTA: - [Firefly](https://github.com/iotaledger/firefly) -- [GoShimmer](https://github.com/iotaledger/goshimmer) - [Hornet](https://github.com/iotaledger/hornet) - [Identity](https://github.com/iotaledger/identity.rs) - [iota.rs](https://github.com/iotaledger/iota.rs) diff --git a/docs/get-started/faq.md b/docs/get-started/faq.md index 87ef9405317..68476e84a8a 100644 --- a/docs/get-started/faq.md +++ b/docs/get-started/faq.md @@ -3,7 +3,6 @@ title: FAQ's description: Questions and answers collections. Clear outlined information for the most common questions around IOTA. 
--- -- [Chrysalis mainnet FAQ](/introduction/explanations/faq/) +- [Mainnet FAQ](/introduction/explanations/faq/) - [Firefly wallet FAQ](/use/wallets/firefly/faq-and-troubleshooting) - [Identity FAQ](/identity.rs/faq/) -- [GoShimmer devnet FAQ](/goshimmer/faq/) diff --git a/docs/get-started/glossary.md b/docs/get-started/glossary.md index 8a76db08ac9..ba65256bc8a 100644 --- a/docs/get-started/glossary.md +++ b/docs/get-started/glossary.md @@ -8,7 +8,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and ## A - **Address Checksum:** Checksum validation is a way to determine if an address is valid and does not contain typos. -- **Dynamic PoW** (In development for IOTA 1.5 and Shimmer): With this feature, the coordinator can issue a milestone and simultaneously set the future PoW score. This means that if the network is not fully utilized, the PoW will be reduced to the point where it can be executed by microdevices. Accordingly, the coordinator can also raise the PoW difficulty in case of high utilization and thus make an attack very expensive. - **Auto peering:** A mechanism that allows nodes to automatically select their neighbors without manual intervention by the node operator. - **API(Application Programming Interfaces):** The way for applications to interact with the Tangle. - **Atomic Transactions:** Instead of the bundle construct, IOTA and Shimmer use simpler Atomic Transactions. An Atomic Transaction includes everything related to a transaction in a single message instead of splitting it up (Bundles). This reduces network overhead and signature verification load, improves spam protection and rate control, and shortens the length of Merkle proofs (for future sharding). It also reduces implementation overhead and increases maintainability of the core node software. @@ -22,12 +21,10 @@ description: Glossary of all specialized names and phrases used in the IOTA and - **Balance:** Funds on the addresses (account). 
These are always available and cannot be deleted or forgotten. - **Blockchain Bottleneck:** The more transactions are issued, the more the block rate and size become a bottleneck in the system. It is no longer possible to capture all incoming transactions in a prompt manner. Attempts to speed up block rates result in more orphaned blocks (blocks are left behind) and reduce the security of the blockchain. - **Branch (IOTA 2.0):** A version of the ledger that temporarily coexists with other versions, each spawned by conflicting transactions. -- **Bee:** Node software developed by the IOTA foundation using the Rust programming language. - **Bootstrapping attack:** An attack in which a node downloads malicious snapshot files, including invalid transactions and balances. ## C -- **Curl:** This is one of the hash functions currently in use. It is based on the “sponge” construction of the Keccak inventors (SHA-3). - **Confirmed:** Confirmed transactions. Messages in the Tangle are considered for confirmation only when they are directly or indirectly referenced by a milestone that the Coordinator node has validated. To allow the nodes to recognize the milestones, all nodes that participate in the same network are configured with the Merkle root address of a Coordinator that they trust to confirm messages. Using this address, nodes can validate the signatures in milestones to verify whether the trusted Coordinator signs them. To make sure that new messages always have a chance of being confirmed, the Coordinator sends indexed milestones at regular intervals. This way, nodes can compare the indexes of their milestones to check whether they are synchronized with the rest of the network. - **CTPS:** Confirmed transactions per second. - **Cumulative Weight:** A system for valuing transactions. Each additional transaction that references a transaction increases its cumulative weight. 
When tips are selected, a path through transactions that has a higher cumulative weight is preferred. @@ -35,8 +32,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and - **Consensus:** Agreement on a specific date or value in distributed multi-agent systems, in the presence of faulty processes. - **Coordinator (only up to IOTA 2.0):** A trusted entity, as protection against malicious transactions. The Tangle is not yet a final product, it is still in beta. The network currently relies on a kind of shield, the so-called coordinator. It is open-source and runs on a Hornet node. The COO acts as a centralized, voluntary, and temporary alternative consensus mechanism for the Tangle. To do this, the COO sends honest transactions to the full nodes at regular intervals. These packets contain a signed message with no value, called a milestone. The full nodes in the Tangle consider a transaction as confirmed only if it is approved by a milestone. Important: The coordinator can only confirm transactions, but he cannot bypass the consensus rules. To create, freeze or steal tokens is not possible for him. This fixed rule and the COO address is hardcoded on each full node, so the coordinator’s influence on the tangle is very limited, since the tangle is also constantly monitored by all the other full nodes. > The Coo will be switched off with the IOTA 2.0 upgrade. - **Communication Layer (IOTA 2.0):** This layer stores and communicates information. This layer contains the distributed ledger or tangle. The rate control and timestamps are also located in this layer. -- **Core Object type (IOTA 2.0):** An object type that must be parsed by all nodes. Parsers are computer programs responsible for decomposing and converting an input into a format more suitable for further processing. -- **Core Application (IOTA 2.0):** Core application that must be executed by all nodes, for example the value transfer application. 
- **Child (IOTA 2.0):** A transaction that gets referenced by Parents. - **Chrysalis:** The name of the IOTA 1.5 network upgrade. - **Stardust:** The name of the first Shimmer network upgrade. @@ -65,24 +60,20 @@ description: Glossary of all specialized names and phrases used in the IOTA and - **Faucet:** A pool of tokens (funds). Upon uncomplicated request, one gets a limited number of tokens for testing, especially for developers of own apps this is a great help. - **Firefly:** Firefly is a wallet, intended to serve as a platform for the current and future IOTA and Shimmer ecosystem. - **Finality:** The property that once a transaction has been completed, there is no way to reverse or change it. This is the moment when the parties involved in a transfer can consider the transaction completed. Finality can be deterministic or probabilistic. -- **Full nodes (Hornet, Bee):** They form the core (infrastructure) of the network. In order to participate in the peer-to-peer network, the full node must always be online and connected to neighbors (other full nodes). In addition, the transaction database must be synchronized with all other full nodes in the network. The role of full nodes is to interact with clients (wallets, DApps, etc.) and attach their transactions to the ledger, make transactions known to all other full nodes in the network, validate transactions and store them in the ledger. +- **Full nodes (Hornet):** They form the core (infrastructure) of the network. In order to participate in the peer-to-peer network, the full node must always be online and connected to neighbors (other full nodes). In addition, the transaction database must be synchronized with all other full nodes in the network. The role of full nodes is to interact with clients (wallets, DApps, etc.) and attach their transactions to the ledger, make transactions known to all other full nodes in the network, validate transactions and store them in the ledger. 
- **Future Cone:** All messages that directly or indirectly reference a message are called its future cone. - **Fork:** In IT, this is a new development branch after a project is split into a second follow-on project; the source code or parts of it are developed independently of the original parent project. -- **FPC(Fast Probabilistic Consensus):** Consensus that uses a random number and node opinions to reach consensus. In On-Tangle Voting, it is only used in a specific edge case. Check out OTVFPCS. ## G - **Genesis transaction:** The Genesis transaction is the first transaction that created all IOTA and Shimmer tokens and distributed them to the addresses of the buyers. -- **GoShimmer (No Main net):** Prototype of the coordinator less version of IOTA written in the Go programming language. GoShimmer implements the various modules of Coordicide, such as auto peering, node identities, Mana, etc. GoShimmer serves as a test environment for the first alpha version and the test network. Everything tested here will be gradually merged with Hornet and Bee. -- **Generic Data Object (IOTA 2.0):** The most basic object type. All unrecognized data objects are defined this way. ## H - **History:** The list of transactions that were directly or indirectly authorized by a particular transaction. - **Hash values:** Checksums that are applied to the encryption of messages of variable length. Hash values are like fingerprints of a very long data set. Each message is assigned a very specific hash value. - **Hooks:** An interface that allows foreign program code to be integrated into an existing application to extend it, change its flow, or intercept certain events. -- **Hornet Node (IOTA 1.5):** Community-developed IOTA Node written in the Go programming language. In addition, the coordinator also runs as a plugin via Hornet. -- **Hornet Node (Shimmer):** Shimmer Node written in the Go programming language. In addition, the coordinator also runs as a plugin via Hornet. 
+- **Hornet Node:** Node software written in the Go programming language. ## I @@ -119,7 +110,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and - **Object (IOTA 2.0):** the most basic unit of information in the IOTA protocol. Each object has a type and size and contains data. - **Oracles:** Oracles are designed to build a secure bridge between the digital and physical worlds in a decentralized, permissionless way. They bring off-chain data to decentralized applications and smart contracts on the network. - **OTV (IOTA 2.0):** On Tangle Voting is the official name for the multiverse consensus described by Hans Moog. It is a new consensus mechanism that allows nodes to vote on conflicts directly by publishing a message to the tangle. -- **OTVFPCS (IOTA 2.0):** On Tangle Voting with FPCS (Fast Probabilistic Consensus on a Set) is a mechanism for breaking metastability, which can be used in addition to OTV (On Tangle Voting). Generally, in IOTA2.0, reaching a high approval weight is the finality criteria. If the approval weight is high enough, the message / transaction is finalized. With OTVFPC the initial opinion is created with OTV, if after some time the opinions of the nodes are still split, for whatever reason, FPC is activated to break this metastable state. The finality of value transactions should be reached faster this way. ## P @@ -133,7 +123,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and - **Peer to Peer Network:** A decentralized network of different network nodes that are connected to each other and exchange data. - **Peering:** The process of discovering and connecting to other network nodes. - **Payload (IOTA 2.0):** A field in a message that determines the type. Examples are value payload (TransactionType type), FPC opinion payload (StatementType type), dRNG payload (Payload), Salt declaration payload, generic data payload. 
-- **Private Tangle:** A private tangle is comparable to a test network under complete control of the operator. This allows companies and developers to test their applications under self-defined environment variables without external influences and protected from prying eyes. There is no interoperability between a private Tangle and the IOTA or Shimmer Tangle. So, sending from one to the other does not work either. Each private Tangle is an independent network with its own nodes, tokens, and coordinator. - **Proof of Work (PoW):** A time-consuming (expensive) mathematical calculation that uses computational power to prevent spam attacks. It consists of a difficult cryptographic puzzle that is easy to verify. - **Proof of Inclusion (PoI):** With PoI, one is able to provide evidence that a transaction was indirectly referenced by another transaction without having to present the full chain of actual transactions between the two transactions. This is done by using a sequence of hashes instead of the actual transaction data to prove the inclusion of a transaction (inclusion) in the referenced subtangle. - **Pruning:** In computer science, this is a term for simplifying, shortening, and optimizing decision trees. In Shimmer, this is done by local snapshots on each full node. Old transactions that have already been confirmed are deleted from the database, leaving only a file (list) of credits on each address. @@ -167,7 +156,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and - **Tangle:** The Tangle is the underlying core data structure. In mathematical terms it is a directed acyclic graph (DAG). The Tangle is the distributed ledger that stores all transactions. - **Ternary system:** A trit (trinary digit) can have exactly three states (3 x 1 = 3): -1, 0 and 1. Three trits result in one tryte (33 = 27) and can thus represent 27 combinations. In IOTA, the letters A-Z (26 pieces) and the number 9 are used for this purpose. 
- **Token:** The digital currency form (cryptocurrency). It is a powerful tool for value transfer between people and machines. Total number: 2,779,530,283,277,761 IOTA. The base units are IOTA and micros. -- **Trinity (IOTA 1.0):** Depreciated IOTA Wallet - **Tip:** A transaction that has not yet been approved. - **Tip Selection:** The process of selecting previous transactions to be referenced by a new transaction. In these references, a transaction ties into the existing data structure. IOTA and Shimmer only enforces that a transaction approves up to eight other transactions, the tip selection strategy is left to the user (with a good default provided by Shimmer). - **Tip Transaction:** A solid end transaction that is not yet a parent. @@ -179,7 +167,6 @@ description: Glossary of all specialized names and phrases used in the IOTA and ## V -- **Value Layer (IOTA 2.0):** The Value layer builds on the Communication layer. It works exclusively with payloads of type Value object. This layer has several tasks: Forming the ledger state, processing, validation and output of transactions, conflict detection, conflict resolution via FPC, forming a DAG from value objects, tip selection (on value object tips). - **Value Transactions:** Value transactions either withdraw tokens from an address or deposit them to an address. Nodes must verify these transactions to ensure that the sender actually owns the Shimmer tokens and that additional tokens are never generated. To do this, the following checks are performed: All Shimmer tokens withdrawn from an address are also deposited into one or more other addresses; the value of each transaction does not exceed the total global supply; signatures are valid. - **Version Number (IOTA 2.0):** Indicates the correct format of each type. 
diff --git a/docs/maintain/getting-started/welcome.md b/docs/maintain/getting-started/welcome.md index 09b7d3bce60..b5e2718a22a 100644 --- a/docs/maintain/getting-started/welcome.md +++ b/docs/maintain/getting-started/welcome.md @@ -17,11 +17,6 @@ IOTA network as it is made entirely of Hornet nodes. You can use the documentati [Chronicle](/chronicle/welcome) is a permanode solution that allows you to store and retrieve IOTA messages and data in real time. -### GoShimmer - -[GoShimmer](/goshimmer/welcome) is an experimental node software for IOTA's Coordicide aimed at -removing the Coordinator from the IOTA networks. - ## Layer 2 ### WASP - IOTA Smart Contracts diff --git a/docs/maintain/goshimmer/0.9/docs/apis/autopeering.md b/docs/maintain/goshimmer/0.9/docs/apis/autopeering.md deleted file mode 100644 index ff4409e801e..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/autopeering.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -description: The peering API allows retrieving basic information about autopeering using the /autopeering/neighbors endpoint or the GetAutopeeringNeighbors() function in the client lib. -image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - peering api methods - - neighbors - - accepted neighbors - - known peer ---- - -# Peering API Methods - -The peering API allows retrieving basic information about autopeering. - -The API provides the following functions and endpoints: - -- [/autopeering/neighbors](#autopeeringneighbors) - -Client lib APIs: - -- [GetAutopeeringNeighbors()](#client-lib---getautopeeringneighbors) - -## `/autopeering/neighbors` - -Returns the chosen and accepted neighbors of the node. 
- -### Parameters - -| **Parameter** | `known` | -| ------------------------ | ------------------------------------------------- | -| **Required or Optional** | optional | -| **Description** | Return all known peers, set to `1` (default: `0`) | -| **Type** | int | - -### Examples - -#### cURL - -```shell -curl --location 'http://localhost:8080/autopeering/neighbors?known=1' -``` - -#### Client lib - `GetAutopeeringNeighbors` - -Blocks can be retrieved via `GetAutopeeringNeighbors(knownPeers bool) (*jsonmodels.GetNeighborsResponse, error)` - -```go -neighbors, err := goshimAPI.GetAutopeeringNeighbors(false) -if err != nil { - // return error -} - -// will print the response -fmt.Println(string(neighbors)) -``` - -#### Response examples - -```json -{ - "chosen": [ - { - "id": "PtBSYhniWR2", - "publicKey": "BogpestCotcmbB2EYKSsyVMywFYvUt1MwGh6nUot8g5X", - "services": [ - { - "id": "peering", - "address": "178.254.42.235:14626" - }, - { - "id": "gossip", - "address": "178.254.42.235:14666" - } - ] - } - ], - "accepted": [ - { - "id": "CRPFWYijV1T", - "publicKey": "GUdTwLDb6t6vZ7X5XzEnjFNDEVPteU7tVQ9nzKLfPjdo", - "services": [ - { - "id": "peering", - "address": "35.214.101.88:14626" - }, - { - "id": "gossip", - "address": "35.214.101.88:14666" - } - ] - } - ] -} -``` - -#### Results - -- Returned type - -| Return field | Type | Description | -| :----------- | :----------- | :-------------------------------------------------------- | -| `known` | `[]Neighbor` | List of known peers. Only returned when parameter is set. | -| `chosen` | `[]Neighbor` | List of chosen peers. | -| `accepted` | `[]Neighbor` | List of accepted peers. | -| `error` | `string` | Error block. Omitted if success. | - -- Type `Neighbor` - -| field | Type | Description | -| :---------- | :-------------- | :------------------------------------ | -| `id` | `string` | Comparable node identifier. | -| `publicKey` | `string` | Public key used to verify signatures. 
| -| `services` | `[]PeerService` | List of exposed services. | - -- Type `PeerService` - -| field | Type | Description | -| :-------- | :------- | :------------------------------ | -| `id` | `string` | Type of service. | -| `address` | `string` | Network address of the service. | diff --git a/docs/maintain/goshimmer/0.9/docs/apis/client_lib.md b/docs/maintain/goshimmer/0.9/docs/apis/client_lib.md deleted file mode 100644 index c861672f72b..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/client_lib.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: GoShimmer ships with a client Go library which communicates with the HTTP API. -image: /img/logo/goshimmer_light.png -keywords: - - client library - - api - - HTTP API - - golang ---- - -# Client Lib: Interaction With Layers - -:::info - -This guide is meant for developers familiar with the Go programming language. - -::: - -GoShimmer ships with a client Go library which communicates with the HTTP API. Please refer to the [godoc.org docs](https://godoc.org/github.com/iotaledger/goshimmer/client) for function/structure documentation. There is also a set of APIs which do not directly have anything to do with the different layers. Since they are so simple, simply extract their usage from the GoDocs. - -# Use the API - -Simply `go get` the lib via: - -```shell -go get github.com/iotaledger/goshimmer/client -``` - -Init the API by passing in the API URI of your GoShimmer node: - -```go -goshimAPI := client.NewGoShimmerAPI("http://mynode:8080") -``` - -Optionally, define your own `http.Client` to use, in order for example to define custom timeouts: - -```go -goshimAPI := client.NewGoShimmerAPI("http://mynode:8080", client.WithHTTPClient{Timeout: 30 * time.Second}) -``` - -#### A note about errors - -The API issues HTTP calls to the defined GoShimmer node. Non 200 HTTP OK status codes will reflect themselves as `error` in the returned arguments. 
Meaning that for example calling for attachments with a non existing/available transaction on a node, will return an `error` from the respective function. (There might be exceptions to this rule) diff --git a/docs/maintain/goshimmer/0.9/docs/apis/communication.md b/docs/maintain/goshimmer/0.9/docs/apis/communication.md deleted file mode 100644 index 08f7742ba49..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/communication.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -description: The communication layer represents the base Tangle layer where so called `Blocks` are gossiped around. A `Block` contains payloads, and it is up to upper layers to interpret and derive functionality out of them. -image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - block - - encoded block id - - consensus - - payload ---- - -# Communication Layer APIs - -The communication layer represents the base Tangle layer where so called `Blocks` are gossiped around. A `Block` contains payloads and it is up to upper layers to interpret and derive functionality out of them. - -The API provides the following functions to interact with this primitive layer: - -- [/blocks/:blockID](#blocksblockid) -- [/blocks/:blockID/metadata](#blocksblockidmetadata) -- [/data](#data) -- [/blocks/payload](#blockspayload) - -Client lib APIs: - -- [GetBlock()](#client-lib---getblock) -- [GetBlockMetadata()](#client-lib---getblockmetadata) -- [Data()](#client-lib---data) -- [SendPayload()](#client-lib---sendpayload) - -## `/blocks/:blockID` - -Return block from the tangle. - -### Parameters - -| **Parameter** | `blockID` | -| ------------------------ | ------------------------- | -| **Required or Optional** | required | -| **Description** | ID of a block to retrieve | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl --location --request GET 'http://localhost:8080/blocks/:blockID' -``` - -where `:blockID` is the base58 encoded block ID, e.g. 
4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc. - -#### Client lib - `GetBlock` - -Blocks can be retrieved via `GetBlock(base58EncodedID string) (*jsonmodels.Block, error)` - -```go -block, err := goshimAPI.GetBlock(base58EncodedBlockID) -if err != nil { - // return error -} - -// will print "Hello GoShimmer World" -fmt.Println(string(block.Payload)) -``` - -Note that we're getting actual `Block` objects from this call which represent a vertex in the communication layer Tangle. It does not matter what type of payload the block contains, meaning that this will also return blocks which contain a transactions or DRNG payloads. - -### Response Examples - -```json -{ - "id": "4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc", - "strongParents": [ - "6LrXyDCorw8bTWKFaEmm3CZG6Nb6Ga8Bmosi1GPypGc1", - "B89koPthm9zDx1p1fbkHwoyC1Buq896Spu3Mx1SmSete" - ], - "weakParents": [], - "strongChildren": [ - "4E4ucAA9UTTd1UC6ri4GYaS4dpzEnHPjs5gMEYhpUK8p", - "669BRH69afQ7VfZGmNTMTeh2wnwXGKdBxtUCcRQ9CPzq" - ], - "weakChildren": [], - "issuerPublicKey": "9DB3j9cWYSuEEtkvanrzqkzCQMdH1FGv3TawJdVbDxkd", - "issuingTime": 1621873309, - "sequenceNumber": 4354, - "payloadType": "GenericDataPayloadType(0)", - "payload": "BAAAAAAAAAA=", - "signature": "2J5XuVnmaHo54WipirWo7drJeXG3iRsnLYfzaPPuy6TXKiVBqv6ZYg2NjYP75xvgvut1SKNm8oYTchGi5t2SjyWJ" -} -``` - -### Results - -| Return field | Type | Description | -| :---------------- | :--------- | :---------------------------------- | -| `id` | `string` | Block ID. | -| `strongParents` | `[]string` | List of strong parents' block IDs. | -| `weakParents` | `[]string` | List of weak parents' block IDs. | -| `strongChildren` | `[]string` | List of strong children' block IDs. | -| `weakChildren` | `[]string` | List of weak children' block IDs. | -| `issuerPublicKey` | `[]string` | Public key of issuing node. | -| `issuingTime` | `int64` | Time this block was issued | -| `sequenceNumber` | `uint64` | Block sequence number. 
| -| `payloadType` | `string` | Payload type. | -| `payload` | `[]byte` | The contents of the block. | -| `signature` | `string` | Block signature. | -| `error` | `string` | Error block. Omitted if success. | - -## `/blocks/:blockID/metadata` - -Return block metadata. - -### Parameters - -| **Parameter** | `blockID` | -| ------------------------ | ------------------------- | -| **Required or Optional** | required | -| **Description** | ID of a block to retrieve | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl --location --request GET 'http://localhost:8080/blocks/:blockID/metadata' -``` - -where `:blockID` is the base58 encoded block ID, e.g. 4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc. - -#### Client lib - `GetBlockMetadata` - -Block metadata can be retrieved via `GetBlockMetadata(base58EncodedID string) (*jsonmodels.BlockMetadata, error)` - -```go -block, err := goshimAPI.GetBlockMetadata(base58EncodedBlockID) -if err != nil { - // return error -} - -// will print whether block is finalized -fmt.Println(string(block.Finalized)) -``` - -### Response Examples - -```json -{ - "id": "4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc", - "receivedTime": 1621873309, - "solid": true, - "solidificationTime": 1621873309, - "structureDetails": { - "rank": 23323, - "pastMarkerGap": 0, - "isPastMarker": true, - "pastMarkers": { - "markers": { - "1": 21904 - }, - "highestIndex": 21904, - "lowestIndex": 21904 - } - }, - "conflictID": "ConflictID(MasterConflictID)", - "scheduled": false, - "booked": true, - "invalid": false, - "confirmationState": 3, - "confirmationStateTime": 1621873310 -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :----------------- | :---------------------------------------------- | -| `id` | `string` | Block ID. | -| `receivedTime` | `int64` | Time when block was received by the node. | -| `solid` | `bool` | Flag indicating whether the block is solid. 
| -| `solidificationTime` | `int64` | Time when block was solidified by the node. | -| `structureDetails` | `StructureDetails` | List of weak children' block IDs. | -| `conflictID` | `string` | Name of conflict that the block is part of. | -| `scheduled` | `bool` | Flag indicating whether the block is scheduled. | -| `booked` | `bool` | Flag indicating whether the block is booked. | -| `eligible` | `bool` | Flag indicating whether the block is eligible. | -| `invalid` | `bool` | Flag indicating whether the block is invalid. | -| `finalized` | `bool` | Flag indicating whether the block is finalized. | -| `finalizedTime` | `string` | Time when block was finalized. | -| `error` | `string` | Error block. Omitted if success. | - -## `/data` - -Method: `POST` - -A data block is simply a `Block` containing some raw data (literally bytes). This type of block has therefore no real functionality other than that it is retrievable via `GetBlock`. - -### Parameters - -| **Parameter** | `data` | -| ------------------------ | ----------------------- | -| **Required or Optional** | required | -| **Description** | data bytes | -| **Type** | base64 serialized bytes | - -#### Body - -```json -{ - "data": "dataBytes" -} -``` - -### Examples - -#### cURL - -```shell -curl --location --request POST 'http://localhost:8080/data' \ ---header 'Content-Type: application/json' \ ---data-raw '{"data": "dataBytes"}' -``` - -#### Client lib - `Data` - -##### `Data(data []byte) (string, error)` - -```go -blockID, err := goshimAPI.Data([]byte("Hello GoShimmer World")) -if err != nil { - // return error -} -``` - -Note that there is no need to do any additional work, since things like tip-selection, PoW and other tasks are done by the node itself. - -### Response Examples - -```json -{ - "id": "blockID" -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------- | :--------------------------------------- | -| `id` | `string` | Block ID of the block. Omitted if error. 
| -| `error` | `string` | Error block. Omitted if success. | - -## `/blocks/payload` - -Method: `POST` - -`SendPayload()` takes a `payload` object of any type (data, transaction, drng, etc.) as a byte slice, issues a block with the given payload and returns its `blockID`. Note that the payload must be valid, otherwise an error is returned. - -### Parameters - -| **Parameter** | `payload` | -| ------------------------ | ----------------------- | -| **Required or Optional** | required | -| **Description** | payload bytes | -| **Type** | base64 serialized bytes | - -#### Body - -```json -{ - "payload": "payloadBytes" -} -``` - -### Examples - -#### cURL - -```shell -curl --location --request POST 'http://localhost:8080/blocks/payload' \ ---header 'Content-Type: application/json' \ ---data-raw '{"payload": "payloadBytes"}' -``` - -#### Client lib - `SendPayload` - -##### `SendPayload(payload []byte) (string, error)` - -```go -helloPayload := payload.NewData([]byte{"Hello GoShimmer World!"}) -blockID, err := goshimAPI.SendPayload(helloPayload.Bytes()) -``` - -### Response Examples - -```shell -{ - "id": "blockID" -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------- | :--------------------------------------- | -| `id` | `string` | Block ID of the block. Omitted if error. | -| `error` | `string` | Error block. Omitted if success. | - -Note that there is no need to do any additional work, since things like tip-selection, PoW and other tasks are done by the node itself. diff --git a/docs/maintain/goshimmer/0.9/docs/apis/faucet.md b/docs/maintain/goshimmer/0.9/docs/apis/faucet.md deleted file mode 100644 index 335b25f4fdc..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/faucet.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -description: The Faucet endpoint allows requesting funds from the Faucet. 
-image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - tokens - - funds - - address - - faucet - - testnet - - node Id ---- - -# Faucet API Methods - -Faucet endpoint allows requesting funds from the Faucet. - -The API provides the following functions and endpoints: - -- [/faucet](#faucet) - -Client lib APIs: - -- [SendFaucetRequest()](#client-lib---sendfaucetrequest) - -## `/faucet` - -Method: `POST` - -POST request asking for funds from the faucet to be transferred to address in the request. - -### Parameters - -| **Parameter** | `address` | -| ------------------------ | -------------------------- | -| **Required or Optional** | required | -| **Description** | address to pledge funds to | -| **Type** | string | - -| **Parameter** | `accessManaPledgeID` | -| ------------------------ | -------------------------------- | -| **Required or Optional** | optional | -| **Description** | node ID to pledge access mana to | -| **Type** | string | - -| **Parameter** | `consensusManaPledgeID` | -| ------------------------ | ----------------------------------- | -| **Required or Optional** | optional | -| **Description** | node ID to pledge consensus mana to | -| **Type** | string | - -| **Parameter** | `powTarget` | -| ------------------------ | ------------------------------------------------------ | -| **Required or Optional** | required | -| **Description** | proof of the PoW being done, **only used in HTTP api** | -| **Type** | uint64 | - -| **Parameter** | `nonce` | -| ------------------------ | ----------------------------------------------------------- | -| **Required or Optional** | required | -| **Description** | target Proof of Work difficulty,**only used in client lib** | -| **Type** | uint64 | - -#### Body - -```json -{ - "address": "target address", - "accessManaPledgeID": "nodeID", - "consensusManaPledgeID": "nodeID", - "nonce": 50 -} -``` - -### Examples - -#### cURL - -```shell -curl --location --request POST 
'http://localhost:8080/faucet' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "address": "target address", - "accessManaPledgeID": "nodeID", - "consensusManaPledgeID": "nodeID", - "nonce": 50 -}' -``` - -#### Client lib - SendFaucetRequest - -##### `SendFaucetRequest(base58EncodedAddr string, powTarget int, pledgeIDs ...string) (*jsonmodels.FaucetResponse, error)` - -```go -_, err = webConnector.client.SendFaucetRequest(addr.Address().Base58(), powTarget) -if err != nil { - // return error -} -``` - -### Response examples - -```json -{ - "id": "4MSkwAPzGwnjCJmTfbpW4z4GRC7HZHZNS33c2JikKXJc" -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------- | :------------------------------------------------ | -| `id` | `string` | Block ID of the faucet request. Omitted if error. | -| `error` | `string` | Error block. Omitted if success. | diff --git a/docs/maintain/goshimmer/0.9/docs/apis/info.md b/docs/maintain/goshimmer/0.9/docs/apis/info.md deleted file mode 100644 index c165841f6e4..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/info.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -description: Info API returns basic info about the node with the /info and /healthz endpoints and the info() function. -image: /img/logo/goshimmer_light.png -keywords: - - info - - endpoint - - function - - health - - healthz - - client lib ---- - -# Info API Methods - -Info API returns basic info about the node - -The API provides the following functions and endpoints: - -- [/info](#info) -- [/healthz](#healthz) - -Client lib APIs: - -- [Info()](#client-lib---info) - -## `/info` - -Returns basic info about the node. - -### Parameters - -None. 
- -### Examples - -#### cURL - -```shell -curl --location 'http://localhost:8080/info' -``` - -#### Client lib - `Info` - -Information of a node can be retrieved via `Info() (*jsonmodels.InfoResponse, error)` - -```go -info, err := goshimAPI.Info() -if err != nil { - // return error -} - -// will print the response -fmt.Println(string(info)) -``` - -#### Response example - -```json -{ - "version": "v0.6.2", - "networkVersion": 30, - "tangleTime": { - "blockID": "6ndfmfogpH9H8C9X9Fbb7Jmuf8RJHQgSjsHNPdKUUhoJ", - "time": 1621879864032595415, - "synced": true - }, - "identityID": "D9SPFofAGhA5V9QRDngc1E8qG9bTrnATmpZMdoyRiBoW", - "identityIDShort": "XBgY5DsUPng", - "publicKey": "9DB3j9cWYSuEEtkvanrzqkzCQMdH1FGv3TawJdVbDxkd", - "solidBlockCount": 74088, - "totalBlockCount": 74088, - "enabledPlugins": [ - "Activity", - "AnalysisClient", - "AutoPeering", - "Banner", - "CLI", - "Clock", - "Config", - "Consensus", - "DRNG", - "Dashboard", - "Database", - "Gossip", - "GracefulShutdown", - "Logger", - "Mana", - "ManaRefresher", - "ManualPeering", - "BlockLayer", - "Metrics", - "NetworkDelay", - "PoW", - "PortCheck", - "Profiling", - "Prometheus", - "RemoteLog", - "RemoteLogMetrics", - "WebAPI", - "WebAPIDRNGEndpoint", - "WebAPIManaEndpoint", - "WebAPIWeightProviderEndpoint", - "WebAPIAutoPeeringEndpoint", - "WebAPIDataEndpoint", - "WebAPIFaucetEndpoint", - "WebAPIHealthzEndpoint", - "WebAPIInfoEndpoint", - "WebAPILedgerstateEndpoint", - "WebAPIBlockEndpoint", - "WebAPIToolsEndpoint", - "snapshot" - ], - "disabledPlugins": [ - "AnalysisDashboard", - "AnalysisServer", - "Faucet", - "ManaEventLogger", - "Spammer", - "TXStream" - ], - "mana": { - "access": 1, - "accessTimestamp": "2021-05-24T20:11:05.451224937+02:00", - "consensus": 10439991680906, - "consensusTimestamp": "2021-05-24T20:11:05.451228137+02:00" - }, - "manaDelegationAddress": "1HMQic52dz3xLY2aeDXcDhX53LgbsHghdfD8eGXR1qVHy", - "mana_decay": 0.00003209, - "scheduler": { - "running": true, - "rate": "5ms", - 
"nodeQueueSizes": {} - }, - "rateSetter": { - "rate": 20000, - "size": 0 - } -} -``` - -#### Results - -| Return field | Type | Description | -| :---------------------- | :----------- | :---------------------------------------------------------------------------- | -| `version` | `String` | Version of GoShimmer. | -| `networkVersion` | `uint32` | Network Version of the autopeering. | -| `tangleTime` | `TangleTime` | TangleTime sync status | -| `identityID` | `string` | Identity ID of the node encoded in base58. | -| `identityIDShort` | `string` | Identity ID of the node encoded in base58 and truncated to its first 8 bytes. | -| `publicKey` | `string` | Public key of the node encoded in base58 | -| `blockRequestQueueSize` | `int` | The number of blocks a node is trying to request from neighbors. | -| `solidBlockCount` | `int` | The number of solid blocks in the node's database. | -| `totalBlockCount` | `int` | The number of blocks in the node's database. | -| `enabledPlugins` | `[]string` | List of enabled plugins. | -| `disabledPlugins` | `[]string` | List if disabled plugins. | -| `mana` | `Mana` | Mana values. | -| `manaDelegationAddress` | `string` | Mana Delegation Address. | -| `mana_decay` | `float64` | The decay coefficient of `bm2`. | -| `scheduler` | `Scheduler` | Scheduler is the scheduler used. | -| `rateSetter` | `RateSetter` | RateSetter is the rate setter used. | -| `error` | `string` | Error block. Omitted if success. | - -- Type `TangleTime` - -| field | Type | Description | -| :-------- | :------- | :------------------------------------------- | -| `blockID` | `string` | ID of the last confirmed block. | -| `time` | `int64` | Issue timestamp of the last confirmed block. | -| `synced` | `bool` | Flag indicating whether node is in sync. 
| - -- Type `Scheduler` - -| field | Type | Description | -| :--------------- | :--------------- | :--------------------------------------------- | -| `running` | `bool` | Flag indicating whether Scheduler has started. | -| `rate` | `string` | Rate of the scheduler. | -| `nodeQueueSizes` | `map[string]int` | The size for each node queue. | - -- Type `RateSetter` - -| field | Type | Description | -| :----- | :-------- | :----------------------------- | -| `rate` | `float64` | The rate of the rate setter.. | -| `size` | `int` | The size of the issuing queue. | - -- Type `Mana` - -| field | Type | Description | -| :------------------- | :---------- | :------------------------------------------- | -| `access` | `float64` | Access mana assigned to the node. | -| `accessTimestamp` | `time.Time` | Time when the access mana was calculated. | -| `consensus` | `float64` | Consensus mana assigned to the node. | -| `consensusTimestamp` | `time.Time` | Time when the consensus mana was calculated. | - -## `/healthz` - -Returns HTTP code 200 if everything is running correctly. - -### Parameters - -None. - -### Examples - -#### cURL - -```shell -curl --location 'http://localhost:8080/healthz' -``` - -#### Client lib - -This method is not available in client lib - -#### Results - -Empty response with HTTP 200 success code if everything is running correctly. -Error block is returned if failed. diff --git a/docs/maintain/goshimmer/0.9/docs/apis/ledgerstate.md b/docs/maintain/goshimmer/0.9/docs/apis/ledgerstate.md deleted file mode 100644 index af14ff8ecee..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/ledgerstate.md +++ /dev/null @@ -1,1174 +0,0 @@ ---- -description: The ledgerstate API provides endpoints to retrieve address details, unspent outputs for an address, get conflict details, and list child conflicts amongst others. 
-image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - addresses - - conflicts - - outputs - - transactions - - UTXO - - unspent outputs ---- - -# Ledgerstate API Methods - -## HTTP APIs - -- [/ledgerstate/addresses/:address](#ledgerstateaddressesaddress) -- [/ledgerstate/addresses/:address/unspentOutputs](#ledgerstateaddressesaddressunspentoutputs) -- [/ledgerstate/conflicts/:conflictID](#ledgerstateconflictsconflictid) -- [/ledgerstate/conflicts/:conflictID/children](#ledgerstateconflictsconflictidchildren) -- [/ledgerstate/conflicts/:conflictID/conflicts](#ledgerstateconflictsconflictidconflicts) -- [/ledgerstate/conflicts/:conflictID/voters](#ledgerstateconflictsconflictidvoters) -- [/ledgerstate/outputs/:outputID](#ledgerstateoutputsoutputid) -- [/ledgerstate/outputs/:outputID/consumers](#ledgerstateoutputsoutputidconsumers) -- [/ledgerstate/outputs/:outputID/metadata](#ledgerstateoutputsoutputidmetadata) -- [/ledgerstate/transactions/:transactionID](#ledgerstatetransactionstransactionid) -- [/ledgerstate/transactions/:transactionID/metadata](#ledgerstatetransactionstransactionidmetadata) -- [/ledgerstate/transactions/:transactionID/attachments](#ledgerstatetransactionstransactionidattachments) -- [/ledgerstate/transactions](#ledgerstatetransactions) -- [/ledgerstate/addresses/unspentOutputs](#ledgerstateaddressesunspentoutputs) - -## Client Lib APIs - -- [GetAddressOutputs()](#client-lib---getaddressoutputs) -- [GetAddressUnspentOutputs()](#client-lib---getaddressunspentoutputs) -- [GetConflict()](#client-lib---getconflict) -- [GetConflictChildren()](#client-lib---getconflictchildren) -- [GetConflictConflicts()](#client-lib---getconflictconflicts) -- [GetConflictVoters()](#client-lib---getconflictvoters) -- [GetOutput()](#client-lib---getoutput) -- [GetOutputConsumers()](#client-lib---getoutputconsumers) -- [GetOutputMetadata()](#client-lib---getoutputmetadata) -- [GetTransaction()](#client-lib---gettransaction) -- 
[GetTransactionMetadata()](#client-lib---gettransactionmetadata) -- [GetTransactionAttachments()](#client-lib---gettransactionattachments) -- [PostTransaction()](#client-lib---posttransaction) -- [PostAddressUnspentOutputs()](#client-lib---postaddressunspentoutputs) - -## `/ledgerstate/addresses/:address` - -Get address details for a given base58 encoded address ID, such as output types and balances. For the client library API call balances will not be directly available as values because they are stored as a raw block. Balance can be read after retrieving `ledgerstate.Output` instance, as presented in the examples. - -### Parameters - -| **Parameter** | `address` | -| ------------------------ | ------------------------------ | -| **Required or Optional** | required | -| **Description** | The address encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/addresses/:address \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:address` is the base58 encoded address, e.g. 6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK. 
- -#### Client lib - `GetAddressOutputs()` - -```Go -resp, err := goshimAPI.GetAddressOutputs("6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK") -if err != nil { - // return error -} -fmt.Println("output address: ", resp.Address) - -for _, output := range resp.Outputs { - fmt.Println("outputID: ", output.OutputID) - fmt.Println("output type: ", output.Type) - // get output instance - out, err = output.ToLedgerstateOutput() -} -``` - -### Response Examples - -```json -{ - "address": { - "type": "AddressTypeED25519", - "base58": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp" - }, - "outputs": [ - { - "outputID": { - "base58": "gdFXAjwsm5kDeGdcZsJAShJLeunZmaKEMmfHSdoX34ZeSs", - "transactionID": "32yHjeZpghKNkybd2iHjXj7NsUdR63StbJcBioPGAut3", - "outputIndex": 0 - }, - "type": "SigLockedColoredOutputType", - "output": { - "balances": { - "11111111111111111111111111111111": 1000000 - }, - "address": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp" - } - } - ] -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------ | :---------------------------------------------- | -| `address` | Address | The address corresponding to provided outputID. | -| `outputs` | Output | List of transactions' outputs. | - -#### Type `Address` - -| Field | Type | Description | -| :------- | :----- | :------------------------------- | -| `type` | string | The type of an address. | -| `base58` | string | The address encoded with base58. | - -#### Type `Output` - -| Field | Type | Description | -| :----------- | :------- | :------------------------------------------------------------------- | -| `outputID` | OutputID | The identifier of an output. | -| `outputType` | string | The type of the output. | -| `output` | string | An output raw block containing balances and corresponding addresses. 
| - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -## `/ledgerstate/addresses/:address/unspentOutputs` - -Gets list of all unspent outputs for the address based on a given base58 encoded address ID. - -### Parameters - -| **Parameter** | `address` | -| ------------------------ | ------------------------------ | -| **Required or Optional** | required | -| **Description** | The address encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/addresses/:address/unspentOutputs \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:address` is the base58 encoded address, e.g. 6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK. - -#### Client lib - `GetAddressUnspentOutputs()` - -```Go -address := "6PQqFcwarCVbEMxWFeAqj7YswK842dMtf84qGyKqVH7s1kK" -resp, err := goshimAPI.GetAddressUnspentOutputs(address) -if err != nil { - // return error -} -fmt.Println("output address: ", resp.Address) - -for _, output := range resp.Outputs { - fmt.Println("outputID: ", output.OutputID) - fmt.Println("output type: ", output.Type) - // get output instance - out, err = output.ToLedgerstateOutput() -} -``` - -### Response Examples - -```json -{ - "address": { - "type": "AddressTypeED25519", - "base58": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp" - }, - "outputs": [ - { - "outputID": { - "base58": "gdFXAjwsm5kDeGdcZsJAShJLeunZmaKEMmfHSdoX34ZeSs", - "transactionID": "32yHjeZpghKNkybd2iHjXj7NsUdR63StbJcBioPGAut3", - "outputIndex": 0 - }, - "type": "SigLockedColoredOutputType", - "output": { - "balances": { - "11111111111111111111111111111111": 1000000 - }, - "address": "18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp" - } - } - ] 
-} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------ | :------------------------------------------------------ | -| `address` | Address | The address corresponding to provided unspent outputID. | -| `outputs` | Output | List of transactions' unspent outputs. | - -#### Type `Address` - -| Field | Type | Description | -| :------- | :----- | :------------------------------- | -| `type` | string | The type of an address. | -| `base58` | string | The address encoded with base58. | - -#### Type `Output` - -| Field | Type | Description | -| :----------- | :------- | :------------------------------------------------------------------ | -| `outputID` | OutputID | The identifier of an output. | -| `outputType` | string | The type of the output. | -| `output` | string | An output raw block containing balances and corresponding addresses | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -## `/ledgerstate/conflicts/:conflictID` - -Gets a conflict details for a given base58 encoded conflict ID. - -### Parameters - -| **Parameter** | `conflictID` | -| ------------------------ | ---------------------------------- | -| **Required or Optional** | required | -| **Description** | The conflict ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/conflicts/:conflictID \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ. 
- -#### Client lib - `GetConflict()` - -```Go -resp, err := goshimAPI.GetConflict("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ") -if err != nil { - // return error -} -fmt.Println("conflict ID: ", resp.ID) -fmt.Println("conflict type: ", resp.Type) -fmt.Println("conflict inclusion state: ", resp.ConfirmationState) -fmt.Println("conflict parents IDs: ", resp.Parents) -fmt.Println("conflict conflicts IDs: ", resp.ConflictIDs) -fmt.Printf("liked: %v, finalized: %v, monotonically liked: %v", resp.Liked, resp.Finalized, resp.MonotonicallyLiked) -``` - -### Response Examples - -```json -{ - "id": "5v6iyxKUSSF73yoZa6YngNN5tqoX8hJQWKGXrgcz3XTg", - "type": "ConflictConflictType", - "parents": ["4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM"], - "conflictIDs": ["3LrHecDf8kvDGZKTAYaKmvdsqXA18YBc8A9UePu7pCxw5ks"], - "liked": false, - "monotonicallyLiked": false, - "finalized": false, - "confirmationState": "ConfirmationState(Pending)" -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :------- | :-------------------------------------------------------- | -| `id` | string | The conflict identifier encoded with base58. | -| `type` | string | The type of the conflict. | -| `parents` | []string | The list of parent conflicts IDs. | -| `conflictIDs` | []string | The list of conflicts identifiers. | -| `liked` | bool | The boolean indicator if conflict is liked. | -| `monotonicallyLiked` | bool | The boolean indicator if conflict is monotonically liked. | -| `finalized` | bool | The boolean indicator if conflict is finalized. | -| `confirmationState` | string | Confirmation state of a conflict. | - -## `/ledgerstate/conflicts/:conflictID/children` - -Gets a list of all child conflicts for a conflict with given base58 encoded conflict ID. 
- -### Parameters - -| **Parameter** | `conflictID` | -| ------------------------ | ---------------------------------- | -| **Required or Optional** | required | -| **Description** | The conflict ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/conflicts/:conflictID/children \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ. - -#### Client lib - `GetConflictChildren()` - -```Go -resp, err := goshimAPI.GetConflictChildren("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ") -if err != nil { - //return error -} -fmt.Printf("All children conflicts for conflict %s:\n", resp.ConflictID) -for _, conflict := range resp.ChildConflicts { - fmt.Println("conflictID: ", conflict.ConflictID) - fmt.Printf("type: %s\n", conflict.ConflictID) -} -``` - -### Response Examples - -```json -{ - "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "childConflicts": [ - { - "conflictID": "4SdXm5NXEcVogiJNEKkecqd5rZzRYeGYBj8oBNsdX91W", - "type": "AggregatedConflictType" - } - ] -} -``` - -### Results - -| Return field | Type | Description | -| :--------------- | :-------------- | :------------------------------------------- | -| `conflictID` | string | The conflict identifier encoded with base58. | -| `childConflicts` | []ChildConflict | The child conflicts data. | - -#### Type `ChildConflict` - -| Field | Type | Description | -| :----------- | :----- | :------------------------------------------- | -| `conflictID` | string | The conflict identifier encoded with base58. | -| `type` | string | The type of the conflict. | - -## `/ledgerstate/conflicts/:conflictID/conflicts` - -Get all conflicts for a given conflict ID, their outputs and conflicting conflicts. 
- -### Parameters - -| **Parameter** | `conflictID` | -| ------------------------ | ---------------------------------------------- | -| **Required or Optional** | required | -| **Description** | The conflicting conflict ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/conflicts/:conflictID/conflicts \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ. - -#### Client lib - `GetConflictConflicts()` - -```Go -resp, err := goshimAPI.GetConflictConflicts("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ") -if err != nil { - // return error -} -fmt.Printf("All conflicts for conflict %s:\n", resp.ConflictID) -// iterate over all conflicts -for _, conflict := range resp.Conflicts { - fmt.Println("output ID: ", conflict.OutputID.Base58) - fmt.Println("conflicting transaction ID: ", conflict.OutputID.TransactionID) - fmt.Printf("related conflicts: %v\n", conflict.ConflictIDs) -} -``` - -### Response Examples - -```json -{ - "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "conflicts": [ - { - "outputID": { - "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK", - "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g", - "outputIndex": 0 - }, - "conflictIDs": [ - "b8QRhHerfg14cYQ4VFD7Fyh1HYTCbjt9aK1XJmdoXwq", - "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV" - ] - } - ] -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :--------- | :------------------------------------------- | -| `conflictID` | string | The conflict identifier encoded with base58. | -| `conflicts` | []Conflict | The conflict data. | - -#### Type `Conflict` - -| Field | Type | Description | -| :------------ | :------- | :---------------------------------------------------------- | -| `outputID` | OutputID | The conflict identifier encoded with base58. 
| -| `conflictIDs` | []string | The identifiers of all related conflicts encoded in base58. | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -## `/ledgerstate/conflicts/:conflictID/voters` - -Get a list of voters of a given conflictID. - -| **Parameter** | `conflictID` | -| ------------------------ | ---------------------------------- | -| **Required or Optional** | required | -| **Description** | The conflict ID encoded in base58. | -| **Type** | string | - -### Examples - -### cURL - -```shell -curl http://localhost:8080/ledgerstate/conflicts/:conflictID/voters \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:conflictID` is the ID of the conflict, e.g. 2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ. - -#### Client lib - `GetConflictVoters()` - -```Go -resp, err := goshimAPI.GetConflictVoters("2e2EU6fhxRhrXVnYQ6US4zmUkE5YJip25ecafn8gZeoZ") -if err != nil { - // return error -} -fmt.Printf("All voters for conflict %s:\n", resp.ConflictID) -// iterate over all voters -for _, voter := range resp.Voters { - fmt.Println("ID: ", voter) -} -``` - -### Response examples - -```json -{ - "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "voters": [ - "b8QRhHerfg14cYQ4VFD7Fyh1HYTCbjt9aK1XJmdoXwq", - "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK" - ] -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :-------- | :------------------------------------------- | -| `conflictID` | string | The conflict identifier encoded with base58. 
| -| `voters` | [] string | The list of conflict voter IDs | - -## `/ledgerstate/outputs/:outputID` - -Get an output details for a given base58 encoded output ID, such as output types, addresses, and their corresponding balances. -For the client library API call balances will not be directly available as values because they are stored as a raw block. - -### Parameters - -| **Parameter** | `outputID` | -| ------------------------ | -------------------------------- | -| **Required or Optional** | required | -| **Description** | The output ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/outputs/:outputID \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:outputID` is the ID of the output, e.g. 41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK. - -#### Client lib - `GetOutput()` - -```Go -resp, err := goshimAPI.GetOutput("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK") -if err != nil { - // return error -} -fmt.Println("outputID: ", resp.OutputID.Base58) -fmt.Println("output type: ", resp.Type) -fmt.Println("transactionID: ", resp.OutputID.TransactionID) -``` - -### Response Examples - -```json -{ - "outputID": { - "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK", - "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g", - "outputIndex": 0 - }, - "type": "SigLockedColoredOutputType", - "output": { - "balances": { - "11111111111111111111111111111111": 1000000 - }, - "address": "1F95a2yceDicNLvqod6P3GLFZDAFdwizcTTYow4Y1G3tt" - } -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------- | :------------------------------------------------------------------ | -| `outputID` | OutputID | The identifier of an output. | -| `outputType` | string | The type of the output. 
| -| `output` | string | An output raw block containing balances and corresponding addresses | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -## `/ledgerstate/outputs/:outputID/consumers` - -Get a list of consumers based on a provided base58 encoded output ID. Transactions that contains the output and information about its validity. - -### Parameters - -| **Parameter** | `outputID` | -| ------------------------ | -------------------------------- | -| **Required or Optional** | required | -| **Description** | The output ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/outputs/:outputID/consumers \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:outputID` is the ID of the output, e.g. 41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK. 
- -#### Client lib - `GetOutputConsumers()` - -```Go -resp, err := goshimAPI.GetOutputConsumers("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK") -if err != nil { - // return error -} -fmt.Println("outputID: ", resp.OutputID.Base58) -// iterate over output consumers -for _, consumer := range resp.Consumers { - fmt.Println("transactionID: ", consumer.TransactionID) - fmt.Println("valid: ", consumer.Valid) -} -``` - -### Response Examples - -```json -{ - "outputID": { - "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK", - "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g", - "outputIndex": 0 - }, - "consumers": [ - { - "transactionID": "b8QRhHerfg14cYQ4VFD7Fyh1HYTCbjt9aK1XJmdoXwq", - "valid": "true" - }, - { - "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "valid": "true" - } - ] -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :--------- | :----------------------------------------- | -| `outputID` | OutputID | The output identifier encoded with base58. | -| `consumers` | []Consumer | Consumers of the requested output. | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -#### Type `Consumers` - -| Field | Type | Description | -| :-------------- | :----- | :------------------------------------------------- | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `valid` | string | The boolean indicator if the transaction is valid. | - -## `/ledgerstate/outputs/:outputID/metadata` - -Gets an output metadata for a given base58 encoded output ID. 
- -### Parameters - -| **Parameter** | `outputID` | -| ------------------------ | -------------------------------- | -| **Required or Optional** | required | -| **Description** | The output ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/outputs/:outputID/metadata \ --X GET \ --H 'Content-Type: application/json' - -``` - -where `:outputID` is the ID of the output, e.g. 41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK. - -#### Client lib - `GetOutputMetadata()` - -```Go -resp, err := goshimAPI.GetOutputMetadata("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK") -if err != nil { - // return error -} -fmt.Printf("Metadata of an output %s:\n", resp.OutputID.Base58) -fmt.Println("conflictID: ", resp.ConflictID) -fmt.Println("first consumer: ", resp.FirstConsumer) -fmt.Println("number of consumers: ", resp.ConsumerCount) -fmt.Printf("finalized: %v, solid: %v\n", resp.Finalized, resp.Solid) -fmt.Println("solidification time: ", time.Unix(resp.SolidificationTime, 0)) -``` - -### Response Examples - -```json -{ - "outputID": { - "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK", - "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g", - "outputIndex": 0 - }, - "conflictID": "4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM", - "solid": true, - "solidificationTime": 1621889327, - "consumerCount": 2, - "firstConsumer": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "finalized": true -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :------- | :----------------------------------------------------- | -| `outputID` | OutputID | The output identifier encoded with base58. | -| `conflictID` | string | The identifier of the conflict encoded with base58. | -| `solid` | bool | The boolean indicator if the block is solid. | -| `solidificationTime` | int64 | The time of solidification of a block. | -| `consumerCount` | int | The number of consumers. 
| -| `firstConsumer` | string | The first consumer of the output. | -| `finalized` | bool | The boolean indicator if the transaction is finalized. | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -## `/ledgerstate/transactions/:transactionID` - -Gets a transaction details for a given base58 encoded transaction ID. - -### Parameters - -| **Parameter** | `transactionID` | -| ------------------------ | ------------------------------------- | -| **Required or Optional** | required | -| **Description** | The transaction ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/transactions/:transactionID \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:transactionID` is the ID of the conflict, e.g. HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV. 
- -#### Client lib - `GetTransaction()` - -```Go -resp, err := goshimAPI.GetTransaction("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK") -if err != nil { - // return error -} -fmt.Println("transaction inputs:") -for _, input := range resp.Inputs { - fmt.Println("inputID:", input.ReferencedOutputID.Base58) -} -fmt.Println("transaction outputs:") -for _, output := range resp.Outputs{ - fmt.Println("outputID:", output.OutputID.Base58) - fmt.Println("output type:", output.Type) -} -fmt.Println("access mana pledgeID:", resp.AccessPledgeID) -fmt.Println("consensus mana pledgeID:", resp.ConsensusPledgeID) -``` - -### Response Examples - -```json -{ - "version": 0, - "timestamp": 1621889348, - "accessPledgeID": "DsHT39ZmwAGrKQe7F2rAjwHseUnJeY89gDPEH1FJxYdH", - "consensusPledgeID": "DsHT39ZmwAGrKQe7F2rAjwHseUnJeY89gDPEH1FJxYdH", - "inputs": [ - { - "type": "UTXOInputType", - "referencedOutputID": { - "base58": "41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK", - "transactionID": "9wr21zza46Y5QonKEHNQ6x8puA7Rbq5LAbsQZJCK1g1g", - "outputIndex": 0 - } - } - ], - "outputs": [ - { - "outputID": { - "base58": "6gMWUCgJDozmyLeGzW3ibGFicEq2wbhsxgAw8rUVPvn9bj5", - "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "outputIndex": 0 - }, - "type": "SigLockedColoredOutputType", - "output": { - "balances": { - "11111111111111111111111111111111": 1000000 - }, - "address": "1HrUn1jWAjrMU58LLdFhfnWBwUKVdWjP5ojp7oCL9mVWs" - } - } - ], - "unlockBlocks": [ - { - "type": "SignatureUnlockBlockType", - "publicKey": "12vNcfgRHLSsobeqZFrjFRcVAmFQbDVniguPnEoxmkbG", - "signature": "4isq3qzhY4MwbSeYM2NgRn5noWAyh5rqD12ruiTQ7P89TfXNecwHZ5nbpDc4UB7md1bkfM1xYtSh18FwLqK8HAC6" - } - ], - "dataPayload": "" -} -``` - -### Results - -| Return field | Type | Description | -| :------------------ | :------------ | :--------------------------------------------------------------------------------------------------- | -| `version` | uint8 | The version of the transaction essence. 
| -| `timestamp` | int64 | The issuing time of the transaction. | -| `accessPledgeID` | string | The node ID indicating to which node pledge the access mana. | -| `consensusPledgeID` | string | The node ID indicating to which node pledge the consensus mana. | -| `inputs` | []Input | The inputs of the transaction. | -| `outputs` | []Output | The outputs of the transaction. | -| `unlockBlocks` | []UnlockBlock | The unlock block containing signatures unlocking the inputs or references to previous unlock blocks. | -| `dataPayload` | []byte | The raw data payload that can be attached to the transaction. | - -#### Type `Input` - -| Field | Type | Description | -| :------------------- | :----------------- | :---------------------------------------------------------- | -| `Type` | string | The type of input. | -| `ReferencedOutputID` | ReferencedOutputID | The output ID that is used as an input for the transaction. | - -#### Type `ReferencedOutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------------- | -| `base58` | string | The referenced output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of a referenced output. | - -#### Type `Output` - -| Field | Type | Description | -| :----------- | :------- | :------------------------------------------------------------------- | -| `outputID` | OutputID | The identifier of an output. | -| `outputType` | string | The type of the output. | -| `output` | string | An output raw block containing balances and corresponding addresses. | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. 
| - -#### Type `UnlockBlock` - -| Field | Type | Description | -| :---------------- | :----- | :---------------------------------------------------------------------------------------------- | -| `type` | string | The unlock block type: signature or reference. | -| `referencedIndex` | uint16 | The reference index of an unlock block. | -| `signatureType` | uint8 | The unlock block signature type: ED25519 or BLS. | -| `publicKey` | string | The public key of a transaction owner. | -| `signature` | string | The string representation of a signature encoded with base58 signed over a transaction essence. | - -## `/ledgerstate/transactions/:transactionID/metadata` - -Gets a transaction metadata for a given base58 encoded transaction ID. - -### Parameters - -| **Parameter** | `transactionID` | -| ------------------------ | ------------------------------------- | -| **Required or Optional** | required | -| **Description** | The transaction ID encoded in base58. | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/transactions/:transactionID/metadata \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:transactionID` is the ID of the conflict, e.g. HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV. 
- -#### Client lib - `GetTransactionMetadata()` - -```Go -resp, err := goshimAPI.GetTransactionMetadata("41GvDSQnd12e4nWnd2WzmdLmffruXqsE46jgeUbnB8s1QnK") -if err != nil { - // return error -} -fmt.Println("transactionID:", resp.TransactionID) -fmt.Println("conflictID:", resp.ConflictID) -fmt.Printf("conflict lazy booked: %v, solid: %v, finalized: %v\n", resp.LazyBooked, resp.Solid, resp.Finalized) -fmt.Println("solidification time:", time.Unix(resp.SolidificationTime, 0)) -``` - -### Response Examples - -```json -{ - "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "conflictID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "solid": true, - "solidificationTime": 1621889358, - "finalized": true, - "lazyBooked": false -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :----- | :--------------------------------------------------------- | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `conflictID` | string | The conflict identifier of the transaction. | -| `solid` | bool | The boolean indicator if the transaction is solid. | -| `solidificationTime` | uint64 | The time of solidification of the transaction. | -| `finalized` | bool | The boolean indicator if the transaction is finalized. | -| `lazyBooked` | bool | The boolean indicator if the transaction is lazily booked. | - -## `/ledgerstate/transactions/:transactionID/attachments` - -Gets the list of blocks IDs with attachments of the base58 encoded transaction ID. - -### Parameters - -| **Parameter** | `transactionID` | -| ------------------------ | ------------------------------------- | -| **Required or Optional** | required | -| **Description** | The transaction ID encoded in base58. 
| -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/transactions/:transactionID/attachments \ --X GET \ --H 'Content-Type: application/json' -``` - -where `:transactionID` is the ID of the conflict, e.g. HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV. - -#### Client lib - `GetTransactionAttachments()` - -```Go -resp, err := goshimAPI.GetTransactionAttachments("DNSN8GaCeep6CVuUV6KXAabXkL3bv4PUP4NkTNKoZMqS") -if err != nil { - // return error -} -fmt.Printf("Blocks IDs containing transaction %s:\n", resp.TransactionID) -for _, blkID := range resp.BlockIDs { - fmt.Println(blkID) -} -``` - -### Response Examples - -```json -{ - "transactionID": "HuYUAwCeexmBePNXx5rNeJX1zUvUdUUs5LvmRmWe7HCV", - "blockIDs": ["J1FQdMcticXiiuKMbjobq4zrYGHagk2mtTzkVwbqPgSq"] -} -``` - -### Results - -| Return field | Type | Description | -| :-------------- | :------- | :------------------------------------------------------ | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `blockIDs` | []string | The blocks IDs that contains the requested transaction. | - -## `/ledgerstate/transactions` - -Sends transaction provided in form of a binary data, validates transaction before issuing the block payload. For more detail on how to prepare transaction bytes see the [tutorial](../tutorials/send_transaction.md). - -### Examples - -#### Client lib - `PostTransaction()` - -```GO -// prepare tx essence and signatures -... 
-// create transaction -tx := ledgerstate.NewTransaction(txEssence, ledgerstate.UnlockBlocks{unlockBlock}) -resp, err := goshimAPI.PostTransaction(tx.Bytes()) -if err != nil { - // return error -} -fmt.Println("Transaction sent, txID: ", resp.TransactionID) -``` - -### Results - -| Return field | Type | Description | -| :-------------- | :----- | :------------------------------------------------------------------------------- | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `Error` | error | The error returned if transaction was not processed correctly, otherwise is nil. | - -## `/ledgerstate/addresses/unspentOutputs` - -Gets all unspent outputs for a list of addresses that were sent in the body block. Returns the unspent outputs along with inclusion state and metadata for the wallet. - -### Request Body - -```json -{ - "addresses": ["18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"] -} -``` - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/ledgerstate/addresses/unspentOutputs \ --X POST \ --H 'Content-Type: application/json' ---data-raw '{"addresses": ["18LhfKUkWt4M9YR6Q3au4LT8wWCERwzHaqn153K78Eixp"]}' -``` - -#### Client lib - `PostAddressUnspentOutputs()` - -```Go -resp, err := goshimAPI.PostAddressUnspentOutputs([]string{"H36sZQkopfoEzP3WCMThSjUv5v9MLVYuaQ73tsKgVzXo"}) -if err != nil { - return -} -for _, outputs := range resp.UnspentOutputs { - fmt.Println("address ID:", outputs.Address.Base58) - fmt.Println("address type:", outputs.Address.Type) - - for _, output := range outputs.Outputs { - fmt.Println("output ID:", output.Output.OutputID.Base58) - fmt.Println("output type:", output.Output.Type) - } -} -``` - -### Response Examples - -```json -{ - "unspentOutputs": [ - { - "address": { - "type": "AddressTypeED25519", - "base58": "1Z4t5KEKU65fbeQCbNdztYTB1B4Cdxys1XRzTFrmvAf3" - }, - "outputs": [ - { - "output": { - "outputID": { - "base58": "4eGoQWG7UDtBGK89vENQ5Ea1N1b8xF26VD2F8nigFqgyx5m", - 
"transactionID": "BqzgVk4yY9PDZuDro2mvT36U52ZYbJDfM41Xng3yWoQK", - "outputIndex": 0 - }, - "type": "SigLockedColoredOutputType", - "output": { - "balances": { - "11111111111111111111111111111111": 1000000 - }, - "address": "1Z4t5KEKU65fbeQCbNdztYTB1B4Cdxys1XRzTFrmvAf3" - } - }, - "confirmationState": { - "confirmed": true, - "rejected": false, - "conflicting": false - }, - "metadata": { - "timestamp": "2021-05-25T15:47:04.50470213+02:00" - } - } - ] - } - ] -} -``` - -### Results - -| Return field | Type | Description | -| :--------------- | :--------------------- | :----------------------------------------- | -| `unspentOutputs` | WalletOutputsOnAddress | Unspent outputs representation for wallet. | - -#### Type `WalletOutputsOnAddress` - -| Return field | Type | Description | -| :----------- | :------------- | :----------------------------------------------- | -| `Address` | Address | The address corresponding to the unspent output. | -| `Outputs` | []WalletOutput | Unspent outputs representation for wallet. | - -#### Type `Address` - -| Field | Type | Description | -| :------- | :----- | :------------------------------- | -| `type` | string | The type of an address. | -| `base58` | string | The address encoded with base58. | - -#### Type `WalletOutput` - -| Field | Type | Description | -| :------------------ | :------------------- | :------------------------------------------------------------ | -| `output` | Output | The unspent output. | -| `confirmationState` | ConfirmationState | The inclusion state of the transaction containing the output. | -| `metadata` | WalletOutputMetadata | The metadata of the output for the wallet lib. | - -#### Type `Output` - -| Field | Type | Description | -| :----------- | :------- | :------------------------------------------------------------------- | -| `outputID` | OutputID | The identifier of an output. | -| `outputType` | string | The type of the output. 
| -| `output` | string | An outputs raw block containing balances and corresponding addresses | - -#### Type `OutputID` - -| Field | Type | Description | -| :-------------- | :----- | :---------------------------------------------- | -| `base58` | string | The output identifier encoded with base58. | -| `transactionID` | string | The transaction identifier encoded with base58. | -| `outputIndex` | int | The index of an output. | - -#### Type `ConfirmationState` - -| Field | Type | Description | -| :------------ | :--- | :---------------------------------------------------------------------------------------------------------------------- | -| `confirmed` | bool | The boolean indicating if the transaction containing the output is confirmed. | -| `rejected` | bool | The boolean indicating if the transaction that contains the output was rejected and is booked to the rejected conflict. | -| `conflicting` | bool | The boolean indicating if the output is in conflicting transaction. | - -#### Type `WalletOutputMetadata` - -| Field | Type | Description | -| :---------- | :-------- | :------------------------------------------------------ | -| `timestamp` | time.Time | The timestamp of the transaction containing the output. | diff --git a/docs/maintain/goshimmer/0.9/docs/apis/mana.md b/docs/maintain/goshimmer/0.9/docs/apis/mana.md deleted file mode 100644 index 2d9b18dd42c..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/mana.md +++ /dev/null @@ -1,864 +0,0 @@ ---- -description: The mana APIs provide methods for people to retrieve the amount of access/consensus mana of nodes and outputs, as well as the event logs. -image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - mana - - percentile - - online - - consensus - - pledge ---- - -# Mana API Methods - -The mana APIs provide methods for people to retrieve the amount of access/consensus mana of nodes and outputs, as well as the event logs. 
- -HTTP APIs: - -- [/mana](#mana) -- [/mana/all](#manaall) -- [/mana/percentile](#manapercentile) -- [/mana/access/online](#manaaccessonline) -- [/mana/consensus/online](#manaconsensusonline) -- [/mana/access/nhighest](#manaaccessnhighest) -- [/mana/consensus/nhighest](#manaconsensusnhighest) -- [/mana/pending](#manapending) -- [/mana/consensus/past](#manaconsensuspast) -- [/mana/consensus/logs](#manaconsensuslogs) -- [/mana/allowedManaPledge](#manaallowedmanapledge) - -Client lib APIs: - -- [GetOwnMana()](#getownmana) -- [GetManaFullNodeID()](#getmanafullnodeid) -- [GetMana with short node ID()](#getmana-with-short-node-id) -- [GetAllMana()](#client-lib---getallmana) -- [GetManaPercentile()](#client-lib---getmanapercentile) -- [GetOnlineAccessMana()](#client-lib---getonlineaccessmana) -- [GetOnlineConsensusMana()](#client-lib---getonlineconsensusmana) -- [GetNHighestAccessMana()](#client-lib---getnhighestaccessmana) -- [GetNHighestConsensusMana()](#client-lib---getnhighestconsensusmana) -- [GetPending()](#client-lib---getpending) -- [GetPastConsensusManaVector()](#client-lib---getpastconsensusmanavector) -- [GetConsensusEventLogs()](#client-lib---getconsensuseventlogs) -- [GetAllowedManaPledgeNodeIDs()](#client-lib---getallowedmanapledgenodeids) - -## `/mana` - -Get the access and consensus mana of the node. - -### Parameters - -| **Parameter** | `node ID` | -| ------------------------ | ------------ | -| **Required or Optional** | optional | -| **Description** | full node ID | -| **Type** | string | - -#### **Note** - -If no node ID is given, it returns the access and consensus mana of the node you're communicating with. - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana?2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5 \ --X GET \ --H 'Content-Type: application/json' -``` - -#### client lib - -There are 3 APIs to get mana of a node, which is based on the same HTTP API `/mana`. 
- -##### `GetOwnMana` - -Get the access and consensus mana of the node this API client is communicating with. - -```go -manas, err := goshimAPI.GetOwnMana() -if err != nil { - // return error -} - -// print the node ID -fmt.Println("full ID: ", manas.NodeID, "short ID: ", manas.ShortNodeID) - -// get access mana of the node -fmt.Println("access mana: ", manas.Access, "access mana updated time: ", manas.AccessTimestamp) - -// get consensus mana of the node -fmt.Println("consensus mana: ", manas.Consensus, "consensus mana updated time: ", manas.ConsensusTimestamp) -``` - -##### `GetManaFullNodeID` - -Get Mana of a node with its full node ID. - -```go -manas, err := goshimAPI.GetManaFullNodeID("2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5") -if err != nil { - // return error -} -``` - -##### `GetMana` with short node ID - -```go -manas, err := goshimAPI.GetMana("2GtxMQD9") -if err != nil { - // return error -} -``` - -### Response examples - -```json -{ - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "access": 26.5, - "accessTimestamp": 1614924295, - "consensus": 26.5, - "consensusTimestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :------ | :--------------------------------------- | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `access` | float64 | The amount of access mana. | -| `accessTimestamp` | int64 | The timestamp of access mana updates. | -| `consensus` | float64 | The amount of consensus mana. | -| `consensusTimestamp` | int64 | The timestamp of consensus mana updates. | - -## `/mana/all` - -Get the mana perception of the node in the network. You can retrieve the full/short node ID, consensus mana, access mana of each node, and the mana updated time. - -### Parameters - -None. 
- -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/all \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetAllMana()` - -```go -manas, err := goshimAPI.GetAllMana() -if err != nil { - // return error -} - -// mana updated time -fmt.Println("access mana updated time: ", manas.AccessTimestamp) -fmt.Println("consensus mana updated time: ", manas.ConsensusTimestamp) - -// get access mana of each node -for _, m := range manas.Access { - fmt.Println("full node ID: ", m.NodeID, "short node ID:", m.ShortNodeID, "access mana: ", m.Mana) -} - -// get consensus mana of each node -for _, m := range manas.Consensus { - fmt.Println("full node ID: ", m.NodeID, "short node ID:", m.ShortNodeID, "consensus mana: ", m.Mana) -} -``` - -### Response examples - -```json -{ - "access": [ - { - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 26.5 - } - ], - "accessTimestamp": 1614924295, - "consensus": [ - { - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 26.5 - } - ], - "consensusTimestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :----------- | :--------------------------------------- | -| `access` | mana.NodeStr | A list of node that has access mana. | -| `accessTimestamp` | int64 | The timestamp of access mana updates. | -| `consensus` | mana.NodeStr | A list of node that has access mana. | -| `consensusTimestamp` | int64 | The timestamp of consensus mana updates. | - -#### Type `mana.NodeStr` - -| field | Type | Description | -| :------------ | :------ | :---------------------- | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `mana` | float64 | The amount of mana. | - -## `/mana/percentile` - -To learn the top percentile the node belongs to relative to the network in terms of mana. The input should be a full node ID. 
- -### Parameters - -| | | -| ------------------------ | ------------ | -| **Parameter** | `node ID` | -| **Required or Optional** | Required | -| **Description** | full node ID | -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/percentile?2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5 \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetManaPercentile()` - -```go -mana, err := goshimAPI.GetManaPercentile("2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5") -if err != nil { - // return error -} - -// mana updated time -fmt.Println("access mana percentile: ", mana.Access, "access mana updated time: ", manas.AccessTimestamp) -fmt.Println("consensus mana percentile: ", mana.Consensus, "consensus mana updated time: ", manas.ConsensusTimestamp) -``` - -### Response examples - -```json -{ - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "access": 75, - "accessTimestamp": 1614924295, - "consensus": 75, - "consensusTimestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :------------------- | :------ | :--------------------------------------- | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `access` | float64 | Access mana percentile of a node. | -| `accessTimestamp` | int64 | The timestamp of access mana updates. | -| `consensus` | float64 | Access mana percentile of a node. | -| `consensusTimestamp` | int64 | The timestamp of consensus mana updates. | - -## `/mana/access/online` - -You can get a sorted list of online access mana of nodes, sorted from the highest access mana to the lowest. The highest access mana node has OnlineRank 1, and increases 1 by 1 for the following nodes. - -### Parameters - -None. 
- -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/access/online \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetOnlineAccessMana()` - -```go -// online access mana -accessMana, err := goshimAPI.GetOnlineAccessMana() -if err != nil { - // return error -} - -for _, m := accessMana.Online { - fmt.Println("full node ID: ", m.ID, "mana rank: ", m.OnlineRank, "access mana: ", m.Mana) -} -``` - -### Response examples - -```json -{ - "online": [ - { - "rank": 1, - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 75 - } - ], - "timestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------------ | :------------------------------------------- | -| `online` | OnlineNodeStr | The access mana information of online nodes. | -| `timestamp` | int64 | The timestamp of mana updates. | - -#### Type `OnlineNodeStr` - -| Field | Type | Description | -| :------------ | :------ | :------------------------- | -| `rank` | int | The rank of a node. | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `mana` | float64 | The amount of access mana. | - -## `/mana/consensus/online` - -You can get a sorted list of online consensus mana of nodes, sorted from the highest consensus mana to the lowest. The highest consensus mana node has OnlineRank 1, and increases 1 by 1 for the following nodes. - -### Parameters - -None. 
- -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/consensus/online \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetOnlineConsensusMana()` - -```go -// online access mana -accessMana, err := goshimAPI.GetOnlineConsensusMana() -if err != nil { - // return error -} - -for _, m := accessMana.Online { - fmt.Println("full node ID: ", m.ID, "mana rank: ", m.OnlineRank, "consensus mana: ", m.Mana) -} -``` - -### Response examples - -```json -{ - "online": [ - { - "rank": 1, - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 75 - } - ], - "timestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------------ | :---------------------------------------------- | -| `online` | OnlineNodeStr | The consensus mana information of online nodes. | -| `timestamp` | int64 | The timestamp of mana updates. | - -#### Type `OnlineNodeStr` - -| Field | Type | Description | -| :------------ | :------ | :---------------------------- | -| `rank` | int | The rank of a node. | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `mana` | float64 | The amount of consensus mana. | - -## `/mana/access/nhighest` - -You can get the N highest access mana holders in the network, sorted in descending order. -If N=0, all nodes that have access mana are returned sorted. - -### Parameters - -| | | -| ------------------------ | --------------------------------- | -| **Parameter** | `N` | -| **Required or Optional** | Required | -| **Description** | The number of highest mana nodes. 
| -| **Type** | int | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/access/nhighest?number=5 \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetNHighestAccessMana()` - -```go -// get the top 5 highest access mana nodes -accessMana, err := goshimAPI.GetNHighestAccessMana(5) -if err != nil { - // return error -} - -for _, m := accessMana.Nodes { - fmt.Println("full node ID: ", m.NodeID, "access mana: ", m.Mana) -}v -``` - -### Response examples - -```json -{ - "nodes": [ - { - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 26.5 - } - ], - "timestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :----------- | :------------------------------- | -| `nodes` | mana.NodeStr | The N highest access mana nodes. | -| `timestamp` | int64 | The timestamp of mana updates. | - -#### Type `mana.NodeStr` - -| field | Type | Description | -| :------------ | :------ | :---------------------- | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `mana` | float64 | The amount of mana. | - -## `/mana/consensus/nhighest` - -You can get the N highest consensus mana holders in the network, sorted in descending order. - -### Parameters - -| | | -| ------------------------ | ------------------------------------------- | -| **Parameter** | `N` | -| **Required or Optional** | Required | -| **Description** | The number of highest consensus mana nodes. 
| -| **Type** | int | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/consensus/nhighest?number=5 \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetNHighestConsensusMana()` - -```go -// get the top 5 highest consensus mana nodes -consensusMana, err := goshimAPI.GetNHighestConsensusMana(5) -if err != nil { - // return error -} - -for _, m := consensusMana.Nodes { - fmt.Println("full node ID: ", m.NodeID, "consensus mana: ", m.Mana) -}v -``` - -### Response examples - -```json -{ - "nodes": [ - { - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 26.5 - } - ], - "timestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :----------- | :---------------------------------- | -| `nodes` | mana.NodeStr | The N highest consensus mana nodes. | -| `timestamp` | int64 | The timestamp of mana updates. | - -#### Type `mana.NodeStr` - -| field | Type | Description | -| :------------ | :------ | :---------------------- | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `mana` | float64 | The amount of mana. | - -## `/mana/pending` - -Get the amount of base access mana that would be pledged if the given output was spent. - -### Parameters - -| | | -| ------------------------ | ------------------------- | -| **Parameter** | `outputID` | -| **Required or Optional** | Required | -| **Description** | The requesting output ID. 
| -| **Type** | string | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/pending?outputid="4a5KkxVfsdFVbf1NBGeGTCjP8Ppsje4YFQg9bu5YGNMSJK1" \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetPending()` - -```go -res, err := goshimAPI.GetPending("4a5KkxVfsdFVbf1NBGeGTCjP8Ppsje4YFQg9bu5YGNMSJK1") -if err != nil { - // return error -} - -// get the amount of mana -fmt.Println("mana be pledged: ", res.Mana) -fmt.Println("the timestamp of the output (decay duration)", res.Timestamp) -``` - -### Response examples - -```json -{ - "mana": 26.5, - "outputID": "4a5KkxVfsdFVbf1NBGeGTCjP8Ppsje4YFQg9bu5YGNMSJK1", - "timestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------ | :-------------------------------------------- | -| `mana` | float64 | The amount of access base mana to be pledged. | -| `outputID` | string | The output ID of the request. | -| `timestamp` | int64 | The timestamp of mana updates. | - -## `/mana/consensus/past` - -Get the consensus base mana vector of a time (int64) in the past. - -### Parameters - -| | | -| ------------------------ | ----------------------------- | -| **Parameter** | `timestamp` | -| **Required or Optional** | Required | -| **Description** | The timestamp of the request. 
| -| **Type** | int64 | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/consensus/past?timestamp=1614924295 \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetPastConsensusManaVector()` - -```go -res, err := goshimAPI.GetPastConsensusManaVector(1614924295) -if err != nil { - // return error -} - -// the mana vector of each node -for _, m := range res.Consensus { - fmt.Println("node ID:", m.NodeID, "consensus mana: ", m.Mana) -} -``` - -### Response examples - -```json -{ - "consensus": [ - { - "shortNodeID": "2GtxMQD9", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "mana": 26.5 - } - ], - "timestamp": 1614924295 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :----------- | :----------------------------- | -| `consensus` | mana.NodeStr | The consensus mana of nodes. | -| `timestamp` | int64 | The timestamp of mana updates. | - -#### Type `mana.NodeStr` - -| field | Type | Description | -| :------------ | :------ | :---------------------- | -| `shortNodeID` | string | The short ID of a node. | -| `nodeID` | string | The full ID of a node. | -| `mana` | float64 | The amount of mana. | - -## `/mana/consensus/logs` - -Get the consensus event logs of the given node IDs. - -### Parameters - -| | | -| ------------------------ | --------------------------------- | -| **Parameter** | `nodeIDs` | -| **Required or Optional** | Required | -| **Description** | A list of node ID of the request. 
| -| **Type** | string array | - -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/consensus/logs \ --X GET \ --H 'Content-Type: application/json' --d '{ - "nodeIDs": [ - "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux6" - ] -}' -``` - -#### Client lib - `GetConsensusEventLogs()` - -```go -res, err := goshimAPI.GetConsensusEventLogs([]string{"2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5"}) -if err != nil { - // return error -} - -for nodeID, e := range res.Logs { - fmt.Println("node ID:", nodeID) - - // pledge logs - for _, p := e.Pledge { - fmt.Println("mana type: ", p.ManaType) - fmt.Println("node ID: ", p.NodeID) - fmt.Println("time: ", p.Time) - fmt.Println("transaction ID: ", p.TxID) - fmt.Println("mana amount: ", p.Amount) - } - - // revoke logs - for _, r := e.Revoke { - fmt.Println("mana type: ", r.ManaType) - fmt.Println("node ID: ", r.NodeID) - fmt.Println("time: ", r.Time) - fmt.Println("transaction ID: ", r.TxID) - fmt.Println("mana amount: ", r.Amount) - fmt.Println("input ID: ", r.InputID) - } -} -``` - -### Response examples - -```json -{ - "logs": [ - "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5": { - "pledge": [ - { - "manaType": "Consensus", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "time": 1614924295, - "txID": "7oAfcEhodkfVyGyGrobBpRrjjdsftQknpj5KVBQjyrda", - "amount": 28 - } - ], - "revoke": [ - { - "manaType": "Consensus", - "nodeID": "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", - "time": 1614924295, - "txID": "7oAfcEhodkfVyGyGrobBpRrjjdsftQknpj5KVBQjyrda", - "amount": 28, - "inputID": "35P4cW9QfzHNjXJwZMDMCUxAR7F9mfm6FvPbdpJWudK2nBZ" - } - ] - } - ], - "startTime": 1614924295, - "endTime": 1614924300 -} -``` - -### Results - -| Return field | Type | Description | -| :----------- | :------------------------- | :------------------------------------------------------ | -| `logs` | map[string]\*EventLogsJSON | The consensus mana of nodes. 
The key of map is node ID. | -| `startTime` | int64 | The starting time of collecting logs. | -| `endTime` | int64 | The ending time of collecting logs. | - -#### Type `EventLogsJSON` - -| field | Type | Description | -| :------- | :--------------- | :------------------ | -| `pledge` | PledgedEventJSON | Pledged event logs. | -| `revoke` | RevokedEventJSON | Revoked event logs. | - -#### Type `PledgedEventJSON` - -| field | Type | Description | -| :--------- | :------ | :---------------------------------- | -| `manaType` | string | Type of mana. | -| `nodeID` | string | The full ID of a node. | -| `time` | int64 | The time of transaction. | -| `txID` | string | The transaction ID of pledged mana. | -| `amount` | float64 | The amount of pledged mana. | - -#### Type `RevokedEventJSON` - -| field | Type | Description | -| :--------- | :------ | :---------------------------------- | -| `manaType` | string | Type of mana. | -| `nodeID` | string | The full ID of a node. | -| `time` | int64 | The time of transaction. | -| `txID` | string | The transaction ID of revoked mana. | -| `amount` | float64 | The amount of revoked mana. | -| `inputID` | string | The input ID of revoked mana. | - -## `/mana/allowedManaPledge` - -This returns the list of allowed mana pledge node IDs. - -### Parameters - -None. 
- -### Examples - -#### cURL - -```shell -curl http://localhost:8080/mana/allowedManaPledge \ --X GET \ --H 'Content-Type: application/json' -``` - -#### Client lib - `GetAllowedManaPledgeNodeIDs()` - -```go -res, err := goshimAPI.GetAllowedManaPledgeNodeIDs() -if err != nil { - // return error -} - -// print the list of nodes that access mana is allowed to be pledged to -for _, id := range res.Access.Allowed { - fmt.Println("node ID:", id) -} - -// print the list of nodes that consensus mana is allowed to be pledged to -for _, id := range res.Consensus.Allowed { - fmt.Println("node ID:", id) -} -``` - -### Response examples - -```json -{ - "accessMana": { - "isFilterEnabled": false, - "allowed": [ - "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5" - ] - } - "consensusMana": { - "isFilterEnabled": false, - "allowed": [ - "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5" - ] - } -} -``` - -### Results - -| Return field | Type | Description | -| :-------------- | :------------ | :--------------------------------------------------- | -| `accessMana` | AllowedPledge | A list of nodes that allow to pledge access mana. | -| `consensusMana` | AllowedPledge | A list of nodes that allow to pledge consensus mana. | - -#### Type `AllowedPledge` - -| field | Type | Description | -| :---------------- | :------- | :-------------------------------------------------------------------------------------------------------- | -| `isFilterEnabled` | bool | A flag shows that if mana pledge filter is enabled. | -| `allowed` | []string | A list of node ID that allow to be pledged mana. 
This list has effect only if `isFilterEnabled` is `true` | diff --git a/docs/maintain/goshimmer/0.9/docs/apis/manual_peering.md b/docs/maintain/goshimmer/0.9/docs/apis/manual_peering.md deleted file mode 100644 index 07b0d0bae32..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/manual_peering.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -description: The manual peering APIs allows you to add, get and remove the list of known peers of the node. -image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - known peer - - peer - - public key - - gossip port ---- - -# Manual Peering API methods - -The manual peering APIs allow managing the list of known peers of the node. - -HTTP APIs: - -- POST [/manualpeering/peers](#post-manualpeeringpeers) -- GET [/manualpeering/peers](#get-manualpeeringpeers) -- DELETE [/manualpeering/peers](#delete-manualpeeringpeers) - -Client lib APIs: - -- [AddManualPeers()](#addmanualpeers) -- [GetManualPeers()](#getmanualpeers) -- [RemoveManualPeers()](#removemanualpeers) - -## POST `/manualpeering/peers` - -Add peers to the list of known peers of the node. - -### Request Body - -```json -[ - { - "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3", - "address": "127.0.0.1:14666" - } -] -``` - -#### Description - -| Field | Description | -| :---------- | :------------------------------------------------- | -| `publicKey` | Public key of the peer. | -| `address` | IP address of the peer's node and its gossip port. 
| - -### Response - -HTTP status code: 204 No Content - -### Examples - -#### cURL - -```shell -curl --location --request POST 'http://localhost:8080/manualpeering/peers' \ ---header 'Content-Type: application/json' \ ---data-raw '[ - { - "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3", - "address": "172.19.0.3:14666" - } -]' -``` - -### Client library - -#### `AddManualPeers` - -```go -import "github.com/iotaledger/goshimmer/packages/manualpeering" - -peersToAdd := []*manualpeering.KnownPeerToAdd{{PublicKey: publicKey, Address: address}} -err := goshimAPI.AddManualPeers(peersToAdd) -if err != nil { -// return error -} -``` - -## GET `/manualpeering/peers` - -Get the list of all known peers of the node. - -### Request Body - -```json -{ - "onlyConnected": true -} -``` - -#### Description - -| Field | Description | -| :-------------- | :-------------------------------------------------------------------------------- | -| `onlyConnected` | Optional, if set to true only peers with established connection will be returned. | - -### Response - -HTTP status code: 200 OK - -```json -[ - { - "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3", - "address": "127.0.0.1:14666", - "connectionDirection": "inbound", - "connectionStatus": "connected" - } -] -``` - -#### Description - -| Field | Description | -| :-------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `publicKey` | The public key of the peer node. | -| `address` | IP address of the peer's node and its gossip port. | -| `connectionDirection` | Enum, possible values: "inbound", "outbound". Inbound means that the local node accepts the connection. On the other side, the other peer node dials, and it will have "outbound" connectionDirection. | -| `connectionStatus` | Enum, possible values: "disconnected", "connected". 
Whether the actual TCP connection has been established between peers. | - -### Examples - -#### cURL - -```shell -curl --location --request GET 'http://localhost:8080/manualpeering/peers' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "onlyConnected": true -}' -``` - -### Client library - -#### `GetManualPeers` - -```go -import "github.com/iotaledger/goshimmer/packages/manualpeering" - -peers, err := goshimAPI.GetManualPeers(manualpeering.WithOnlyConnectedPeers()) -if err != nil { -// return error -} -fmt.Println(peers) -``` - -## DELETE `/manualpeering/peers` - -Remove peers from the list of known peers of the node. - -### Request Body - -```json -[ - { - "publicKey": "CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3" - } -] -``` - -#### Description - -| Field | Description | -| :---------- | :---------------------------------------------- | -| `publicKey` | Public key of the peer to remove from the list. | - -### Response - -HTTP status code: 204 No Content - -### Examples - -#### cURL - -```shell -curl --location --request DELETE 'http://localhost:8080/manualpeering/peers' \ ---header 'Content-Type: application/json' \ ---data-raw '[ - { - "publicKey": "8qN1yD95fhbfDZtKX49RYFEXqej5fvsXJ2NPmF1LCqbd" - } -]' -``` - -### Client library - -#### `RemoveManualPeers` - -```go -import "github.com/iotaledger/hive.go/crypto/ed25519" -import "github.com/iotaledger/goshimmer/packages/manualpeering" - -publicKeysToRemove := []ed25519.PublicKey{publicKey1, publicKey2} -err := goshimAPI.RemoveManualPeers(publicKeysToRemove) -if err != nil { -// return error -} -``` diff --git a/docs/maintain/goshimmer/0.9/docs/apis/snapshot.md b/docs/maintain/goshimmer/0.9/docs/apis/snapshot.md deleted file mode 100644 index b341efce89c..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/snapshot.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -description: The snapshot API allows retrieving current snapshot. 
-image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - snapshot - - retrieve - - current ---- - -# Snapshot API Methods - -Snapshot API allows retrieving current snapshot. - -The API provides the following functions and endpoints: - -- [/snapshot](#snapshot) - -## `/snapshot` - -Returns a snapshot file. - -### Parameters - -None - -### Examples - -#### cURL - -```shell -curl --location 'http://localhost:8080/snapshot' -``` - -#### Client lib - -Method not available in the client library. - -#### Results - -Snapshot file is returned. diff --git a/docs/maintain/goshimmer/0.9/docs/apis/spammer.md b/docs/maintain/goshimmer/0.9/docs/apis/spammer.md deleted file mode 100644 index a64ed6879e6..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/spammer.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -description: The Spammer tool lets you add blocks to the tangle when running GoShimmer. -image: /img/logo/goshimmer_light.png -keywords: - - client library - - HTTP API - - spammer - - add blocks - - interval - - tangle ---- - -# Spammer API Methods - -The Spammer tool lets you add blocks to the tangle when running GoShimmer. -**Note:** Make sure you enable the **spammer plugin** before interacting with the API. - -The API provides the following functions and endpoints: - -- [/spammer](#spammer) - -Client lib APIs: - -- [ToggleSpammer()](#client-lib---togglespammer) - -## `/spammer` - -In order to start the spammer, you need to send GET requests to a `/spammer` API endpoint with the following parameters: - -### Parameters - -| **Parameter** | `cmd` | -| ------------------------ | ------------------------------------------------------------------ | -| **Required or Optional** | required | -| **Description** | Action to perform. One of two possible values: `start` and `stop`. 
| -| **Type** | `string` | - -| **Parameter** | `rate` | -| ------------------------ | -------------------------------------------------------------------- | -| **Required or Optional** | optional | -| **Description** | Blocks per time unit. Only applicable when `cmd=start`. (default: 1) | -| **Type** | `int` | - -| **Parameter** | `unit` | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | -| **Required or Optional** | optional | -| **Description** | Indicates the unit for the spam rate: block per minute or second. One of two possible values: `mpm` and `mps`. (default: `mps`) | -| **Type** | `string` | - -| **Parameter** | `imif` (Inter Block Issuing Function) | -| ------------------------ | ------------------------------------------------------------------------------------------------ | -| **Required or Optional** | optional | -| **Description** | Parameter indicating time interval between issued blocks. Possible values: `poisson`, `uniform`. 
| -| **Type** | `string` | - -Description of `imif` values: - -- `poisson` - emit blocks modeled with Poisson point process, whose time intervals are exponential variables with mean 1/rate -- `uniform` - issues blocks at constant rate - -### Examples - -#### cURL - -```shell -curl --location 'http://localhost:8080/spammer?cmd=start&rate=100' -curl --location 'http://localhost:8080/spammer?cmd=start&rate=100&imif=uniform&unit=mpm' -curl --location 'http://localhost:8080/spammer?cmd=stop' -``` - -#### Client lib - `ToggleSpammer()` - -Spammer can be enabled and disabled via `ToggleSpammer(enable bool, rate int, imif string) (*jsonmodels.SpammerResponse, error)` - -```go -res, err := goshimAPI.ToggleSpammer(true, 100, "mps", "uniform") -if err != nil { - // return error -} - -// will print the response -fmt.Println(res.Block) -``` - -#### Response examples - -```json -{ "block": "started spamming blocks" } -``` - -#### Results - -| Return field | Type | Description | -| :----------- | :------- | :------------------------------- | -| `block` | `string` | Block with resulting block. | -| `error` | `string` | Error block. Omitted if success. | diff --git a/docs/maintain/goshimmer/0.9/docs/apis/webAPI.md b/docs/maintain/goshimmer/0.9/docs/apis/webAPI.md deleted file mode 100644 index 27aeb1c7a76..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/apis/webAPI.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -description: The web API interface allows access to functionality of the node software via exposed HTTP endpoints. -image: /img/logo/goshimmer_light.png -keywords: - - web API - - POST - - GET - - node software - - http endpoint - - port - - handler ---- - -# WebAPI - clientLib - -The web API interface allows access to functionality of the node software via exposed HTTP endpoints. 
- -## How to Use the API - -The default port to access the web API is set to `8080:8080/tcp` in `docker-compose.yml`, where the first port number is the internal port number within the node software, and the second for the access from an http port. An example where these two would be set to different values, or the external port is not utilized, can be found in the docker-network tool (see also the `docker-compose.yml` file in the docker-network tool folder). - -The server instance of the web API is contacted via `webapi.Server()`. Next we need to register a route with a matching handler. - -```go -webapi.Server().ROUTE(path string, h HandlerFunc) -``` - -where `ROUTE` will be replaced later in this documentation by `GET` or `POST`. The `HandlerFunc` defines a function to serve HTTP requests that gives access to the Context - -```go -func HandlerFunc(c Context) error -``` - -We can then use the Context to send a JSON response to the node: - -```go -JSON(statuscode int, i interface{}) error -``` - -An implementation example is shown later for the POST method. - -## GET and POST - -Two methods are currently used. First, with `GET` we register a new GET route for a handler function. The handler is accessed via the address `path`. The handler for a GET method can set the node to perform certain actions. - -```go -webapi.Server().GET("path", HandlerFunc) -``` - -A command can be sent to the node software to the API, e.g. via command prompt: - -```shell -curl "http://127.0.0.1:8080/path?command" -``` - -$$ . $$ - -Second, with `POST` we register a new POST route for a handler function. The handler can receive a JSON body input and send specific blocks to the tangle. 
- -```go -webapi.Server().POST("path", HandlerFunc) -``` - -For example, the following Handler `broadcastData` sends a data block to the tangle - -```go -func broadcastData(c echo.Context) error { - var request Request - if err := c.Bind(&request); err != nil { - log.Info(err.Error()) - return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) - } - - blk, err := blocklayer.IssuePayload( - payload.NewGenericDataPayload(request.Data), blocklayer.Tangle()) - if err != nil { - return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) - } - return c.JSON(http.StatusOK, Response{ID: blk.ID().String()}) -} -``` - -As an example the JSON body - -```json -{ - "data": "HelloWorld" -} -``` - -can be sent to `http://127.0.0.1:8080/data`, which will issue a data block containing "HelloWor" (note that in this example the data input is size limited.) diff --git a/docs/maintain/goshimmer/0.9/docs/faq.md b/docs/maintain/goshimmer/0.9/docs/faq.md deleted file mode 100644 index 6504322bb9d..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/faq.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: Frequently Asked Questions. What is GoShimmer?, What Kind of Confirmation Time Can I Expect?, Where Can I See the State of the GoShimmer testnet?,How Many Transactions Per Second(TPS) can GoShimmer Sustain?, How is Spamming Prevented?, What Happens if I Issue a Double Spend?, Who's the Target Audience for Operating a GoShimmer Node? -image: /img/logo/goshimmer_light.png -keywords: - - average network delay - - testnet - - analysis - - dashboard - - vote - - frequently asked questions - - node software - - double spend - - transactions ---- - -# FAQ - -## What is GoShimmer? - -GoShimmer is a research and engineering project from the IOTA Foundation seeking to evaluate Coordicide concepts by implementing them in a node software. - -## What Kind of Confirmation Time Can I Expect? 
- -Since non-conflicting transactions aren't even voted on, they materialize after 2x the average network delay parameter we set. This means that a transaction usually confirms within a time boundary of ~10 seconds. - -## Where Can I See the State of the GoShimmer testnet? - -You can access the global analysis dashboard in the [Pollen Analyzer](http://analysisentry-01.devnet.shimmer.iota.cafe:28080/) which showcases the network graph and active ongoing votes on conflicts. - -## How Many Transactions per Second (TPS) Can GoShimmer Sustain? - -The transactions per second metric is irrelevant for the current development state of GoShimmer. We are evaluating components from Coordicide, and aren't currently interested in squeezing out every little ounce of performance. Since the primary goal is to evaluate Coordicide components, we value simplicity over optimization . Even if we would put out a TPS number, it would not reflect an actual metric in a finished production ready node software. - -## How is Spamming Prevented? - -The Coordicide lays out concepts for spam prevention through the means of rate control and such. However, in the current version, GoShimmer relies on Proof of Work (PoW) to prevent over saturation of the network. Doing the PoW for a block will usually take a couple of seconds on commodity hardware. - -## What Happens if I Issue a Double Spend? - -If issue simultaneous transactions spending the same funds, there is high certainty that your transaction will be rejected by the network. This rejection will block your funds indefinitely, though this may change in the future. - -If you issue a transaction, await the average network delay, and then issue the double spend, then the first issued transaction should usually become confirmed, and the 2nd one rejected. - -## Who's the Target Audience for Operating a GoShimmer Node? - -Our primary focus is testing out Coordicide components. 
We are mainly interested in individuals who have a strong IT background, rather than giving people of any knowledge-level the easiest way to operate a node. We welcome people interested in trying out the bleeding edge of IOTA development and providing meaningful feedback or problem reporting in form of [issues](https://github.com/iotaledger/goshimmer/issues/new/choose). diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/configuration_parameters.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/configuration_parameters.md deleted file mode 100644 index c2cc76d28c1..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/implementation_design/configuration_parameters.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -description: You can pass configuration parameters in two ways when running GoShimmer, through a JSON configuration file or through command line arguments. Parameters are grouped into embedded objects containing parameters for a single plugin or functionality. There is no limit on how deep the configuration object may be embedded. -image: /img/logo/goshimmer_light.png -keywords: - - json - - command line - - embedded object - - parameters ---- - -# Configuration Parameters - -## Customizing Configuration - -Users can pass configuration parameters in two ways when running GoShimmer. One way is through a JSON configuration file and another way is through command line arguments. -Settings passed through command line arguments take precedence. The JSON configuration file is structured as a JSON object containing parameters and their values. -Parameters are grouped into embedded objects containing parameters for a single plugin or functionality. There is no limit on how deep the configuration object may be embedded. -For example, the config below contains example parameters for the PoW plugin. 
- -```json -{ - "pow": { - "difficulty": 2, - "numThreads": 1, - "timeout": "10s" - } -} -``` - -The same arguments can be passed through command line arguments in the following way. Embedded objects' values are described using JSON dot-notation. -Additionally,the user can pass the path of the JSON config file through a command-line argument as well, as shown in an example below. - -```shell -goshimmer \ ---config=/tmp/config.json \ ---pow.difficulty=2 \ ---pow.numThreads=1 \ ---pow.timeout=10s -``` - -## Custom Parameter Fields - -Currently, in the code there are two ways in which parameters are registered with GoShimmer. However, one is deprecated way, while the second should be used any longer when adding new parameters. - -### New Way - -Defining configuration parameters using the new way is really similar, however the parameters are not registered directly with the package reading the configuration, -but rather with our custom package that contains all the logic required to make it work seamlessly. - -In this approach, instead of defining a parameter name, a new type is defined with all necessary parameters, their default values and usage descriptions using Go's struct field tags. -A variable is then initialized with the defined type. - -One difference is that parameter names do not contain the namespace they belong to, the namespace is set when registering the parameters structure with the `configuration` package. One `parameters.go` file can contain definitions and register multiple parameter structures. - -```go -package customPlugin - -import "github.com/iotaledger/hive.go/app/configuration" - -// Parameters contains the configuration parameters used by the custom plugin. -type ParametersDefinition struct { - // ParamName contains some value used within the plugin - ParamName float64 `default:"0.31" usage:"ParamName used in some calculation"` - - // ParamGroup contains an example of embedded configuration definitions. 
- ParamGroup struct { - // DetailedParam1 is the example value - DetailedParam1 string `default:"defaultValue" usage:"DetailedParam1 used in the plugin"` - // DetailedParam2 is the example value - DetailedParam2 string `default:"defaultValue" usage:"DetailedParam2 used in the plugin"` - } -} - -var Parameters = &ParametersDefinition{} - -func init() { - configuration.BindParameters(Parameters, "customPlugin") -} -``` - -In order to access the parameter value, a user can simply access the structure's field: `Parameters.ParamName` or `Parameters.ParamGroup.DetailedParam1` -and it will be populated either with the default value or values passed through a JSON config or command-line argument. - -This approach makes it more simple to define new parameters as well as makes accessing configuration values more clear. - -### Old, Deprecated Way - -The old way is described shortly to give a basic understanding of how it works, but it should not be used any longer when adding new parameters. - -In a package where the parameters will be used, create a `parameters.go` file, that contains the definition of constants, which define parameter names in JSON dot-notation. -The constants will be later used in the code to access the parameter value. -The file should also contain an `init()` function, which registers the parameters with the `flag` library responsible for parsing configuration along with its default value and short description. -It should include comments describing what the parameter is for. 
Here is an example `parameters.go` file: - -```go -package customPackage - -import ( - flag "github.com/spf13/pflag" -) -const ( - // ParamName contains some value used within the plugin - ParamName = "customPlugin.paramName" -) - -func init() { - flag.Float64(paramName, 0.31, "ParamName used in some calculation") -} -``` - -The parameter values can be accessed in the code in the following way through the `config` plugin: - -```go -import "github.com/iotaledger/goshimmer/plugins/config" - -config.Node().Int(CfgGossipPort) -``` diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/event_driven_model.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/event_driven_model.md deleted file mode 100644 index 78300651d04..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/implementation_design/event_driven_model.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -description: When an event is triggered, an event handler (or a collection of handlers) is executed and the state of the application is updated as necessary. In GoShimmer some of those events can be the arrival of new tangle block, peering request or plugin start. -image: /img/logo/goshimmer_light.png -keywords: - - events - - plugin - - handler - - trigger - - specific type ---- - -# Event Driven Model - -Event driven model is popular approach often used for example in GUI applications, where a program is waiting for some external event to take place (e.g. mouse click) in order to perform some action. -In case of GoShimmer there is no GUI, however it applies this architecture approach as it's really flexible and is used to handle communication with other nodes and other internal parts. -In GoShimmer some of those events can be e.g. arrival of new tangle block, peering request or plugin start. -When an event is triggered, an event handler (or a collection of handlers) is executed and the state of the application is updated as necessary. 
- -## Glossary - -At first let's define some terms used further to avoid misunderstandings: - -### Event - -Represents the type of event (e.g. new block or peering request) as well as set of handlers and trigger functions. Each type of event is separately defined -which means that events are independent of each other - each event has its own set of handlers and is triggered separately. - -### Event handler (callback) - -A function that is executed when an event of given type occurs. An event handler can accept multiple arguments (e.g. block ID or plugin) so that it can perform appropriate actions. -Every handler must accept the same set of parameters. Each event has a different set of handlers (there can be multiple handlers) that are executed when the event is triggered. - -### Trigger - -A method that triggers execution of event handlers with given parameter values. - -## Creating a New Event With Custom Callbacks - -Below are the steps that show the example code necessary to create a custom event, attach a handler and trigger the event. - -1. Create a function that will call event handlers (handler caller) for a specific event. - Each event has only one handler caller. It enforces that all handlers for the event must share the same interface, because the caller will pass a fixed set of arguments of specific types to handler function. - It's not possible to pass different number of arguments or types to the handler function. - Callers for all events must also share the same interface - the first argument represents the handler function that will be called represented by a generic argument. - Further arguments represent parameters that will be passed to the handler during execution. Below are example callers that accept one and two parameters respectively. - More arguments can be passed in similar manner. 
- -```go -func singleArgCaller(handler interface{}, params ...interface{}) { - handler.(func (*Plugin))(params[0].(*Plugin)) -} - -func twoArgsCaller(handler interface{}, params ...interface{}) { - handler.(func(*peer.Peer, error))(params[0].(*peer.Peer), params[1].(error)) -} -``` - -`handler.(func (*Plugin))(params[0].(*Plugin))` - this code seems a little complicated, so to make things simpler we will divide into smaller parts and explain each: - -- `handler.(func (*Plugin))` (A) - this part does type-cast the handler from generic type onto type of desired, specific function type - in this case it's a function that accepts `*Plugin` as its only parameter. -- `params[0].(*Plugin)` (B)- similarly to previous part, first element of parameter slice is type-casted onto `*Plugin` type, so that it matches the handler function interface. -- `handler.(func (*Plugin))(params[0].(*Plugin))` - the whole expression calls the type-casted handler function with the type-casted parameter value. We can also write this as `A(B)` to make things simpler. - -The above explanation also allows a better understanding of why all handlers must share the same interface - handler caller passes fixed number of parameters and does type-casting of arguments onto specific types. - -2. Next, a new event object needs to be created. We pass the handler caller as an argument, which is saved inside the object to be called when the event is triggered. - -```go -import "github.com/iotaledger/hive.go/runtime/events" - -ThisEvent := events.NewEvent(singleArgCaller) -``` - -3. After creating the event, handlers (or callbacks) can be attached to it. An event can have multiple callbacks, however they all need to share the same interface. - One thing to note, is that functions are not passed directly - first they are wrapped into a `events.Closure` object like in the example below. - -```go -ThisEvent.Attach(events.NewClosure(func (arg *Plugin) { - // do something -})) -``` - -4. 
In order to trigger the event with some parameters we need to run the `.Trigger` method on the event object with parameters that handler functions will receive:
-
-```go
-var somePlugin Plugin
-ThisEvent.Trigger(&somePlugin)
-```
diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/object_storage.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/object_storage.md
deleted file mode 100644
index dda51c0b224..00000000000
--- a/docs/maintain/goshimmer/0.9/docs/implementation_design/object_storage.md
+++ /dev/null
@@ -1,210 +0,0 @@
----
-description: ObjectStorage is used as a base data structure for many data collection elements such as `conflictStorage`, `blockStorage` amongst others.
-image: /img/logo/goshimmer_light.png
-keywords:
-  - storage
-  - dynamic creation
-  - database
-  - parameters
-  - object types
-  - stream of bytes
-  - cached
----
-
-# Object Storage
-
-In GoShimmer `ObjectStorage` is used as a base data structure for many data collection elements such as `conflictStorage`, `blockStorage` and others.
-It can be described by the following characteristics, it:
-
-- is a manual cache which keeps objects in memory as long as consumers are using it
-- uses key-value storage type
-- provides mutex options for guarding shared variables and preventing changing the object state by multiple goroutines at the same time
-- takes care of dynamic creation of different object types depending on the key, and the serialized data it receives through the utility `objectstorage.Factory`
-- helps with the creation of multiple `ObjectStorage` instances from the same package and automatic configuration.
-
-In order to create an object storage we need to provide the underlying `kvstore.KVStore` structure backed by the database.
-
-## Database
-
-GoShimmer stores data in the form of an object storage system. The data is stored in one large repository with a flat structure. 
It is a scalable solution that allows for fast data retrieval because of its categorization structure. - -Additionally, GoShimmer leaves the possibility to store data only in memory that can be specified with the parameter `CfgDatabaseInMemory` value. In-memory storage is purely based on a Go map, package `mapdb` from hive.go. -For the persistent storage in a database it uses `RocksDB`. It is a fast key-value database that performs well for both reads and writes simultaneously that was chosen due to its low memory consumption. - -Both solutions are implemented in the `database` package, along with prefix definitions that can be used during the creation of new object storage elements. - -The database plugin is responsible for creating a `store` instance of the chosen database under the directory specified with `CfgDatabaseDir` parameter. It will manage a proper closure of the database upon receiving a shutdown signal. During the start configuration, the database is marked as unhealthy, and it will be marked as healthy on shutdown. Then the garbage collector is run and the database can be closed. - -## ObjectStorage - -Assume we need to store data for some newly created object `A`. Then we need to define a new prefix for our package in the `database` package, and prefixes for single storage objects. They will be later used during `ObjectStorage` creation. A package prefix will be combined with a store specific prefix to create a specific realm. - -```Go -package example - -type Storage struct { - A *generic.ObjectStorage - ... - shutdownOnce sync.Once -} -``` - -### ObjectStorage Factory - -To easily create multiple storage objects instances for one package, the most convenient way is to use the factory function. - -```Go -osFactory := objectstorage.NewFactory(store, database.Prefix) -``` - -It needs two parameters: - -- `store` - the key value `kvstore` instance -- `database.Prefix` - a prefix defined in the `database` package for our new `example` package. 
It will be responsible for automatic configuration of the newly provided `kvstore` instance. - -After defining the storage factory for the group of objects, we can use it to create an `*objectstorage.ObjectStorage` instance: - -```Go -AStorage = osFactory.New(objPrefix, FromObjectStorage) -AStorage = osFactory.New(objPrefix, FromObjectStorage, optionalOptions...) -``` - -For the function parameter we should provide: - -- `objPrefix` - mentioned before, we provide the object specific prefix. -- `FromObjectStorage` - a function that allows the dynamic creation of different object types depending on the stored data. -- `optionalOptions` - an optional parameter provided in the form of options array `[]objectstorage.Option`. All possible options are defined in `objectstorage.Options`. If we do not specify them during creation, the default values will be used, such as enabled persistence or setting cache time to 0. - -### StorableObject - -`StorableObject` is an interface that allows the dynamic creation of different object types depending on the stored data. We need to make sure that all methods required by the interface are implemented to use the object storage factory. - -- `SetModified` - marks the object as modified, which will be written to the disk (if persistence is enabled). 
-- `IsModified` - returns true if the object is marked as modified -- `Delete` - marks the object to be deleted from the persistence layer -- `IsDeleted` - returns true if the object was marked as deleted -- `Persist` - enables or disables persistence for this object -- `ShouldPersist` - returns true if this object is going to be persisted -- `Update` - updates the object with the values of another object - requires an explicit implementation -- `ObjectStorageKey` - returns the key that is used to store the object in the database - requires an explicit implementation -- `ObjectStorageValue` - marshals the object data into a sequence of bytes that are used as the value part in the object storage - requires an explicit implementation - -Most of these have their default implementation in `objectstorage` library, except from `Update`, `ObjectStorageKey`, `ObjectStorageValue` which need to be provided. - -### StorableObjectFactory Function - -The function `ObjectFromObjectStorage` from object storage provides functionality to restore objects from the `ObjectStorage`. By convention the implementation of this function usually follows the schema: -`ObjectFromObjectStorage` uses `ObjectFromBytes` - -```Go -func ObjectFromObjectStorage(key []byte, data []byte) (result StorableObject, err error) { - result, err := ObjectFromBytes(marshalutil.New(data)) - ... - return -} -``` - -`ObjectFromBytes` unmarshals the object sequence of bytes with a help of `marshalutil` library. The returned `consumedBytes` can be used for the testing purposes. -The created `marshalUtil` instance stores the stream of bytes and keeps track of what has been already read (`readOffset`). - -```Go -func ObjectFromBytes(bytes []byte) (object *ObjectType, consumedBytes int, err error) { - marshalUtil := marshalutil.New(bytes) - if object, err = ObjectFromMarshalUtil(marshalUtil); err != nil { - ... 
- consumedBytes = marshalUtil.ReadOffset() - return -} -``` - -The key logic is implemented in `ObjectFromMarshalUtil` that takes the marshaled object and transforms it into the object of specified type. -Because the data is stored in a sequence of bytes, it has no information about the form of an object and any data types it had before writing to the database. -Thus, we need to serialize any data into a stream of bytes in order to write it (marshaling), and deserialize the stream of bytes back into correct data structures when reading it (unmarshaling). -Let's consider as an example, unmarshaling of the `Child` object. - -```Go -type Child struct { - childType ChildType // 8 bytes - referencedBlockID BlockID // 32 bytes - childBlockID BlockID // 32 bytes -} -``` - -The order in which we read bytes has to reflect the order in which it was written down during marshaling. As in the example, the order: `referencedBlockID`, `childType`, `childBlockID` is the same in both marshalling and unmarshalling. - -```Go -// Unmarshalling -func ChildFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (result *Child) { - result = &Child{} - result.referencedBlockID = BlockIDFromMarshalUtil(marshalUtil) - result.childType = ChildTypeFromMarshalUtil(marshalUtil) - result.childBlockID = BlockIDFromMarshalUtil(marshalUtil) - return -} -// Marshalling -func (a *Child) ObjectStorageChild() []byte { - return marshalutil.New(). - Write(a.referencedBlockID). - Write(a.childType). - Write(a.childBlockID). - Bytes() -} -``` - -We continue to decompose our object into smaller pieces with help of `MarshalUtil` struct that keeps track of bytes, and a read offset. -Then we use `marshalutil` build in methods on the appropriate parts of the byte stream with its length defined by the data -type of the struct field. This way, we are able to parse bytes to the correct Go data structure. 
-
-### ObjectStorage Methods
-
-After defining marshalling and unmarshalling mechanism for `objectStorage` bytes conversion,
-we can start using it for its sole purpose, to actually store and read the particular parts of the project elements.
-
-- `Load` allows retrieving the corresponding object based on the provided id. For example, the method on the block `objectStorage`
-  is getting the cached object.
-- To convert an object retrieved in the form of a cache to its own corresponding type, we can use `Unwrap`.
-  In the code below it will return the block wrapped by the cached object.
-- `Exists` - checks whether the object has been deleted. If so it is released from memory with the `Release` method.
-
-  ```Go
-  func (s *Storage) Block(blockID BlockID) *CachedBlock {
-    return &CachedBlock{CachedObject: s.blockStorage.Load(blockID[:])}
-  }
-
-  cachedBlock := blocklayer.Tangle().Storage.Block(blkID)
-  if !cachedBlock.Exists() {
-    blkObject.Release()
-  }
-  block := cachedBlock.Unwrap()
-  ```
-
-- `Consume` will be useful when we want to apply a function on the cached object. `Consume` unwraps the `CachedObject` and passes a type-casted version to the consumer function.
-  Right after the object is consumed and when the callback is finished, the object is released.
-
-  ```Go
-  cachedBlock.Consume(func(block *tangle.Block) {
-    doSomething(block)
-  })
-  ```
-
-- `ForEach` - allows to apply a `Consumer` function for every object residing within the cache and the underlying persistence layer.
-  For example, this is how we can count the number of blocks.
-
-  ```Go
-  blockCount := 0
-  blockStorage.ForEach(func(key []byte, cachedObject generic.CachedObject) bool {
-    cachedObject.Consume(func(object generic.StorableObject) {
-      blockCount++
-    })
-    return true
-  })
-  ```
-
-- `Store` - storing an object in the objectStorage. An extended version is method `StoreIfAbsent`
-  that stores an object only if it was not stored before and returns boolean indication if the object was stored.
- `ComputeIfAbsent` works similarly but does not access the value log. - - ```Go - cachedBlock := blockStorage.Store(newBlock) - cachedBlock, stored := blockStorage.StoreIfAbsent(newBlock) - cachedBlock := blockStorage.ComputeIfAbsent(newBlock, remappingFunction) - ``` diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/packages_plugins.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/packages_plugins.md deleted file mode 100644 index 70e1a4a3cbd..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/implementation_design/packages_plugins.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: GoShimmer uses the adapter design pattern to easily switch between different implementations and internal interfaces just by using a different plugin, without having to rewrite the code using it. -image: /img/logo/goshimmer_light.png -keywords: - - dependency - - plugins - - plugin system - - code - - internal logic - - package - - adapter design pattern - - adapter - - circular dependency ---- - -# Dependency of Packages and Plugins - -In GoShimmer, new features are added through the [plugin system](plugin.md). -When creating a new plugin, it must implement an interface shared with all other plugins, so it's easy to add new -plugins and change their internal implementation without worrying about compatibility. -Because of this, to make the code clean and easily manageable the plugin's internal logic has to be implemented in a different package. -This is an example of an [adapter design pattern](https://en.wikipedia.org/wiki/Adapter_pattern) that is often used in plugin systems. -It's really useful in a prototype software like GoShimmer, because it's possible to easily switch between different implementations -and internal interfaces just by using a different plugin, without having to rewrite the code using it. - -When creating a new plugin, the logic should be implemented in a separate package stored in the `packages/` directory. 
-The package should contain all struct and interface definitions used, as well as the specific logic. -It should not reference any `plugin` packages from the `plugin/` directory as this could lead to circular dependencies between packages. - -There are no special interfaces or requirements that packages in the `packages/` directory are forced to follow. However, they should be independent of other packages if possible, -to avoid problems due to changing interfaces in other packages. diff --git a/docs/maintain/goshimmer/0.9/docs/implementation_design/plugin.md b/docs/maintain/goshimmer/0.9/docs/implementation_design/plugin.md deleted file mode 100644 index 20243e3e8c7..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/implementation_design/plugin.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -description: The Plugin system allows to quickly and easily add and remove modules that need to be started. In GoShimmer, this is taken to an extreme, everything is run through plugins. -image: /img/logo/goshimmer_light.png -keywords: - - plugin - - events - - configure - - event handlers - - handler function ---- - -# Plugin System - -GoShimmer is a complex application that is used in a research environment where requirements often changed and new ideas arise. -The Plugin system allows to quickly and easily add and remove modules that need to be started. However, one thing that might be non-intuitive about the use of plugins is that it's taken to an extreme - everything is run through plugins. -The only code that is not executed through a plugin system is the code responsible for configuring and starting the plugins. -All new future features added to the GoShimmer must be added by creating a new plugin. - -## Plugin Structure - -`Plugin` structure is defined as following. 
- -```go -type Plugin struct { - Node *Node - Name string - Status int - Events pluginEvents - wg *sync.WaitGroup -} -``` - -Below is a brief description of each field: - -- `Node` - contains a pointer to `Node` object which contains references to all the plugins and node-level logger. #TODO: figure out why it is there - not really used anywhere -- `Name` - descriptive name of the plugin. -- `Status` - flag indicating whether plugin is enabled or disabled. -- `Events` - structure containing events used to properly deploy the plugin. Details described below. -- `wg` - a private field containing WaitGroup. #TODO: figure out why it is there - not really used anywhere - -## Plugin Events - -Each plugin defines 3 events: `Init`, `Configure`, `Run`. -Those events are triggered during different stages of node startup, but the plugin doesn't have to define handlers for all of those events in order to do what it's been designed for. -Execution order and purpose of each event is described below: - -1. `Init` - is triggered almost immediately after a node is started. It's used in plugins that are critical for GoShimmer such as reading config file or initializing global logger. Most plugins don't need to use this event. -2. `Configure` - this event is used to configure the plugin before it is started. It is used to define events related to internal plugin logic or initialize objects used by the plugin. -3. `Run` - this event is triggered as the last one. The event handler function contains the main logic of the plugin. - For many plugins, the event handler function creates a separate worker that works in the background, so that the handler function for one plugin can finish and allow other plugins to be started. - -Each event could potentially have more than one handler, however currently all existing plugins follow a convention where each event has only one handler. 
- -It is important to note that each event is triggered for all plugins sequentially, so that the event `Init` is triggered for all plugins, then `Configure` is triggered for all plugins and finally `Run`. -Such order is crucial, because some plugins rely on other plugins' initialization or configuration. The order in which plugins are initialized, configured and run is also important and this is described below. - -Handler functions for all plugin events share the same interface, so they could potentially be used interchangeably. Sample handler functions look like this: - -```go -func configure(_ *node.Plugin) { - // configure stuff -} - -func run(*node.Plugin) { - // run plugin -} -``` - -The handler functions receive one argument of type `*Plugin`. The code responsible for triggering those events passes a pointer to the plugin object itself. -The object needs to be passed so that the handler function can access plugin fields (e.g. plugin name to configure logger). - -## Creating a New Plugin - -A plugin object can be created by calling the `node.NewPlugin` method. -The method creates and returns a new plugin object, as well as registers it so that GoShimmer knows the plugin is available. -It accepts the following arguments: - -- `name string` - plugin name. -- `status int` - flag indicating whether plugin is enabled or disabled by default. This can be overridden by enabling/disabling the plugin in the external configuration file. Possible values: `node.Enabled`, `node.Disabled`. -- `callbacks ...Callback` - list of event handler functions. The method will correctly create a plugin when passing up to 2 callbacks. Note: `type Callback = func(plugin *Plugin)`, which is a raw function type without being wrapped in `events.Closure`. - -There is a couple of ways that the method can be called, depending on which plugin events need to be configured. - -- Define `Configure` and `Run` event handlers. It's the most common usage that plugins currently use. 
- -```go -plugin = node.NewPlugin(PluginName, node.Enabled, configure, run) -``` - -- Define only `Configure` event. It's used for plugins that are used to configure objects used (or managed) by other plugins, such as creating API endpoints. - -```go -plugin = node.NewPlugin(PluginName, node.Enabled, configure) -``` - -- Define a plugin without `Configure` or `Run` event handlers. This is used to create plugins that perform some action when the `Init` event is triggered. - -```go -plugin = node.NewPlugin(PluginName, node.Enabled) -``` - -However, the `Init` event handler cannot be attached using the `node.NewPlugin` method. -In order to specify this handler, plugin creator needs to attach it manually to the event, for example inside the package's `init()` method in the file containing the rest of the plugin definition. - -```go -func init() { - plugin.Events.Init.Attach(events.NewClosure(func(*node.Plugin) { - // do something - })) -} -``` - -It's important to note, that the `node.NewPlugin` accepts handler functions in a raw format, that is, without being wrapped by the `events.Closure` object as the method does the wrapping inside. -However, when attaching the `Init` event handler manually, it must be wrapped by the `events.Closure` object. - -It's crucial that each plugin is created only once and `sync.Once` class is used to guarantee that. Contents of a file containing sample plugin definition is presented. All plugins follow this format. - -```go -const PluginName = "SamplePlugin" - -var ( - // plugin is the plugin instance of the new plugin plugin. - plugin *node.Plugin - pluginOnce sync.Once -) - -// Plugin gets the plugin instance. 
-func Plugin() *node.Plugin { - pluginOnce.Do(func() { - plugin = node.NewPlugin(PluginName, node.Enabled, configure, run) - }) - return plugin -} - -// Handler functions -func init() { - plugin.Events.Init.Attach(events.NewClosure(func(*node.Plugin) { - // do something - })) -} -func configure(_ *node.Plugin) { - // configure stuff -} - -func run(*node.Plugin) { - // run stuff -} -``` - -## Running a New Plugin - -In order to correctly add a new plugin to GoShimmer, apart from defining it, it must also be passed to the `node.Run` method. -Because there are plenty of plugins, in order to improve readability and make managing plugins easier, they are grouped into separate wrappers passed to the `node.Run` method. -When adding a new plugin, it must be added into one of those groups, or a new group must be created. - -```go -node.Run( - plugins.Core, - plugins.Research, - plugins.UI, - plugins.WebAPI, -) -``` - -You can add a plugin simply by calling the `Plugin()` method of the newly created plugin and passing the argument further. An example group definition is presented below. When it's added, the plugin is correctly added and will be run when GoShimmer starts. - -```go -var Core = node.Plugins( - banner.Plugin(), - newPlugin.Plugin(), - // other plugins ommited -) -``` - -## Background workers - -In order to run plugins beyond the scope of the short-lived `Run` event handler, possibly multiple `daemon.BackgroundWorker` instances can be started inside the handler function. -This allows the `Run` event handler to finish quickly, and the plugin logic can continue running concurrently in a separate goroutine. - -Background worker can be started by running the `daemon.BackgroundWorker` method, which accepts following arguments: - -- `name string` - background worker name -- `handler WorkerFunc` - long-running function that will be started in its own goroutine. It accepts a single argument of type `<-chan struct{}`. 
When something is sent to that channel, the worker will shut down. Note: `type WorkerFunc = func(shutdownSignal <-chan struct{})` -- `order ...int` - value used to define in which shutdown order this particular background worker must be shut down (higher = earlier). - The parameter can either accept one or zero values, more values will be ignored. When passing zero values, default value of `0` is assumed. - Values are normalized in the `github.com/iotaledger/goshimmer/packages/shutdown` package, and it should be used instead of passing integers manually. - Correct shutdown order is as important as correct start order, because different plugins depend on others working correctly, so when one plugin shuts down too soon, other plugins may run into errors, crash and leave an incorrect state. - -An example code for creating a background worker: - -```go -func start(shutdownSignal <-chan struct{}) { - // long-running function - // possibly start goroutines here - // wait for shutdown signal - <-shutdownSignal -} - -if err := daemon.BackgroundWorker(backgroundWorkerName, start, shutdown.PriorityGossip); err != nil { - log.Panicf("Failed to start as daemon: %s", err) -} -``` diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/advanced_outputs.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/advanced_outputs.md deleted file mode 100644 index 04d6f98fc03..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/advanced_outputs.md +++ /dev/null @@ -1,337 +0,0 @@ ---- -description: IOTA strives to provide output types beyond the basic functionality of a cryptocurrency application such as Smart Contracts. 
-image: /img/protocol_specification/bob_alias.png -keywords: - - smart contract chain - - state metadata - - state controller - - governance controller - - alias - - smart contract - - transactions - - NFT ---- - -# UTXO Output Types - -## Motivation - -In the previous [section](ledgerstate.md) two basic output types were introduced that enable the use of the UTXO ledger -as a payment application between addresses. Each `SigLockedSingleOutput` and `SigLockedAssetOutput` encodes a list of -balances and an address in the output. The output can be unlocked by providing a valid signature for the address, hence -only the owner of the address can initiate a payment. - -While these two output types provide the basic functionality for a cryptocurrency application, IOTA aims to strive -for more. The first and foremost application the UTXO ledger should support besides payments is the IOTA Smart Contract -Protocol (ISCP). Due to the lack of total ordering of the Tangle (that is a direct result of the scalable, parallel -architecture), it is not possible to implement Turing-complete smart contracts directly on layer 1. Therefore, -IOTA aims to develop a layer 2 protocol called ISCP for smart contracts. - -After carefully evaluating the proposed architecture of ISCP and the required properties of the layer 2 protocol, we -came up with special types of outputs for layer 1 UTXO support: `AliasOutput` and `ExtendedLockedOutput`. -These output types are experimental: the IOTA 2.0 DevNet serves as their testing ground. Bear in mind that there is no -guarantee that they will not change as the protocol evolves. - -It will be demonstrated later that these outputs can also be used for enhanced cryptocurrency payment application, such -as conditional payments or time locked sending, but also open up the world of native non-fungible tokens (NFTs). - -## Functional Requirements of ISCP - -Designing the output types starts with a proper requirement analysis. 
Below you can read the summary of the functional -requirements imposed by the layer 2 smart contract protocol. You can read more about ISCP -[here](https://blog.iota.org/an-introduction-to-iota-smart-contracts-16ea6f247936/), -[here](https://blog.iota.org/iota-smart-contracts-protocol-alpha-release/) -or check out this [presentation](https://youtu.be/T1CJFr6gz8I). - -- Smart contract chains need a globally unique account in the UTXO ledger, that does not change if the controlling entities changes. -- An account state is identified by balances and state metadata. -- Two levels of control: **state controller** and **governance controller**. -- State controller can change state metadata (state transition) and balance (min required). -- Governance controller can change state controller and governance controller. -- An account shall have only one valid state in the ledger. -- Smart contract chain state transitions are triggered by requests in the ledger. -- A request is a ledger entity belonging to the account with tokens and data. -- The account can identify and control requests. -- Fallback mechanism needs to be in place in case the requests are not picked up. -- When request is completed in a state transition, it should be atomically removed from the ledger. - -## Output Design - -### Introducing Alias Account - -Previously, the account concept in the ledger was realized with cryptographic entities called addresses, that are backed -by public and private key pairs. Addresses are present in the ledger through outputs and define who can spend this -output by providing a digital signature. - -Addresses are not able to provide the necessary functionality needed for smart contract chain accounts, because: - -- addresses change with the rotation of the controlling body (committee), -- and there is no notion of separate control levels for an address account. - -We define a new account type in the ledger, called **Alias**, to represent smart contract chain accounts. 
An alias
-account can hold token balances, but also has state metadata, which stores the state of the smart contract chain.
-The alias account defines two controlling entities: a state controller and a governance controller. The state
-controller can transition the account into a new state, and can manipulate account balances. The governance controller
-can change the state controller or the governance controller.
-
-An alias is not a cryptographic entity, but it is controlled via either regular addresses or other aliases.
-
-### Representing a Smart Contract Chain Account in Ledger
-
-An alias is translated into the ledger as a distinct output type, called **AliasOutput**. The output contains:
-
-- the unique identifier of the alias, called **AliasID**,
-- the **State Controller** entity,
-- **State Metadata**,
-- the **Governance Controller**,
-- **Governance Metadata**,
-- **Immutable Metadata**,
-- and token **balances**.
-
-The state controller and governance controller entities can either be private key backed addresses (cryptographic
-entities) or `AliasAddress`, that is the unique identifier of another alias. Note that an alias cannot be controlled by
-its own `aliasID`.
-
-An alias output itself can be regarded as a non-fungible token with a unique identifier `aliasID`, metadata and token
-balances. An NFT that can hold tokens, can control its metadata and has a governance model.
-
-Alias output can be created in a transaction that spends the minimum required amount of tokens into a freshly created
-alias output. The new transaction output specifies the state and governance controller next to the balances, but aliasID
-is assigned by the protocol once the transaction is processed. Once the output is booked, aliasID becomes the hash of
-the outputID that created it.
-
-An alias output can only be destroyed by the governance controller by simply consuming it as an input but not creating
-
- -The alias account is transitioned into a new state by spending its alias output in a transaction and creating an -updated alias output with the same aliasID. Depending on what unlocking conditions are met, there are certain -restrictions on how the newly created alias output can look like. - -### Consuming an Alias Output - -As mentioned above, an alias output can be unlocked by both the state controller and the governance controller. - -#### Unlocking via State Controller - -When the state controller is an address, the alias output is unlocked by providing a signature of the state controller -address in the output that signs the essence of the transaction. When state controller is another alias, unlocking is -done by providing a reference to the state controller unlocked other alias within the transaction. - -When an alias output is unlocked as input in a transaction by the state controller, the transaction must contain a -corresponding alias output. Only the state metadata and the token balances of the alias output are allowed to change, -and token balances must be at least a protocol defined constant. - -#### Unlocking via governance controller - -The governance controller is either an address, or another alias. In the former case, unlocking is done via the regular -signature. In the latter case, unlocking is done by providing a reference to the unlocked governance alias within the -transaction. - -When an alias output is unlocked as input by the governance controller, the transaction doesn't need to have a -corresponding output. If there is no such output in the transaction, the alias is destroyed. If however the output -is present, only the state and governance controller fields are allowed to be changed. - -A governance controller therefore can: - -- destroy the alias all together, -- assign the state controller of the alias, -- assign the governance controller of the alias. 
- -## Locking Funds Into Aliases - -Address accounts in the ledger can receive funds by the means of signature locking. Outputs specify an address field, -which essentially gives the control of the funds of the output to the owner of the address account, the holder of the -corresponding private key. - -In order to make alias accounts (smart contract chains) able to receive funds, we need to define a new fund locking -mechanism, called alias locking. An alias locked output can be unlocked by unlocking the given alias output for -state transition in the very same transaction. - -An alias account (smart contract chain) can receive funds now, but there are additional requirements to be satisfied -for smart contracts: - -- Alias locked outputs represent smart contract requests, and hence, need to contain metadata that is interpreted on - layer 2. -- A dormant smart contract chain might never consume alias locked outputs, therefore, there needs to be a fallback - mechanism for the user to reclaim the funds locked into the request. -- Requests might be scheduled by the user by specifying a time locking condition on the output. The output can not be - spent before the time locking period expires. - -As we can see, there are couple new concepts regarding outputs that we need to support for the smart contract use case: - -- **alias locking** -- **metadata tied to output** -- **fallback unlocking mechanism** -- **time locking** - -In the next section, we are going to design an **Extended Output** model that can support these concepts. - -## Extended Output - -An extended output is an output that supports alias locking, output metadata, fallback unlocking mechanisms and time -locking. The structure of an extended output is as follows: - -Extended Output: - -- **AliasID**: the alias account that is allowed to unlock this output. -- **Token Balances**: tokens locked by the output. -- **Metadata**: optional, bounded size binary data. 
-- **FallbackAccount**: an alias or address that can unlock the output after **FallbackDeadline**. -- **FallbackDeadline**: a point in time after which the output might be unlocked by **FallbackAccount**. -- **Timelock** (Optional): a point in time. When present, the output can not be unlocked before. - -### Unlocking via AliasID - -The extended output can be unlocked by unlocking the alias output with aliasID by the state controller within the same -transaction. The unlock block of an extended output then references the unlock block of the corresponding alias output. - -Aliases abstract away the underlying address of a smart contract committee, so when a committee is rotated, `aliasID` -stays the same, but the address where the alias points to can be changed. - -It is trivial then to define the unique account of a smart contract on layer 1 as the `aliasID`, however, a new locking -mechanism is needed on the UTXO layer to be able to tie funds to an alias. - -Previously, only addresses defined accounts in the protocol. Funds can be locked into addresses, and a signature of the -respective address has to be provided in the transaction to spend funds the account. - -With the help of aliases, it is possible to extend the capabilities of the protocol to support locking funds into -aliases. This is what we call alias locking. An alias locked output specifies an `aliasID` that can spend the funds -from this output. The owner of the alias account can spend aforementioned alias locked outputs by unlocking/moving the -alias in the very same transaction. We will use the term `ExtendedLockedOutput` for outputs that support alias locking. - -Let's illustrate this through a simple example. Alice wants to send 10 IOTA to Bob's alias account. Bob then wants to -spend the 10 IOTA from his alias account to his address account. - -1. Bob creates an alias where `aliasID=BobAliasID` with Transaction A. 
- -[![Bob creates an alias](/img/protocol_specification/bob_alias.png 'Bob creates an alias')](/img/protocol_specification/bob_alias.png) - -2. Bob shares `BobAliasID` with Alice. -3. Alice sends 10 IOTA to Bob by sending Transaction B that creates an `ExtendedLockedOutput`, specifying the balance, - and `aliasID=BobAliasID`. - -[![Alice sends 10 IOTA to Bob](/img/protocol_specification/alice_sends_10_mi.png 'Alice sends 10 IOTA to Bob')](/img/protocol_specification/alice_sends_10_mi.png) - -4. Bob can spend the outputs created by Alice by creating Transaction C that moves his `BobAlias` (to the very same - address), and including the `ExtendedLockedOutput` with `aliasID=BobAliasID`. - -[![Bob can spend the outputs created by Alice by creating Transaction C](/img/protocol_specification/bob_can_spend_outputs_created_by_alice.png 'Bob can spend the outputs created by Alice by creating Transaction C')](/img/protocol_specification/bob_can_spend_outputs_created_by_alice.png) - -In a simple scenario, a user wishing to send a request to a smart contract creates an extended output. The output -contains the AliasID of the smart contract chain account, the layer 2 request as metadata, and some tokens to pay -for the request. Once the transaction is confirmed, the smart contract chain account "receives" the output. It -interprets the request metadata, carries out the requested operation in its chain, and submits a transaction that -contains the updated smart contract chain state (alias output), and also spends the extended output to increase -the balance of its alias output. - -What happens when the smart contract chain goes offline or dies completely? How do we prevent the extended output to -be lost forever? - -### Unlocking via Fallback - -Extended outputs can also define a fallback account and a fallback deadline. After the fallback deadline, only the -fallback account is authorized to unlock the extended output. 
Fallback deadline cannot be smaller than a protocol -wide constant to give enough time to the smart contract chain to pick up the request. - -Fallback unlocking can either be done via signature unlocking or alias unlocking, depending on the type of account -specified. - -### Timelock - -Timelocking outputs is a desired operation not only for smart contracts, but for other use cases as well. A user might -for example scheduled a request to a smart contract chain at a later point in time by timelocking the extended output -for a certain period. - -Timelocks can be implemented quite easily if transactions have enforced timestamps: the output can not be unlocked if -the transaction timestamp is before the timelock specified in the output. - -## Notes - -One of the most important change that the new output types imply is that checking the validity of an unlock block of a -certain consumed input has to be done in the context of the transaction. Previously, an unlock block was valid if the -provided signature was valid. Now, even if the signature is valid for an alias output unlocked for state transition, -additional constraints also have to be met. - -## How Does It Work for ISCP? - -- The new output types are completely orthogonal to colored coins, ISCP will not rely on them anymore. -- The Alias output functions as a chain constraint to allow building a non-forkable chain of transactions in the - ledger by the state controller. The alias output holds tokens, that are the balance of the smart contract chain. - The hash of the smart contract chain state is stored in the alias output, registering each state transition as a - transaction on the ledger. -- The governance controller of an alias output can change the state controller, meaning that a committee rotation can - be carried out without changing the smart contract chain account, aliasID. - - A smart contract chain can be self governed, if the state and governance controllers coincide. 
- - A smart contract chain can be governed by an address account, or by another smart contract chain through an - alias account. -- Each Extended Output is a request which is “sent” to the alias account. The ISCP can retrieve the backlog of - requests by retrieving all outputs for the aliasID. Consuming the Extended Output means it is atomically removed - from the backlog. It can only be done by the state controller, i.e. the committee of the smart contract chain. -- Fallback parameters prevent from losing funds if the committee is inactive for some timeout. After timeout the - Extended Output can be unlocked by FallbackAccount, an address or another alias. - -## Additional Use Cases - -### Delegated Keys - -An alias output is controlled by two parties: the state controller and the governance controller. The state controller -can only change the state metadata and the tokens when spending the output, therefore it only has the right to move the -alias to the very same account in a transaction. The governance controller however can change the state controller, or -destroy the alias and hence release the funds locked into it. - -This makes it an ideal candidate for mana delegation, that is a crucial part of a mana marketplace. In Coordidice, -moving funds generate access and consensus mana. Alias outputs make it possible to delegate the right to move funds -without losing control over them. - -1. An account owning funds create an alias output and locks funds into it. The governance controller of the alias output - shall be `ownAccount`. -2. An entity in need of mana generated by the locked funds can purchase the right from the governance controller to - move the alias output, generating mana. -3. Once purchased, the governance controller updates the alias output by specifying the state controller to be - `buyerAccount`. -4. `buyerAccount` now can move the alias output, but only to its own account. Each move generates (access) mana. -5. 
Since `ownAccount` is the governance controller, it can revoke `buyerAccount`'s state controlling right at any point - in time. -6. `ownAccount` can also destroy the alias and "free" the locked funds. - -Notes: - -- The state controller can redeem funds from the alias output up to the point where only `minimum allowed amount` is - present in the alias output. Therefore, without additional mechanism, it would only make sense to lock - `minimum allowed amount` into an alias by the governance controller. This is obviously a drawback, users should not - be restricted in how many funds they would like to delegate. -- A governance controller can destroy the alias output at any time, which is not desired from the buyer perspective. - The buyer should be able to buy the right to move the funds for a pre-defined amount of time. - -To solve above problems, the `AliasOutput` currently implemented in GoShimmer supports the delegation use case by -introducing two new fields in the output: - -- `isDelegated` and -- `delegationTimelock`. - -When an alias is delegated, the state controller cannot modify token balances, and the governor can destroy the -output with any balance. However, when delegation time lock is present, the governor is not allowed to unlock the -output until the delegation time expires. - -### Non-Fungible Tokens - -NFTs are unique tokens that have metadata attached to them. Since an AliasOutput implements a chain constraint in the -UTXO ledger, it is perfectly suited to represent NFTs. The unique identifier of the NFT is the `aliasID` or `AliasAddress`. -The `Immutable Data` field of the output can only be defined upon creation and can't be changed afterward, therefore -it is perfect to store metadata belonging to the NFT. - -The ID of an IOTA NFT is also a valid address, therefore the NFT itself can receive and manage funds and other NFTs as -well. 
Refer to the [cli-wallet tutorial](../../tutorials/wallet_library.md) for an overview of what you can do with an NFT. - -Interestingly, minting an IOTA NFT costs you only the minimum required deposit balance (0.0001 IOTA at the moment), which -you can take back when you destroy the NFT. This is required so that NFTs are not minted out of thin air, and there are -some IOTAs backing the output. Otherwise, the ledger database could be easily spammed. -Transferring NFTs is also feeless, just like any other transaction in IOTA. - -## GoShimmer Implementation - -If you are interested, you can find the GoShimmer implementation of the new output types in -[output.go](https://github.com/iotaledger/goshimmer/blob/develop/packages/protocol/engine/ledger/vm/devnetvm/output.go): - -- [AliasOutput](https://github.com/iotaledger/goshimmer/blob/develop/packages/protocol/engine/ledger/vm/devnetvm/output.go#L598) and -- [ExtendedLockedOutput](https://github.com/iotaledger/goshimmer/blob/develop/packages/protocol/engine/ledger/vm/devnetvm/output.go#L1582) diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/autopeering.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/autopeering.md deleted file mode 100644 index c7a941e69ac..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/autopeering.md +++ /dev/null @@ -1,232 +0,0 @@ ---- -description: An IOTA node needs to discover and maintain a list of the reachable IP addresses of other peers. Nodes need to be kept up-to-date about the ledger state, thus they exchange information with each other. -image: /img/protocol_specification/peer_discovery.png -keywords: - - node - - neighbors - - selection - - pong - - ping - - peer - - peering - - discovery - - request - - accepted - - salt update ---- - -# Autopeering - -In order to establish connections, an IOTA node needs to discover and maintain a list of the reachable IP addresses of other peers. 
Nodes need to be kept up-to-date about the ledger state, thus they exchange information with each other. Each node establishes a communication channel with a small subset of nodes (i.e., neighbors) via a process called `peering`. Such a process must be resilient against eclipse attacks: if all of a node’s neighbors are controlled by an attacker, then the attacker has complete control over the node’s view of the Tangle. Moreover, to prevent or limit sybil-based attacks, the neighbor selection protocol makes use of a scarce resource dubbed Consensus Mana: arbitrary nodes can be created, but it is difficult to produce high mana nodes. - -Throughout this section the terms `Node` and `Peer` are used interchangeably to refer to a `Node` device. - -The usage of the _Ping_ and _Pong_ mechanism is to be considered as a bidirectional exchange similarly to how described by other standards such as [CoAP](https://core-wg.github.io/coap-sig/) and [WebSocket](https://tools.ietf.org/html/rfc6455#section-5.5.2). - -## Node Identities - -Every node has a cryptographic identity, a key on the ed25519 elliptic curve. The `blake2b` hash of the public key of the peer serves as its identifier or `node ID`. - -## Peer Discovery - -The main goal of the _Peer Discovery_ protocol is to expose an interface providing a list of all the verified peers. -To bootstrap the peer discovery, a node _must_ be able to reach one or more entry nodes. To achieve this, the implementation of the protocol _shall_ provide a hard-coded list of trusted **entry nodes** run by the IF or by trusted community members that answer to peer discovery packets coming from new nodes joining the IOTA network. This approach is a common practice of many distributed networks [[Neudecker 2018]](https://ieeexplore.ieee.org/iel7/9739/8649699/08456488.pdf). -Public Key-based Cryptography (PKC) _shall_ be used for uniquely [identifying](#Node_identities) peers and for authenticating each packet. 
-The usage of the Ping and Pong protocols is that _Ping_ are sent to verify a given peer and, upon reception of a valid _Pong_ as a response from that peer, the peer is verified. -Once a peer has been verified, it can be queried to discover new peers by sending a _DiscoveryRequest_. As a response, a _DiscoveryResponse_ _shall_ be returned, containing a list of new peers. The new peer nodes in this list _shall_ be verified by the receiving application. - -This process is summarized in the following figure and detailed in the following subsections: - -[![Peer discovery](/img/protocol_specification/peer_discovery.png 'Peer discovery')](/img/protocol_specification/peer_discovery.png) - -### Verification - -The verification process aims at both verifying peer identities and checking their online status. Each peer _shall_ maintain a list of all the known peers. This list _shall_ be called `known_peer_list`. Elements of any known peer list _shall_ contain a reference to a [Peer](#Peer) and a time at which it _shall_ be verified/re-verified. -As such, the `known_peer_list` can be seen as a time-priority queue. A newly discovered peer gets added to the list at the current time. Whenever a peer is verified, its time value on the `known_peer_list` gets updated to the time at which that peer _shall_ be re-verified. -The intent of this arrangement is to allow the node application to first verify newly discovered (and thus still unverified) peers and then to re-verify older peers (to confirm their online status) by iterating over the `known_peer_list`. -It is worthwhile to note that the order in which the `known_peer_list` is worked through is important. For example, if the peer is added to the front ('head') of the `known_peer_list`, it is possible for an adversary to front-fill the `known_peer_list` with a selection of its own nodes. This is resolved by the use of the time-priority queue. - -The verification process always initiates from a _Ping_. 
Upon reception of a _Ping_, a peer _shall_ check its validity by: - -- verifying that the signature of the _Ping_ is valid and discarding the request otherwise; -- checking that the `version` and `network_id` fields match its configuration and discarding the _Ping_ otherwise; -- checking that the `timestamp` field is fresh (i.e., not older than a given time) and discarding the packet otherwise; -- checking that the `dest_addr` matches its IP address and discarding the _Ping_ otherwise. - -Upon successful validation of a received _Ping_, a peer _shall_ respond with a _Pong_. In case the sender of the _Ping_ is a new peer from the perspective of the receiving node, the receiver peer _shall_ add it to its `known_peer_list`. This enables the verification process to also occur in the reverse direction. - -Upon reception of a _Pong_, a peer _shall_ check its validity by: - -- verifying that the signature of the _Pong_ is valid and discarding it otherwise; -- checking that the `req_hash` field matches a request (i.e. _Ping_) previously sent and not expired (i.e., the difference between the timestamp of the _Ping_ and _Pong_ is not greater than a given threshold) and discarding the associated _Ping_ or _Pong_ otherwise; -- checking that the `dest_addr` matches its IP address and discarding the associated _Ping_ or _Pong_ otherwise. - -Upon successful validation of a received _Pong_, a peer _shall_: - -- add the peer sender of the _Pong_ to a list of verified peers called `verified_peer_list`; -- move the peer entry of the `known_peer_list` to the tail. - -### Removal - -While verifying a new peer, if no or an invalid _Pong_ is received after `max_verify_attempts` attempts, that node _shall_ be removed from the `known_peer_list`. Each expected reply should have a timeout such that if no answer is received after that, an attempt is considered concluded and counted as failed. 
- -Each peer on the `verified_peer_list` _shall_ be re-verified after `verification_lifetime` hours; while re-verifying a peer, if no or invalid _Pong_ is received after `max_reverify_attempts` attempts, the peer _shall_ be removed from the `verified_peer_list`. - -### Discovery - -Each peer entry of the `verified_peer_list` may be used to discover new peers. This process is initiated by sending a _DiscoveryRequest_. - -Upon reception of a _DiscoveryRequest_, a peer node _shall_ check its validity by: - -- checking that the sender of the _DiscoveryRequest_ is a verified peer (i.e. is stored in the `verified_peer_list`) and discarding the request otherwise; -- verifying that the signature of the _DiscoveryRequest_ is valid and discarding the request otherwise; -- checking that the `timestamp` field is fresh (i.e., not older than a given time) and discarding the request otherwise. - -Upon successful validation of a received _DiscoveryRequest_, a peer _shall_ reply with a _DiscoveryResponse_. - -Upon reception of a _DiscoveryResponse_, a peer _shall_ check its validity by: - -- verifying that the signature of the _DiscoveryResponse_ is valid and discarding the response otherwise; -- checking that the `req_hash` field matches a discovery request (i.e. _DiscoveryRequest_) previously sent and not expired (i.e., the difference between the timestamp of the _DiscoveryRequest_ and _DiscoveryResponse_ is not greater than a given threshold) and discarding the response otherwise. - -Upon successful validation of a received _DiscoveryResponse_, a node _shall_ add the nodes contained in the `peers` field to the `known_peer_list`. - -## Neighbor Selection - -The goal of the neighbor selection is to build a node's neighborhood (to be used by the gossip protocol) while preventing attackers from “tricking” other nodes into becoming neighbors. 
Neighbors are established when one node sends a peering request to another node, which in turn accepts or rejects the request with a peering response. - -To prevent attacks, the protocol makes the peering request _verifiably random_ such that attackers cannot create nodes to which the target node will send requests. At its core, the neighbor selection protocol uses both a screening process called _Consensus Mana rank_ and a _score function_ that takes into account some randomness dubbed _private salt_ and _public salt_. -Half of the neighbors will be constituted from nodes that accepted the peering request, while half will be constituted of nodes that will request for the peering. The two distinct groups of neighbors are consequently called: - -- Chosen neighbors (outbound). The peers that the node proactively selected through the neighbor selection mechanism. -- Accepted neighbors (inbound). The peers that sent the peering request to the node and were accepted as a neighbor. - -### Local Variables - -Local variables defined here are included to help in understanding the protocol described in this section. The node application shall handle those variables in some form. - -- `saltUpdateInterval`: The time interval at which nodes shall update their salts. -- `responseTimeout`: The time that node waits for a response during one peering attempt. -- `requestExpirationTime`: The time used for the request timestamp validation, if the timestamp is older than this threshold the request is dropped -- `maxPeeringAttempts`: The maximum number of peering requests retries sent to the selected node before the next salt update. - -### Mana Rank Interval - -Each peer discovered and verified via the _Peer Discovery_ protocol _shall_ have a consensus mana value associated with it. 
The peer running the _Neighbor Selection_ protocol _shall_ keep this information up-to-date and use it to update a data structure called `manaRank` containing the list of the nodes' identities for each mana value. The aim of this ranking is to select a subset of peers having similar mana to the node preparing the ranking. More specifically, let's define `potentialNeighbors` to be such a subset, that is divided into a `lower` and an `upper` set with respect to a `targetMana` value (i.e., the mana value of the node performing the ranking). By iterating over the `manaRank`, each node _shall_ fill both the `lower` and `upper` sets with nodes' identities having a similar rank to itself, not less/greater than a given threshold `rho` respectively, except when each subset does not reach the minimal size `r`. - -The following pseudocode describes a reference implementation of this process: - -``` -Inputs: - manaRank: mapping between mana values and the list of nodes' identities with that mana; - targetMana: the mana value of the node performing the ranking; - rho: the ratio determining the length of the rank to consider; - r: the minimum number of nodes' identities to return for both lower and upper sets; - Largest(r, targetMana): the set of r largest cMana holders less than targetMana; - Smallest(r, targetMana): the set of r smallest cMana holders greater than targetMana; - -Outputs: - potentialNeighbors: the set of nodes' identities to consider for neighbor selection; -``` - -```vbnet -FOR mana IN manaRank - nodeID = manaRank[mana] - IF mana > targetMana - IF mana / targetMana < rho - Append(upperSet, nodeID) - ELSE IF mana == 0 || mana == targetMana - BREAK - ELSE IF targetMana / mana < rho - Append(lowerSet, nodeID) - -IF Len(lowerSet) < r - // set lowerSet with the r largest mana holders less than targetMana - lowerSet = Largest(r, targetMana) - -IF Len(upperSet) < r - // set upperSet with the r smallest mana holders greater than targetMana - upperSet = Smallest(r, 
targetMana) - -potentialNeighbors = Append(upperSet, lowerSet) -RETURN potentialNeighbors - -``` - -### Selection - -The maximum number of neighbors is a parameter of the gossip protocol. This section proposes to use a size of 8 equally divided into 4 chosen (outbound) and 4 accepted (inbound) neighbors. It is crucial to decide on a fixed number of neighbors, as the constant number decreases an eclipse probability exponentially. The chosen _k_ is a compromise between having more connections resulting in lower performance and increased protection from an eclipse attack. - -The operations involved during neighbor selection are listed in the following: - -1. Get an up-to-date list of verified and known peers from the _Peer Discovery_ protocol. -2. Use [mana rank](#Mana_rank) to filter the previous list to obtain a list of peers to be potential neighbors. -3. Use the score function to request/accept neighbors. - -The score between two nodes is measured through the score function _s_, defined by: - -s(nodeID1, nodeID2, salt) = hash(nodeID1 || nodeID2 || salt), where: - -- `nodeID1` and `nodeID2` are the identities of the considered nodes. -- `salt` is the salt value that can be private or public depending on the peering direction (inbound/outbound). -- `hash` is the `blake2b` hash function. -- `||` is the concatanation operation. - -Note that the value used as the score is an unsigned integer derived from the first 4 bytes of the byte array after the `hash` function. - -In order to connect to new neighbors, each node with ID `ownID` and public salt `pubSalt` keeps a list of potential neighbors derived via [Mana rank](#Mana_rank) that is sorted by their score `d(ownID, ·, pubSalt)`. Then, the node shall send peering requests in _ascending order_, containing its own current public salt and a timestamp representing the issuance time of the request. 
-The connecting node shall repeat this process until it has established connections to enough neighbors or it finds closer peers. Those neighbors make up its list of chosen neighbors. This entire process is also illustrated in the following pseudocode: - -``` -Inputs: - k: desired amount of neighbors; - c: current list of chosen neighbors; - p: list of potential peers; - localID: local nodeID - pubSalt: local public salt; -``` - -```vbnet -pSorted = SortByScoreAsc(P, localID, pubSalt) -FOR p IN pSorted - peeringRequest = SendPeeringRequest(p) - IF peeringRequest.accepted - Append(c, p) - IF Len(c) == Ceil(k/2) - RETURN -``` - -More specifically, after sending a peering request a node _shall_: - -- wait to get a _Peering Response_ that could be positive or negative. - - If positive, add the peer to its chosen neighbor list - - If negative, filter out the peer from future requests until the next salt update or the end of the list of potential neighbors is reached. - - If after `responseTimeout` no response is received, try again for a fixed `maxPeeringAttempts`. If not successful, filter out the peer from future requests until the next salt update or the end of the list of potential neighbors is reached. - -Similar to the previous case, in order to accept neighbors, every node with ID ownID _shall_ generate a private salt `privSalt`. 
- -Upon reception of a _Peering Request_, a peer _shall_ make a decision to accept, reject or discard the request by: - -- verifying that the signature of the _Peering Request_ is valid and discard the request otherwise; -- checking that the `timestamp` field is valid (i.e., not older than a given threshold `requestExpirationTime` specified by the node) and discard the request otherwise; -- checking that the _mana_ of the requester peer is within the own [Mana rank](#Mana_rank) and send back a _negative_ _Peering Response_ otherwise; -- checking that the requestor salt matches its hash chain by: - - taking the difference between the timestamp of the peering request and the time the initial salt was set, and then dividing this number by `saltUpdateInterval`, rounding down; - - hashing the requester public salt as many times as the number of salt changes; - - finally, if the result does not match the initial salt, discard the peering request; -- applying a statistical test to the request defined as _s(remoteID, ownID, ζ_remote) < θ_ for a fixed threshold θ, and discard it otherwise. - - this test determines the effectiveness of the brute force attack when an attacker tries to establish a connection with a desired peer; - - with θ set to 0.01 an attacker has only 1% of chance of being successful; -- accept the peering request by sending back a _positive_ _Peering Response_ if either one of the following conditions is satisfied, and send back a _negative_ _Peering Response_ otherwise: - - the current size of the accepted neighbors list is smaller than _Floor(k/2)_; - - the score defined as _s(ownID, remoteID, privSalt)_ is lower than the current highest score among accepted neighbors. In this case, send a _Peering Drop_ to drop the accepted neighbor with the highest score replaced by the requester peer. 
- -### Neighbor Removal - -Neighbor removal can occur for several reasons: - -- A node is replacing a neighbor with a better (in terms of score function) one; -- From the gossip layer, the connection with a neighbor is lost; -- If some form of reputation or bad behavior is being monitored, a neighbor could be dropped in case of misbehavior. For example, a node could respond to the peering request but choose not to gossip received blocks. - -Independently from the reason, when a peer drops a neighbor _shall_ send a _Peering Drop_ and remove the neighbor from its requested/accepted neighbor list. Upon reception of a _Peering Drop_, the peer _shall_ remove the dropping neighbor from its requested/accepted neighbor list. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/congestion_control.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/congestion_control.md deleted file mode 100644 index 9c65052e4a0..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/congestion_control.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -description: Every network has to deal with its intrinsic limited resources. GoShimmer uses congestion control algorithm to regulate the influx of blocks in the network with the goal of maximizing throughput (blocks/bytes per second) and minimizing delays. -image: /img/protocol_specification/congestion_control_algorithm_infographic_new.png -keywords: - - node - - congestion control algorithm - - honest node - - block - - access mana - - malicious nde - - scheduling ---- - -# Congestion Control - -Every network has to deal with its limited intrinsic resources in bandwidth and node capabilities (CPU and -storage). In this document, we present a congestion control algorithm to regulate the influx of blocks in the -network to maximize throughput (blocks/bytes per second) and minimize delays. 
Furthermore, the -following requirements must be satisfied: - -- **Consistency**: If an honest node writes a block, it should be written by all honest nodes within some - delay bound. -- **Fairness**: Nodes can obtain a share of the available throughput depending on their access Mana. Throughput is - shared in a way that an attempt to increase the allocation of any node necessarily results in the decrease - in the allocation of some other node with an equal or smaller allocation (max-min fairness). -- **Security**: Malicious nodes shall be unable to interfere with either of the above requirements. - -[![Congestion Control](/img/protocol_specification/congestion_control_algorithm_infographic_new.png)](/img/protocol_specification/congestion_control_algorithm_infographic_new.png) - -You can find more information in the following papers: - -- [Access Control for Distributed Ledgers in the Internet of Things: A Networking Approach](https://arxiv.org/abs/2005.07778). -- [Secure Access Control for DAG-based Distributed Ledgers](https://arxiv.org/abs/2107.10238). - -## Detailed Design - -The algorithm has three core components: - -- A scheduling algorithm that ensures fair access for all nodes according to their access Mana. -- A TCP-inspired algorithm for decentralized rate setting to utilize the available bandwidth efficiently while - preventing large delays. -- A buffer management policy to deal with malicious flows. - -### Prerequirements - -- **Node identity**: The congestion control module requires node accountability. Each block is associated with the node ID of its issuing - node. - -- **Access mana**: The congestion control module knows the access Mana of the network nodes to share the available - throughput fairly. Without access Mana, the network would be subject to Sybil attacks, which would incentivize actors - to artificially split (or aggregate) onto multiple identities. - -- **Block weight**. 
The weight of a block is used to prioritize blocks over the others, and it is calculated - based on the type and length of a block. - -### Outbox Buffer Management - -Once a block has successfully passed the block parser checks, is solid and booked, it is enqueued into the outbox -buffer for scheduling. The outbox is split into several queues, each corresponding to a different node issuing -blocks. The total outbox buffer size is limited, but individual queues do not have a size limit. This section -describes the operations of block enqueuing and dequeuing into and from the outbox buffer. - -The enqueuing mechanism includes the following components: - -- **Classification**: The mechanism identifies the queue where the block belongs according to the node ID of - the block issuer. -- **Block enqueuing**: The block is actually enqueued, the queue is sorted by block timestamps in increasing order - and counters are updated (e.g., counters for the total number of blocks in the queue). - -The dequeuing mechanism includes the following components: - -- **Queue selection**: A queue is selected according to a round-robin scheduling algorithm. In particular, the - mechanism uses a modified version of the deficit round-robin (DRR) algorithm. -- **Block dequeuing**. The first (oldest) block of the queue, that satisfies certain conditions is dequeued. A - block must satisfy the following conditions: - - The block has a ready flag assigned. A ready flag is assigned to a block when all of its parents are eligible (the parents have been scheduled or confirmed). - - The block timestamp is not in the future. -- **Block skipping**. Once a block in the outbox is confirmed by another block approving it, it will get removed from the outbox buffer. Since the block already has children and is supposed to be replicated on enough nodes in the network, it is not gossiped or added to the tip pool, hence "skipped". 
-- **Block drop**: Due to the node's bootstrapping, network congestion, or ongoing attacks, the buffer occupancy of the outbox buffer may become large. To keep bounded delays and isolate the attacker's spam, a node shall drop some blocks if the total number of blocks in all queues exceeds the maximum buffer size. Particularly, the node will drop blocks from the queue with the largest mana-scaled length, computed by dividing the number of blocks in the queue by the amount of access Mana of the corresponding node. - - `Mana-scaled queue size = queue size / node aMana`; -- **Scheduler management**: The scheduler counters and pointers are updated. - -#### False positive drop - -During an attack or congestion, a node may drop a block already scheduled by the rest of the network, causing a -_false positive drop_. This means that the block’s future cone will not be marked as _ready_ as its past cone is not -eligible. This is not a problem because blocks dropped from the outbox are already booked and confirmation comes -eventually due to blocks received from the rest of the network which approve the dropped ones. - -#### False positive schedule - -Another possible problem is that a node schedules a block that the rest of the network drops, causing a _false -positive_. The block is gossiped and added to the tip pool. However, it will never accumulate enough approval -weight to be _Confirmed_. Eventually, the node will orphan this part of tangle as the blocks in the future-cone -will not pass the [Time Since Confirmation check](tangle.md#tip-pool-and-time-since-confirmation-check) during tip -selection. - -### Scheduler - -Scheduling is the most critical task in the congestion control component. The scheduling algorithm must guarantee that -an honest node `node` meets the following requirements: - -- **Consistency**: The node's blocks will not accumulate indefinitely at any node, and so, starvation is avoided. 
-- **Fairness**: The node's fair share of the network resources are allocated to it according to its access Mana. -- **Security**: Malicious nodes sending above their allowed rate will not interrupt a node's throughput requirement. - -Although nodes in our setting are capable of more complex and customised behaviour than a typical router in a -packet-switched network, our scheduler must still be lightweight and scalable due to the potentially large number of -nodes requiring differentiated treatment. It is estimated that over 10,000 nodes operate on the Bitcoin network, and -we expect that an even greater number of nodes are likely to be present in the IoT setting. For this reason, we -adopt a scheduler based on [Deficit Round Robin](https://ieeexplore.ieee.org/document/502236) (DRR) (the Linux -implementation of the [FQ-CoDel packet scheduler](https://tools.ietf.org/html/rfc8290), which is based on DRR, -supports anywhere up to 65535 separate queues). - -The DRR scans all non-empty queues in sequence. When it selects a non-empty queue, the DDR will increment the queue's -priority counter (_deficit_) by a specific value (_quantum_). Then, the value of the deficit counter is a maximal amount -of bytes that can be sent this turn. If the deficit counter is greater than the weight of the block at the head of the -queue, the DRR can schedule this block, and this weight decrements the value of the counter. In our implementation, -the quantum is proportional to the node's access Mana, and we add a cap on the maximum deficit that a node can achieve -to keep the network latency low. It is also important to mention that the DRR can assign the weight of the block so -that specific blocks can be prioritized (low weight) or penalized (large weight); by default, in our mechanism, the -weight is proportional to the block size measured in bytes. The weight of a block is set by the -function `WorkCalculator()`. 
- -:::note - -The network manager sets up the desired maximum (fixed) rate `SCHEDULING_RATE` at which it will schedule blocks, -computed in weight per second. This implies that every block is scheduled after a delay which is equal to the weight ( -size as default) of the latest scheduled block times the parameter -`SCHEDULING_RATE`. This rate mainly depends on the degree of decentralization you desire: a larger rate leads to -higher throughput but will leave behind slower devices that will fall out of sync. - -::: - -### Rate Setting - -If nodes were continuously willing to issue new blocks,rate-setting would not be a problem. Nodes could simply operate -at a fixed, assured rate and share the total throughput according to the percentage of access Mana they own. The -scheduling algorithm would ensure that this rate is enforceable, and only misbehaving nodes would experience increasing -delays or dropped blocks. However, it is unrealistic to expect all nodes always to have blocks to issue. We would -like nodes to better utilize network resources without causing excessive congestion and violating any requirement. - -We propose a rate-setting algorithm inspired by TCP — each node employs [additive increase, multiplicative decrease] -(https://https://epubs.siam.org/doi/book/10.1137/1.9781611974225) (AIMD) rules to update their issuance rate in response -to congestion events. In the case of distributed ledgers, all block traffic passes through all nodes, contrary to the -case of traffic typically found in packet-switched networks and other traditional network architectures. Under these -conditions, local congestion at a node is all that is required to indicate congestion elsewhere in the network. This -observation is crucial as it presents an opportunity for a congestion control algorithm based entirely on local traffic. - -Our rate-setting algorithm outlines the AIMD rules employed by each node to set their issuance rate. 
Rate updates for a -node occur each time a new block is scheduled if the node has a non-empty set of its own blocks that are not yet -scheduled. The node sets its own local additive-increase variable `localIncrease(node)` based on its access Mana and a -global increase rate parameter `RATE_SETTING_INCREASE`. An appropriate choice of -`RATE_SETTING_INCREASE` ensures a conservative global increase rate that does not cause problems even when many nodes -simultaneously increase their rate. - -Nodes wait `RATE_SETTING_PAUSE` seconds after a global multiplicative decrease parameter `RATE_SETTING_DECREASE`, during -which no further updates are made, to allow the reduced rate to take effect and prevent multiple successive decreases. -At each update, the node checks how many of its own blocks are in its outbox queue and responds with a multiplicative -decrease if this number is above a threshold, -`backoff(node)`, which is proportional to the node's access Mana. If the number of the node's blocks in the outbox is -below the threshold, the node's issuance rate is incremented by its local increase variable, `localIncrease(node)`. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/consensus_mechanism.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/consensus_mechanism.md deleted file mode 100644 index dfc6bede7bf..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/consensus_mechanism.md +++ /dev/null @@ -1,259 +0,0 @@ ---- -description: The consensus mechanism is necessary to achieve agreement among the nodes of the network. Since the Tangle is only partially ordered we have designed an open and leaderless consensus mechanism which combines FPC and Approval Weight. 
- -keywords: - - node - - approval weight - - conflict - - opinion - - block - - high probability - - active consensus mana ---- - -# Consensus Mechanism - -The consensus mechanism is necessary to achieve agreement among the nodes of the network. In case of a double spend, one way to decide which transaction should be considered valid would be to order them and pick the oldest one. However, the Tangle is only partially ordered. To tackle this problem in the context of the Tangle, we have designed an open and leaderless consensus mechanism that utilizes the Tangle as a medium to exchange votes. Any node can add a block to the Tangle, and each block added to the Tangle represents a virtual vote (i.e. there is no additional overhead to communicate votes) to its entire past. - -The consensus mechanism can broadly be devided into consensus on two separate entities. On the one hand, we need to resolve any conflicts on the underlying UTXO ledger to prevent double spends. On the other hand, we need to make sure that blocks within the Tangle are not orphaned. Both are simply derived by observing the Tangle and objectively keeping track of [Approval Weight (AW)](#approval-weight-aw) with cMana (more specifically [active cMana](#active-cmana)) as a Sibyl protection. Once a [conflict](ledgerstate.md#conflicts) (or block) reaches a certain AW threshold, an application can consider it as _confirmed_. To simplify this notion we introduce [Grades of Finality (GoF)](#grades-of-finality-gof) where a higher GoF represents a higher confidence. 
- -| Name | Component | Initial/local opinion | Consensus | Comparable blockchain mechanism for voting/finality | -| ------------------- | ----------- | --------------------------------- | -------------- | --------------------------------------------------- | -| voting on conflicts | UTXO ledger | OTV/FPCS | conflict/tx AW | longest chain rule | -| finality of blocks | Tangle | inclusion score via tip selection | block AW | x block rule | - -On an abstract level, a node can be seen as a replicated state machine, just following whatever it receives through the Tangle, and, in case of blocks containing transactions, modifying the UTXO ledger. Only when a node wants to issue a block (read as: _cast a vote_) it needs to evaluate its own local opinion via [modular conflict selection function](#modular-conflict-selection-function). This decoupling of coming to consensus and setting the initial opinion allows for great flexibility and separation of concerns. - -## Approval Weight (AW) - -Approval weight represents the [weight](#active-consensus-mana) of conflicts (and blocks), similar to the longest chain rule in Nakamoto consensus. However, instead of selecting a leader based on a puzzle (PoW) or stake (PoS), it allows every node to express its opinion by simply issuing any block and attaching it in a part of the Tangle it _likes_ (based on its initial opinion on blocks and possibly utilizing the [like switch](#like-switch) to express its opinion on conflicts). - -It is important to note that tracking of AW for conflicts and markers/blocks is orthogonal. Thus, a block can reach a high AW whereas its contained payload, e.g., a transaction being a double spend, does not reach any AW on conflict/UTXO level. - -### Detailed Design - -Approval weight AW increases because of voters (nodes) that cast votes for conflicts and blocks by means of making statements. 
This is necessary due to the changing nature of cMana over time, which prevents simply counting the AW per conflict or block. Additionally, whenever a node changes its opinion on a conflict, the previous vote needs to be invalidated. - -#### Definitions - -- **Statement**: A statement is any block issued by a _node_, expressing its opinion and casting a (virtual) vote. It can be objectively ordered by its timestamp, and, if equal, its block ID. -- **Conflict voter**: A conflict voter is a _node_ that issued a statement attaching to a conflict, and, thus, voting for it. -- **Marker/block voter**: A marker/block's voter is a _node_ that issued a statement directly or indirectly referencing a marker/block, including its issuer. - -#### Conflicts - -Tracking voters of [conflicts](ledgerstate.md#conflicts) is an effective way of objective virtual voting. It allows nodes to express their opinion simply by attaching a statement to a conflict they like (see [like switch](#like-switch)). This statement needs to propagate down the conflict DAG, adding support to each of the conflict's parents. In case a voter changes their opinion, support needs to be revoked from all conflicting conflicts and their children. Thus, a node can only support one conflict of a conflict set. - -To make this more clear consider the following example: - -[![Conflict Voter](/img/protocol_specification/conflicts.png)](/img/protocol_specification/conflicts.png) - -The green node issued **statement 1** and attached it to the aggregated conflict `Conflict 1.1 + Conflict 4.1.1`. Thus, the green node is a voter of all the aggregated conflict's parent conflicts, which are (from top to bottom) `Conflict 4.1.1`, `Conflict 1.1`, `Conflict 4.1`, `Conflict 1`, and `Conflict 4`. - -Then, the green node issued **statement 2** and attached it to `Conflict 4.1.2`. 
This makes the green node a voter of `Conflict 4.1.2`, however, `Conflict 4.1.1` is its conflict conflict and thus support for `Conflict 4.1.1` has to be revoked. - -`Conflict 4.1`, `Conflict 4` are parent conflicts of `Conflict 4.1.2`, which the green node is still supporting. Since `Conflict 1.1`, `Conflict 1` are not conflicting to either of `Conflict 4.1.2`'s parents, the green node remains their voter. - -Finally, the green nodes issued **statement 3**, which is in `Conflict 2`. Now the green node is a voter of `Conflict 2`, and no longer a voter of `Conflict 1`, since `Conflict 1` is conflicting to `Conflict 2`. Note that, this voter removal will propagate to child conflicts. Thus, the green node is removed from `Conflict 1.1` as well. -`Conflict 3`, `4` and both of their child conflicts have nothing to do with this attachement, the voter status remains. - -It is important to notice that the arrival order of the statements does not make a difference on the final outcome. Due to the fact that statements can be ordered objectively, every node in the network eventually comes to the same conclusion as to who is supporting which conflict, even when nodes change their opinions. - -##### Calculation of Approval Weight - -The approval weight itself is calculated every time a new voter is added/removed to a conflict. The AW for a conflict _B_ is calculated as follows: - -``` -AW(B) = 'active cMana of voters(B)' / 'total active cMana' -``` - -#### Markers - -It would be computationally expensive to track the AW for each block individually. Instead, we approximate the AW with the help of [markers](markers.md). Once a marker fulfills a GoF, the corresponding GoF value is propagated into its past cone until all blocks have an equal or higher GoF. - -Recall that markers are not part of the core protocol. As such, this description is merely an optimization from an implementation standpoint. 
- -Rather than keeping a list of voters for each marker and collecting voters for each marker (which would also be expensive), we keep a list of voters along with its approved marker index for each marker sequence. This approach provides a simple and fast look-up for marker voters making use of the Tangle structure as mapped by the markers. - -For each marker sequence, we keep a map of voter to marker index, meaning a voter supports a marker index `i`. This implies that the voter supports all markers with index `<= i`. - -Take the figure below as an example: -![MarkersApprovalWeight SequenceVoters](/img/protocol_specification/MarkersApprovalWeight.png) - -The purple circles represent markers of the same sequence, the numbers are marker indices. - -Four nodes (A to D) issue statements with past markers of the purple sequence. Node A and D issue blocks having past marker with index 6, thus node A and D are the voters of marker 6 and all markers before, which is 1 to 5. On the other hand, node B issues a block having past marker with index 3, which implies node B is a voter for marker 1 and 2 as well. - -This is a fast look-up and avoids walking through a marker's future cone when it comes to retrieving voters for approval weight calculation. - -For example, to find all voter of marker 2, we iterate through the map and filter out those support marker with `index >= 2`. In this case, all nodes are its voters. As for marker 5, it has voters node A and D, which fulfill the check: `index >= 5`. - -Here is another more complicated example with parent sequences: -![MarkersApprovalWeight SequenceVoters](/img/protocol_specification/MarkersApprovalWeightSequenceVoters.png) - -The voter will be propagated to the parent sequence. - -Node A issues block A2 having past markers `[1,4], [3,4]`, which implies node A is a voter for marker `[1,1]` to `[1,4]`, `[2,1]` to `[2,3]`, and `[3,4]` as well as the block with marker `[3,5]` itself. 
- -##### Calculation of Approval Weight - -The approval weight itself is calculated every time a new voter is added to a marker. The AW for a marker _M_ is calculated as follows: - -``` -AW(M) = 'active cMana of voters(M)' / 'total active cMana' -``` - -### Grades of Finality (GoF) - -The tracking of AW itself is objective as long as the Tangle converges on all nodes. However, delays, network splits and ongoing attacks might lead to differences in perception so that a finality can only be expressed probabilistically. The higher the AW, the less likely a decision is going to be reversed. To abstract and simplify this concept we introduce the GoF. Currently, they are simply a translation of AW thresholds to a GoF, but one can imagine other factors as well. - -**Block / non-conflicting transaction** -GoF | AW --- | -- -0 | < 0.25 -1 | >= 0.25 -2 | >= 0.45 -3 | >= 0.67 - -**Conflict / conflicting transaction** -GoF | AW --- | -- -0 | < 0.25 -1 | >= 0.25 -2 | >= 0.45 -3 | >= 0.67 - -These thresholds play a curcial role in the safety vs. liveness of the protocol, together with the exact workings of [active cMana](#active-cmana). We are currently investigating them with in-depth simulations. - -- The higher the AW threshold the more voters a conflict or block will need to reach a certain GoF -> more secure but higher confirmation time. -- As a consequence of the above point, TangleTime will be tougher to advance; making the cMana window more likely to get stuck and confirmations to halt forever. - -An application needs to decide when to consider a block and (conflicting) transaction as _confirmed_ based on its safety requirements. Conversely, a block or conflict that does not gain enough AW stays pending forever (and is orphaned/removed on snapshotting time). - -## Modular Conflict Selection Function - -The modular conflict selection function is an abstraction on how a node sets an initial opinion on conflicts. 
By decoupling the objective perception of AW and a node's initial opinion, we gain flexibility and it becomes effortless to change the way we set initial opinions without modifying anything related to the AW. - -### Pure On Tangle Voting (OTV) - -The idea of pure OTV is simple: set the initial opinion based on the currently heavier conflict as perceived by AW. However, building a coherent overall opinion means that conflicting realities (possible outcomes of overlapping conflict sets) can not be liked at the same time, which makes finding the heaviest conflict to like not as trivial as it may seem. - -In the examples below, a snapshot at a certain time of a UTXO-DAG with its conflicts is shown. The gray boxes labelled with `O:X` represent an output and and arrow from an output to a transaction means that the transaction is consuming this output. An arrow from a transaction to an output creates this output. An output being consumed multiple times is a conflict and the transactions create a conflict, respectively. The number assiged to a conflict, e.g., `Conflict A = 0.2`, defines the currently perceived Approval Weight of the conflict. A conflict highlighted in **bold** is the outcome of applying the pure OTV rules, i.e., the conflicts that are liked from the perspective of the node. - -**Example 1** -The example below shows how applying the heavier conflict rule recursively results in the end result of `A`, `C`, `E`, and thus the aggregated conflict `C+E` being liked. Looking at the individual conflict weights this result might be surprising: conflict `B` has a weight of `0.3` which is bigger than its conflict conflict `A = 0.2`. However, `B` is also in conflict with `C` which has an even higher weight `0.4`. Thus, `C` is liked, `B` cannot be liked, and `A` suddenly can become liked again. - -`E = 0.35` is heavier than `D = 0.15` and is therefore liked. An (aggregated) conflict can only be liked if all its parents are liked which is the case with `C+E`. 
- -![OTV example 1](/img/protocol_specification/otv-example-1.png) - -**Example 2** -This example is exactly the same as example 1, except that conflict `C` has a weight of `0.25` instead of `0.4`. Now the end result is conflicts `B` and `E` liked. Conflict `B` is heavier than conflict `C` and `A` (winning in all its conflict sets) and becomes liked. Therefore, neither `A` nor `C` can be liked. - -Again, `E = 0.35` is heavier than `D = 0.15` and is therefore liked. An (aggregated) conflict can only be liked if all its parents are liked which is not the case with `C+E` in this example. - -![OTV example 2](/img/protocol_specification/otv-example-2.png) - -### Metastability: OTV and FPCS - -Pure OTV is susceptible to metastability attacks: If a powerful attacker can keep any conflict of a conflict set reaching a high enough approval weight, the attacker can prevent the network from tipping to a side and thus theoretically halt a decision on the given conflicts indefinitely. Only the decision on the targeted conflicts is affected but the rest of the consensus can continue working. By forcing a conflict to stay unresolved, an attacker can, at most, prevent a node from pruning resources related to the pending decision. - -In order to prevent such attacks from happening we are planning to implement FPCS with OTV as a conflict selection function. A more detailed description can be found [here](https://iota.cafe/t/on-tangle-voting-with-fpcs/1218). - -## Like Switch - -Without the like switch, blocks vote for conflicts simply by attaching in their underlying conflict's future cone. While this principle is simple, it has one major flaw: the part of the Tangle of the losing conflict is abandoned so that only the _valid_ part remains. This might lead to mass orphanage of "unlucky" blocks that happened to first vote for the losing conflict. 
With the help of weak parents these blocks might be _rescued_ without a reattachment but the nature of weak parents makes it necessary so that every block needs to be picked up individually. Next to the fact that keeping such a weak tip pool is computationally expensive, it also open up orphanage attack scenarios by keeping conflicts undecided (metastability attack turns into orphanage attack). - -The like switch is a special type of parent reference that enables keeping everything in the Tangle, even conflicting transactions that are not included into the valid ledger state by means of consensus. Therefore, it prevents mass orphanage and enables a decoupling of **voting on conflicts (UTXO ledger)** and **finality of blocks / voting on blocks (Tangle)**. It makes the overall protocol (and its implementation) not only more efficient but also easier to reason about and allows for lazy evaluation of a node's opinion, namely only when a node wants to issue a block (read as: _cast a vote_). - -From a high-level perspective, the like switch can be seen as a set of rules that influence the way a block inherits its conflicts. Using only strong parents, a block inherits all its parents' conflicts. A like parent retains all the properties of the strong parent (i.e., inherit the conflict of said parent) but additionally it means to exclude all conflicts that are conflicting with the liked conflict. -Through this mechanism, it becomes possible to attach a block anywhere in the Tangle but still only vote for the conflicts that are liked. Thus, decoupling of block AW and conflict AW. - -**Examples** -To make this more clear, let's consider the following examples. The conflicts `A` and `B`, as well as `C` and `D` form an independent conflict set, respectively. The `Conflict Weights` are the weights as perceived by the node that currently wants to create a block. - -**Block creation** -A node performs random tip selection (e.g. 
URTS) and in this example selects blocks `5` and `11` as strong parents. Now the node needs to determine whether it currently _likes_ all the conflicts of the selected blocks (`red, yellow, green`) in order to apply the like switch (if necessary) and vote for its liked conflicts. - -![Like switch: block creation undecided](/img/protocol_specification/like-switch-block-creation-1.png) - -When performing the conflict selection function with pure OTV it will yield the result: - -- `red` is disliked, instead like `purple` -- `green` is disliked, instead like `yellow` - -Therefore, the block needs to set two like references to the blocks that introduced the conflict (first attachment of the transaction). The final result is the following: - -- conflicts `purple` and `yellow` are supported -- block `5` (and its entire past cone, `3`, `2`, `1`) is supported -- block `11` (and its entire past cone, `6`, `4`, `2`, `1`) is supported -- block `6` (and its entire past cone, `4`, `2`, `1`) -- block `7` (and its entire past cone, `1`) - -![Like switch: block creation](/img/protocol_specification/like-switch-block-creation-2.png) - -**Block booking** -On the flip side of block creation (casting a vote) is applying a vote when booking a block, where the process is essentiall just reversed. Assuming a node receives block `17`. First, it determines the conflicts of the strong parents (`red`, `yellow`, `purple`) and like parents (`red`). Now, it removes all conflicts that are conflicting with the like parents' conflicts (i.e., `purple`) and is left with the conflicts `red` and `yellow` (and adds the like conflicts to it, which in this case is without effect). If the resulting conflict is `invalid`, (e.g., because it combines conflicting conflicts) then the block itself is considered invalid because the issuer did not follow the protocol. 
- -In this example the final result is the following (block `17` supports): - -- conflicts `red` and `yellow` -- block `16` and its entire past cone -- block `11` and its entire past cone -- block `4` and its entire past cone - -![Like switch: block booking](/img/protocol_specification/like-switch-block-booking.png) - -## Active cMana - -The consensus mechanism weighs votes on conflicts (blocks in future cone with like switch) or block inclusion (all blocks in future cone) by the limited resource cMana, which is thus our Sybil-protection mechanism. cMana can be pledged to any nodeID, including offline or non-existing nodes, when transferring funds (in the proportion of the funds) and is instantly available (current implementation without EMA). Funds might get lost/locked over time, and, therefore, the total accessible cMana declines as well. **Consequently, a fixed cMana threshold cannot be used to determine a voting outcome.** - -Finalization of voting outcomes should happen once a conflict is _sufficiently_ decided and/or a block is _sufficiently_ deep in the Tangle. However, measuring AW in terms of cMana alone does not yield enough information. Therefore, a degree/grade of finalization and of voting outcome in relation to _something_, e.g., **recently active nodes**, is preferred. This measure should have the following properties: - -- security/resilience against various attacks -- lose influence (shortly) after losing access to funds -- no possibility of long-range attacks -- no too quick fluctuations -- real incentives - -### Current Implementation - -Active cMana in GoShimmer basically combines two components in an active cMana WeightProvider: the TangleTime and the current state of cMana. A node is considered to be active if it has issued any block in the last `activeTimeThreshold=30min` with respect to the TangleTime. The total active consensus mana is, therefore, the sum of all the consensus mana of each active node. 
- -#### TangleTime - -The TangleTime is the issuing time of the last confirmed block. It cannot be attacked without controlling enough mana to accept incorrect timestamps, making it a reliable, attack-resistant quantity. - -![Tangle Time](/img/protocol_specification/tangle_time.jpg) - -#### cMana - -The current state of cMana is simply the current cMana vector, at the time the active cMana is requested. - -#### Putting it together - -Every node keeps track of a list of active nodes locally. Whenever a node issues a block it is added to the list of active nodes (nodeID -> issuing time of latest block). When the active cMana is requested all relevant node weights are returned. Relevant here means the following: - -- the node has more than `minimumManaThreshold=0` cMana to prevent bloating attacks with too little cMana -- there is a block that fulfills the condition `issuing time <= TangleTime && TangleTime - issuing time <= activeTimeThreshold` where `activeTimeThreshold=30min` (see the following example, blocks `1` and `3` are not within the window) - -![Active cMana window](/img/protocol_specification/active-cMana-window.png) - -### Example - -When syncing (`TT=t0`) and booking a block from time `t1`, active cMana is considered from `t0-activeTimeThreshold`. Once this block gets confirmed, the TangleTime advances to `TT=t1`. For the next block at `t2`, `TT=t1-activeTimeThreshold` will be considered. Using active cMana in this way, we basically get a sliding window of how the Tangle emerged and _replay_ it from the past to the present. - -### Pros - -- replaying the Tangle as it emerged -- always use cMana from the current perspective -- relatively simple concept - -### Cons - -- active cMana does not yield sufficient information (e.g. when eclipsed), it might look like something is 100% confirmed even though only 2% of the total cMana are considered active. 
-- active cMana might change quickly if nodes with high mana suddenly become active -- if nodes are only able to issue blocks when "in sync" and no block gets confirmed within that time, nobody might be able to issue blocks anymore -- if a majority/all active cMana nodes go offline _within the active cMana window_, consensus will halt forever because the TangleTime can never advance unless a majority of these nodes move the TangleTime forward - -This reflects the current implementation and we are currently investigating active cMana with in-depth simulations to improve the mechanism. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/ledgerstate.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/ledgerstate.md deleted file mode 100644 index 8c8b9eda6d6..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/ledgerstate.md +++ /dev/null @@ -1,323 +0,0 @@ ---- -description: The unspent transaction output (UTXO) model defines a ledger state where balances are not directly associated with addresses but with the outputs of transactions. Transactions specify the outputs of previous transactions as inputs, which are consumed in order to create new outputs. -image: /img/protocol_specification/utxo_fund_flow.png -keywords: - - transactions - - ledger state - - unlock block - - essence - - utxo - - input - - signature unlock block - - reference unlock block - - conflict conflict - - aggregate conflict ---- - -## UTXO model - -The unspent transaction output (UTXO) model defines a ledger state where balances are not directly associated with addresses but with the outputs of transactions. In this model, transactions specify the outputs of previous transactions as inputs, which are consumed in order to create new outputs. -A transaction must consume the entirety of the specified inputs. The section unlocking the inputs is called an _unlock block_. 
An unlock block may contain a signature proving ownership of a given input's address and/or other unlock criteria. - -The following image depicts the flow of funds using UTXO: - -[![Flow of funds using UTXO](/img/protocol_specification/utxo_fund_flow.png 'Flow of funds using UTXO')](/img/protocol_specification/utxo_fund_flow.png) - -## Transaction Layout - -A _Transaction_ payload is made up of two parts: - -1. The _Transaction Essence_ part contains: version, timestamp, nodeID of the aMana pledge, nodeID of the cMana pledge, inputs, outputs and an optional data payload. -2. The _Unlock Blocks_ which unlock the _Transaction Essence_'s inputs. In case the unlock block contains a signature, it signs the entire _Transaction Essence_ part. - -All values are serialized in little-endian encoding (it stores the most significant byte of a word at the largest address and the smallest byte at the smallest address). The serialized form of the transaction is deterministic, meaning the same logical transaction always results in the same serialized byte sequence. - -### Transaction Essence - -The _Transaction Essence_ of a _Transaction_ carries a version, timestamp, nodeID of the aMana pledge, nodeID of the cMana pledge, inputs, outputs and an optional data payload. - -### Inputs - -The _Inputs_ part holds the inputs to consume, that in turn fund the outputs of the _Transaction Essence_. There is only one supported type of input as of now, the _UTXO Input_. In the future, more types of inputs may be specified as part of protocol upgrades. - -Each defined input must be accompanied by a corresponding _Unlock Block_ at the same index in the _Unlock Blocks_ part of the _Transaction_. -If multiple inputs may be unlocked through the same _Unlock Block_, the given _Unlock Block_ only needs to be specified at the index of the first input that gets unlocked by it. 
-Subsequent inputs that are unlocked through the same data must have a _Reference Unlock Block_ pointing to the previous _Unlock Block_. -This ensures that no duplicate data needs to occur in the same transaction. - -#### UTXO Input - -| Name | Type | Description | -| ------------------------ | ------------- | ----------------------------------------------------------------------- | -| Input Type | uint8 | Set to value 0 to denote an _UTXO Input_. | -| Transaction ID | ByteArray[32] | The BLAKE2b-256 hash of the transaction from which the UTXO comes from. | -| Transaction Output Index | uint16 | The index of the output on the referenced transaction to consume. | - -A _UTXO Input_ is an input which references an output of a previous transaction by using the given transaction's BLAKE2b-256 hash + the index of the output on that transaction. -A _UTXO Input_ must be accompanied by an _Unlock Block_ for the corresponding type of output the _UTXO Input_ is referencing. - -Example: If the input references outputs to an Ed25519 address, then the corresponding unlock block must be of type _Signature Unlock Block_ holding an Ed25519 signature. - -### Outputs - -The _Outputs_ part holds the outputs to create with this _Transaction Payload_. There are different types of output: - -- _SigLockedSingleOutput_ -- _SigLockedAssetOutput_ - -#### SigLockedSingleOutput - -| Name | Type | Description | -| --------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------- | -| Output Type | uint8 | Set to value 0 to denote a _SigLockedSingleOutput_. | -| Address `oneOf` | [Ed25519 Address](#ed25519-address) \| [BLS Address](#bls-address) | The raw bytes of the Ed25519/BLS address which is a BLAKE2b-256 hash of the Ed25519/BLS public key | -| Balance | uint64 | The balance of IOTA tokens to deposit with this _SigLockedSingleOutput_ output. 
| - -##### Ed25519 Address - -| Name | Type | Description | -| ------------ | ------------- | ------------------------------------------------------------------------------------------- | -| Address Type | uint8 | Set to value 0 to denote an _Ed25519 Address_. | -| Address | ByteArray[32] | The raw bytes of the Ed25519 address which is a BLAKE2b-256 hash of the Ed25519 public key. | - -#### BLS Address - -| Name | Type | Description | -| ------------ | ------------- | ----------------------------------------------------------------------------------- | -| Address Type | uint8 | Set to value 1 to denote a _BLS Address_. | -| Address | ByteArray[49] | The raw bytes of the BLS address which is a BLAKE2b-256 hash of the BLS public key. | - -The _SigLockedSingleOutput_ defines an output holding an IOTA balance linked to a single address; it is unlocked via a valid signature proving ownership over the given address. Such output may hold an address of different types. - -#### SigLockedAssetOutput - -| Name | Type | Description | -| -------------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------- | -| Output Type | uint8 | Set to value 1 to denote a _SigLockedAssetOutput_. | -| Address `oneOf` | [Ed25519 Address](#ed25519-address) \| [BLS Address](#bls-address) | The raw bytes of the Ed25519/BLS address which is a BLAKE2b-256 hash of the Ed25519/BLS public key | -| Balances count | uint32 | The number of individual balances. | -| AssetBalance `anyOf` | [Asset Balance](#asset-balance) | The balance of the tokenized asset. | - -##### Asset Balance - -The balance of the tokenized asset. - -| Name | Type | Description | -| ------- | ------------- | ----------------------------------- | -| AssetID | ByteArray[32] | The ID of the tokenized asset | -| Balance | uint64 | The balance of the tokenized asset. 
| - -The _SigLockedAssetOutput_ defines an output holding a balance for each specified tokenized asset linked to a single address; it is unlocked via a valid signature proving ownership over the given address. Such output may hold an address of different types. -The ID of any tokenized asset is defined by the BLAKE2b-256 hash of the OutputID that created the asset. - -### Payload - -The payload part of a _Transaction Essence_ may hold an optional payload. This payload does not affect the validity of the _Transaction Essence_. If the transaction is not valid, then the payload _shall_ be discarded. - -### Unlock Blocks - -The _Unlock Blocks_ part holds the unlock blocks unlocking inputs within a _Transaction Essence_. - -There are different types of _Unlock Blocks_: -| Name | Unlock Type | Description | -| ---------------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -| Signature Unlock Block | 0 | An unlock block holding one or more signatures unlocking one or more inputs. | -| Reference Unlock Block | 1 | An unlock block which must reference a previous unlock block which unlocks also the input at the same index as this _Reference Unlock Block_. | - -#### Signature Unlock Block - -| Name | Type | Description | -| ----------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------- | -| Unlock Type | uint8 | Set to value 0 to denote a _Signature Unlock Block_. | -| Signature `oneOf` | [Ed25519 Address](#ed25519-address) \| [BLS Address](#bls-address) | The raw bytes of the Ed25519/BLS address which is a BLAKE2b-256 hash of the Ed25519/BLS public key | - -A _Signature Unlock Block_ defines an _Unlock Block_ which holds one or more signatures unlocking one or more inputs. 
-Such a block signs the entire _Transaction Essence_ part of a _Transaction Payload_ including the optional payload. - -#### Reference Unlock block - -| Name | Type | Description | -| ----------- | ------ | ---------------------------------------------------- | -| Unlock Type | uint8 | Set to value 1 to denote a _Reference Unlock Block_. | -| Reference | uint16 | Represents the index of a previous unlock block. | - -A _Reference Unlock Block_ defines an _Unlock Block_ that references a previous _Unlock Block_ (that must not be another _Reference Unlock Block_). It must be used if multiple inputs can be unlocked through the same origin _Unlock Block_. - -Example: -Consider a _Transaction Essence_ containing _UTXO Inputs_ A, B and C, where A and C are both spending the UTXOs originating from the same Ed25519 address. The _Unlock Block_ part must thereby have the following structure: - -| Index | Must Contain | -| ----- | ---------------------------------------------------------------------------------------------------------- | -| 0 | A _Signature Unlock Block_ holding the corresponding Ed25519 signature to unlock A and C. | -| 1 | A _Signature Unlock Block_ that unlocks B. | -| 2 | A _Reference Unlock Block_ that references index 0, since C also gets unlocked by the same signature as A. | - -## Validation - -A _Transaction_ payload has different validation stages since some validation steps can only be executed at the point when certain information has (or has not) been received. We, therefore, distinguish between syntactical and semantic validation. - -### Transaction Syntactical Validation - -This validation can commence as soon as the transaction data has been received in its entirety. It validates the structure but not the signatures of the transaction. A transaction must be discarded right away if it does not pass this stage. 
- -The following criteria define whether the transaction passes the syntactical validation: - -- Transaction Essence: - - `Transaction Essence Version` value must be 0. - - The `timestamp` of the _Transaction Essence_ must be older than (or equal to) the `timestamp` of the block - containing the transaction by at most 10 minutes. - - A _Transaction Essence_ must contain at least one input and output. -- Inputs: - - `Inputs Count` must be 0 < x < 128. - - At least one input must be specified. - - `Input Type` value must be 0, denoting an `UTXO Input`. - - `UTXO Input`: - - `Transaction Output Index` must be 0 ≤ x < 128. - - Every combination of `Transaction ID` + `Transaction Output Index` must be unique in the inputs set. - - Inputs must be in lexicographical order of their serialized form.1 -- Outputs: - - `Outputs Count` must be 0 < x < 128. - - At least one output must be specified. - - `Output Type` must be 0, denoting a `SigLockedSingleOutput`. - - `SigLockedSingleOutput`: - - `Address Type` must either be 0 or 1, denoting an `Ed25519` - or `BLS` address . - - The `Address` must be unique in the set of `SigLockedSingleOutputs`. - - `Amount` must be > 0. - - Outputs must be in lexicographical order by their serialized form. This ensures that serialization of the transaction becomes deterministic, meaning that libraries always produce the same bytes given the logical transaction. - - Accumulated output balance must not exceed the total supply of tokens `2,779,530,283,277,761`. -- `Payload Length` must be 0 (to indicate that there's no payload) or be valid for the specified payload type. -- `Payload Type` must be one of the supported payload types if `Payload Length` is not 0. -- `Unlock Blocks Count` must match the number of inputs. Must be 0 < x < 128. -- `Unlock Block Type` must either be 0 or 1, denoting a `Signature Unlock Block` or `Reference Unlock block`. -- `Signature Unlock Blocks` must define either an `Ed25519`- or `BLS Signature`. 
-- A `Signature Unlock Block` unlocking multiple inputs must only appear once (be unique) and be positioned at the same index of the first input it unlocks. All other inputs unlocked by the same `Signature Unlock Block` must have a companion `Reference Unlock Block` at the same index as the corresponding input that points to the origin `Signature Unlock Block`. -- `Reference Unlock Blocks` must specify a previous `Unlock Block` that is not of type `Reference Unlock Block`. The referenced index must therefore be smaller than the index of the `Reference Unlock Block`. -- Given the type and length information, the _Transaction_ must consume the entire byte array the `Payload Length` field in the _Block_ defines. - -### Transaction Semantic Validation - -The following criteria define whether the transaction passes the semantic validation: - -1. All the UTXOs the transaction references are known (booked) and unspent. -1. The transaction is spending the entirety of the funds of the referenced UTXOs to the outputs. -1. The address type of the referenced UTXO must match the signature type contained in the corresponding _Signature Unlock Block_. -1. The _Signature Unlock Blocks_ are valid, i.e. the signatures prove ownership over the addresses of the referenced UTXOs. - -If a transaction passes the semantic validation, its referenced UTXOs _shall_ be marked as spent and the corresponding new outputs _shall_ be booked/specified in the ledger. - -Transactions that do not pass semantic validation _shall_ be discarded. Their UTXOs are not marked as spent and neither are their outputs booked into the ledger. Moreover, their blocks _shall_ be considered invalid. - -# Ledger State - -The introduction of a voting-based consensus requires a fast and easy way to determine a node's initial opinion for every received transaction. This includes the ability to both detect double spends and transactions that try to spend non-existing funds. 
-These conditions are fulfilled by the introduction of an Unspent Transaction Output (UTXO) model for record-keeping, which enables the validation of transactions in real time. - -The concept of UTXO style transactions is directly linked to the creation of a directed acyclic graph (DAG), in which the vertices are transactions and the links between these are determined by the outputs and inputs of transactions. - -To deal with double spends and leverage on certain properties of UTXO, we introduce the Realities Ledger State. - -## Realities Ledger State - -In the Realities Ledger State, we model the different perceptions of the ledger state that exist in the Tangle. In each “reality” on its own there are zero conflicting transactions. -Each reality thus forms an in itself consistent UTXO sub-DAG, where every transaction references any other transaction correctly. - -Since outputs of transactions can only be consumed once, a transaction that double spends outputs creates a persistent conflict in a corresponding UTXO DAG. Each conflict receives a unique identifier `conflictID`. These conflicts cannot be merged by any vertices (transactions). -A transaction that attempts to merge incompatible conflicts fails to pass a validity check and is marked as invalid. - -The composition of all realities defines the Realities Ledger State. - -From this composition nodes are able to know which possible outcomes for the Tangle exist, where they split, how they relate to each other, if they can be merged and which blocks are valid tips. All of this information can be retrieved in a fast and efficient way without having to walk the Tangle. - -Ultimately, for a set of competing realities, only one reality can survive. It is then up to the consensus protocol to determine which conflict is part of the eventually accepted reality. 
- -In total the ledger state thus involves three different layers: - -- the UTXO DAG, -- its extension to the corresponding conflict DAG, -- the Tangle which maps the parent relations between blocks and thus also transactions. - -## The UTXO DAG - -The UTXO DAG models the relationship between transactions, by tracking which outputs have been spent by what transaction. Since outputs can only be spent once, we use this property to detect double spends. - -Instead of immediately permitting only one transaction into the ledger state, we allow for different versions of the ledger to coexist temporarily. -This is enabled by extending the UTXO DAG by the introduction of conflicts, see the following section. We can then determine which conflicting versions of the ledger state exist in the presence of conflicts. - -### Conflict Sets and Detection of Double Spends - -We maintain a list of consumers `consumerList` associated with every output, that keeps track of which transactions have spent that particular output. Outputs without consumers are considered to be unspent outputs. Transactions that consume an output that has more than one consumer are considered to be double spends. - -If there is more than one consumer in the consumer list we _shall_ create a conflict set list `conflictSet`, which is identical to the consumer list. The `conflictSet` is uniquely identified by the unique identifier `conflictSetID`. Since the `outputID` is directly and uniquely linked to the conflict set, we set `conflictSetID=outputID`. - -## Conflicts - -The UTXO model and the concept of solidification make all non-conflicting transactions converge to the same ledger state no matter in which order the transactions are received. Blocks containing these transactions could always reference each other in the Tangle without limitations. - -However, every double spend creates a new possible version of the ledger state that will no longer converge. 
Whenever a double spend is detected, see the previous section, we track the outputs created by the conflicting transactions and all of the transactions that spend these outputs, by creating a container for them in the ledger which we call a conflict. - -More specifically a container `conflict` _shall_ be created for each transaction that double spends one or several outputs, or if transactions aggregated those conflicts. -Every transaction that spends directly or indirectly from a transaction in a given `conflict`, i.e. is in the future cone in the UTXO DAG of the double-spending transaction that created `conflict`, is also contained in this `conflict` or one of the child conflicts. -A conflict that was created by a transaction that spends multiple outputs can be part of multiple conflict sets. - -Every conflict _shall_ be identified by the unique identifier `conflictID`. We consider two kinds of conflicts: conflict conflicts and aggregated conflicts, which are explained in the following sections. - -### Conflict Conflicts - -A conflict conflict is created by a corresponding double spend transaction. Since the transaction identifier is unique, we choose the transaction id `transactionID` of the double spending transaction as the `conflictID`. - -Outputs inside a conflict can be double spent again, recursively forming sub-conflicts. - -On solidification of a block, we _shall_ store the corresponding conflict identifier together with every output, as well as the transaction metadata to enable instant lookups of this information. Thus, on solidification, a transaction can be immediately associated with a conflict. - -### Aggregated Conflicts - -A transaction that does not create a double spend inherits the conflicts of the input's conflicts. In the simplest case, where there is only one input conflict the transaction inherits that conflict. 
- -If outputs from multiple non-conflicting conflicts are spent in the same transaction, then the transaction and its resulting outputs are part of an aggregated conflict. This type of conflict is not part of any conflict set. Rather it simply combines the perception that the individual conflict conflicts associated to the transaction's inputs are the ones that will be accepted by the network. Each aggregated conflict _shall_ have a unique identifier `conflictID`, which is the same type as for conflict conflicts. Furthermore the container for an aggregated conflict is also of type `conflict`. - -To calculate the unique identifier of a new aggregated conflict, we take the identifiers of the conflicts that were aggregated, sort them lexicographically and hash the concatenated identifiers once. - -An aggregated conflict can't aggregate other aggregated conflicts. However, it can aggregate the conflict conflicts that are part of the referenced aggregated conflict. -Thus aggregated conflicts have no further conflicts as their children and they remain tips in the conflict DAG. Furthermore, the sortation of the `conflictID`s in the function `AggregatedConflictID()` ensures that even though blocks can attach at different points in the Tangle and aggregate different aggregated conflicts they are treated as if they are in the same aggregated conflict **if** the referenced conflict conflicts are the same. - -These properties allow for an efficient reduction of a set of conflicts. In the following we will require the following fields as part of the conflict data: - -- `isConflictConflict` is a boolean flag that is `TRUE` if the conflict is a conflict conflict or `FALSE` if it's an aggregated conflict. -- `parentConflicts` contains the list of parent conflict conflicts of the conflict, i.e. the conflict conflicts that are directly referenced by this conflict. 
- -Then the following function takes a list of conflicts (which can be either conflict or aggregated conflicts) and returns a unique set of conflict conflicts that these conflicts represent. This is done by replacing duplicates and extracting the parent conflict conflicts from aggregated conflicts. - -```vbnet -FUNCTION reducedConflicts = ReduceConflicts(conflicts) - FOR conflict IN conflicts - IF conflict.isConflictConflict - Append(reducedConflicts,conflict) - ELSE - FOR parentConflict IN conflict.parentConflicts - IF NOT (parentConflict IN reducedConflicts) - Append(reducedConflicts,parentConflict) - - RETURN reducedConflicts -``` - -### The Conflict DAG - -A new conflict is created for each transaction that is part of a conflict set, or if a transaction aggregates conflicts. -In the conflict DAG, conflicts constitute the vertices of the DAG. A conflict that is created by a transaction that is spending outputs from other conflicts has edges pointing to those conflicts. -The conflict DAG maps the UTXO DAG to a simpler structure that ignores details about relations between transactions inside the conflicts and instead retains only details about the interrelations of conflicts. -The set of all non-conflicting transactions form the master conflict. Thus, at its root the conflict DAG has the master conflict, which consists of non-conflicting transaction and resolved transactions. From this root of the conflict DAG the various conflicts emerge. -In other words the conflict conflicts and the aggregated conflicts appear as the children of the master conflict. - -### Detecting Conflicting Conflicts - -Conflicts are conflicting if they, or any of their ancestors, are part of the same conflict set. -The conflict DAG can be used to check if conflicts are conflicting, by applying an operation called normalization, to a set of input conflicts. 
-From this information we can identify blocks or transactions that are trying to combine conflicts belonging to conflicting double spends, and thus introduce an invalid perception of the ledger state. - -Since conflicts represent the ledger state associated with a double spend and sub-conflicts implicitly share the perception of their parents, we define an operation to normalize a list of conflicts that gets rid of all conflicts that are referenced by other conflicts in that list. The function returns `NULL` if the conflicts are conflicting and can not be merged. - -### Merging of Conflicts Into the Master Conflict - -A conflict gains approval weight when blocks from (previously non-attached) `nodeID`s attach to blocks in the future cone of that conflict. Once the approval weight exceeds a certain threshold we consider the conflict as confirmed. -Once a conflict conflict is confirmed, it can be merged back into the master conflict. Since the approval weight is monotonically increasing for conflicts from the past to the future, conflicts are only merged into the master conflict. -The losing conflicts and all their child conflicts are booked into the container `rejectedConflict` that has the identifier `rejectedConflictID`. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/mana.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/mana.md deleted file mode 100644 index 87a093272cb..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/mana.md +++ /dev/null @@ -1,617 +0,0 @@ ---- -description: Mana is a reputation system for nodes within the IOTA network. Reputation is gained by contributing to the network. As time passes, part of the earned mana of a node decays to encourage keeping up the good behavior. 
-image: /img/protocol_specification/mana.png -keywords: - - mana - - node - - calculation - - transactions - - base mana - - vectors - - access mana - - consensus mana - - effective base mana - - ledger state ---- - -# Mana Implementation - -This document provides a high level overview of how mana is implemented in GoShimmer. - -## Introduction - -Mana is a reputation system for nodes within the IOTA network. - -Reputation is gained by contributing to the network, i.e. creating value transfers. -As time passes, part of the earned mana of a node decays to encourage keeping up the good behavior. - -## Scope - -The scope of the first implementation of mana into GoShimmer is to verify that mana calculations work, -study base mana calculations 1 & 2, and mana distribution in the test network, furthermore to verify that nodes have -similar view on the network. - -## Mana Calculation - -Mana is essentially the reputation score of a node in the IOTA network. Mana is calculated locally in each node, as a -function that takes value transactions as input and produces the Base Mana Vector as output. - -Each transaction has an `accessMana` and `consensusMana` field that determine which node to pledge these two types -of mana to. Both of these fields denote a `nodeID`, the receiver of mana. `accessMana` and `consensusMana` do not have -to be pledged to the same node, but for simplicity, in the first implementation, they will be. - -In addition to the mana fields, a `timestamp` field is also added to the transactions that will be utilized for calculating -decay and effective mana. - -From the pledged mana of a transaction, a node can calculate locally the `Base Mana Vector` for both `Access Mana` and -`Consensus Mana`. - -A `Base Mana Vector` consists of Base Mana 1 and Base Mana 2 and their respective `Effective Base Mana`. -Given a value transaction, Base Mana 1 and Base Mana 2 are determined as follows: - -1. 
Base Mana 1 is revoked from the node that created the output(s) used as input(s) in the transaction, and is pledged to - the node creating the new output(s). The amount of `Base Mana 1` revoked and pledged is equal to the balance of the - input. -2. Base Mana 2 is freshly created at the issuance time of the transaction, awarded to the node, but decays with time. - The amount of `Base Mana 2` pledged is determined with `Pending Mana` concept: funds sitting at an address generate - `pending mana` that grows over time, but bounded. - - `Mana_pending = (alpha*S)/gamma*(1-e^(-gamma*t))`, where `alpha` and `gamma` are chosen parameters, `S` is the amount - of funds an output transfers to the address, and `t` is the time since the funds are on that address. - -An example `Base Mana Vector` for `Access Mana` could look like this: - -| | Node 1 | Node 2 | ... | Node k | -| --------------------- | ------ | ------ | --- | ------ | -| Base Mana 1 | 0 | 1 | ... | 100.54 | -| Effective Base Mana 1 | 0 | 0.5 | ... | 120.7 | -| Base Mana 2 | 0 | 1.2 | ... | 0.01 | -| Effective Base Mana 2 | 0 | 0.6 | ... | 0.015 | - -`Base Mana` is pledged or revoked at discrete times, which results in `Base Mana` being discontinuous function over time. -In order to make mana "smoother" and continuous, an exponential moving average is applied to the `Base Mana` values, -resulting in `Effective Base Mana 1` and `Effective Base Mana 2`. - -It is important to note, that consuming a new transaction and pledging its mana happens when the transaction is -confirmed on the node. At the same time, entries of the nodes whose mana is being modified during pledging in the -`Base Mana Vector(s)` are updated with respect to time. In general, updates due to time happen whenever a node's mana is -being accessed. Except for the aforementioned case, this could be for example a mana related query from an external -module (AutoPeering, DRNG, Rate Control, tools, etc.). 
- -Following figure summarizes how `Access Mana` and `Consensus Mana` is derived from a transaction: - -[![Mana](/img/protocol_specification/mana.png 'Mana')](/img/protocol_specification/mana.png) - -The reason for having two separate `Base Mana Vectors` is the fact, that `accessMana` and `consensusMana` can be pledged -to different nodes. - -The exact mathematical formulas, and their respective parameters will be determined later. - -## Challenges - -### Dependency on Tangle - -Since mana is awarded to nodes submitting value transfers, the tangle is needed as input for mana calculation. -Each node calculates mana locally, therefore, it is essential to determine when to consider transactions in the -tangle "final enough" (so that they will not be orphaned). - -When a transaction is `confirmed`, it is a sufficient indicator that it will not be orphaned. However, in current -GoShimmer implementation, confirmation is not yet a properly defined concept. This issue will be addressed in a separate -module. - -The Mana module assumes, that the (value) tangle's `TransactionConfirmed` event is the trigger condition to update the -mana state machine (base mana vectors for access and consensus mana). Once the concept of transaction finality is -introduced for the tangle, the trigger conditions for access and consensus mana calculations can be adjusted. - -### Transaction Layout - -A new field should be added to `Transaction` denoting `PledgedNodeID` for `Access Mana` and `Consensus Mana`. -This is also beneficial to implement mana donation feature, that is, to donate the mana of a certain transaction to an -arbitrary node. - -## Limitations - -The first implementation of mana in GoShimmer will: - -- not have voted timestamps on value transactions, -- lack proper `TransactionConfirmed` mechanism to trigger mana update, -- lack integration into rate control/autopeering/etc. 
- -## Detailed Design - -In this section, detailed GoShimmer implementation design considerations will be outlined about the mana module. -In short, changes can be classified into 3 categories: - -1. Transaction related changes, -2. Mana module functionality, -3. and related tools/utilities, such as API, visualization, analytics. - -### Transaction - -As described above, 3 new fields will be added to the transaction layout: - -1. `Timestamp` time.time -2. `AccessManaNodeID` []bytes -3. `ConsensusManaNodeID` []bytes - -By adding these fields to the signed transaction, `valuetransfers/packages/transaction` should be modified. - -- The three new fields should be added to the transaction essence. -- Marshalling and unmarshalling of a transaction should be modified. -- For calculating `Base Mana 1` values, `mana module` should be able to derive from a transaction the nodes which received - pledged `Base Mana 1` as a consequence of the consumed inputs of the transaction. Therefore, a lookup function should - be exposed from the value tangle that given an `input`, returns the `pledgedNodeID` of the transaction creating the input. - -`Timestamp` is part of the signed transaction, therefore, a client sending a transaction to the node should already -define it. In this case, this `Timestamp` will not be the same as the timestamp of the block containing the -transaction and value payload, since the block is created on the node. -A solution to this is that upon receiving a `transaction` from a client, the node checks if the timestamp is within -a predefined time window, for example `t_current - delta`, where `delta` could be couple seconds. If true, then the node -constructs the block, which must have a greater timestamp, than the transaction. - -`AccessManaNodeID` and `ConsensusManaNodeID` are also part of the signed transaction, so a client should fill them out. 
-Node owners are free to choose to whom they pledge mana to with the transaction, so there should be a mechanism that -lets the client know, what `AccessManaNodeID` and `ConsensusManaNodeID` are allowed. This could be a new API endpoint -that works like this: - -1. Client asks node what nodeIDs can be included for pledging a certain type (access, consensus) mana. -2. Node answers with either: - -- Don't care. Any node IDs are valid. -- List of nodeIDs that are allowed for each type. - -3. If a client sends back the transaction with invalid or empty mana fields, the transaction is considered invalid. - -This way node owners can decide who their transactions are pledging mana to. It could be only their node, or they could -provide mana pledging as a service. They could delegate access mana to others, but hold own to consensus mana, or the -other way around. - -### Initialization - -Mana state machine is an extension of the ledger state, hence its calculation depends on the ledger state perception -of the node. Snapshotting is the mechanism that saves the ledger states and prunes unnecessary transactions. Together -with the ledger state, base mana vectors are also saved, since a certain ledger state reflects a certain mana distribution -in the network. In future, when snapshotting is implemented in GoShimmer, nodes joining the network will be able to query -for snapshot files that will contain initial base mana vectors as well. - -Until this functionality is implemented, mana calculation solely relies on transactions getting confirmed. That is, when -a node joins the network and starts gathering blocks and transactions from peers, it builds its own ledger state through -solidification process. Essentially, the node requests all blocks down to the genesis from the current tips of its neighbors. -Once the genesis is found, blocks are solidified bottom up. 
For the value tangle, this means that for each solidified -and liked transaction, `TransactionConfirmed` event is triggered, updating the base mana vectors. - -In case of a large database, initial synching and solidification is a computationally heavy task due to the sheer amount -of blocks in the tangle. Mana calculation only adds to this burden. It will be determined through testing if additional -"weight lifting" mechanism is needed (for example delaying mana calculation). - -In the GoShimmer test network, all funds are initially held by the faucet node, therefore all mana present at bootstrap belong -to this node. Whenever a transaction is requested from the faucet, it pledges mana to the requesting node, helping other -nodes to increase their mana. - -### Mana Package - -The functionality of the mana module should be implemented in a `mana` package. Then, a `mana plugin` can use the package -structs and methods to connect the dots, for example execute `BookMana` when `TransactionConfirmed` event is triggered -in the value tangle. - -`BaseMana` is a struct that holds the different mana values for a given node. -Note that except for `Base Mana 1` calculation, we need the time when `BaseMana` values were updated, so we store it in the struct: - -```go -type BaseMana struct { - BaseMana1 float - EffectiveBaseMana1 float - BaseMana2 float - EffectiveBaseMana2 float - LastUpdated time -} -``` - -`BaseManaVector` is a data structure that maps `nodeID`s to `BaseMana`. It also has a `Type` that denotes the type -of mana this vector deals with (Access, Consensus, etc.). - -```go -type BaseManaVector struct { - vector map[identity.ID]*BaseMana - vectorType Type -} -``` - -#### Methods - -`BaseManaVector` should have the following methods: - -- `BookMana(transaction)`: Book mana of a transaction. Trigger `ManaBooked` event. Note, that this method updates - `BaseMana` with respect to time and to new `Base Mana 1` and `Base Mana 2` values. 
-- `GetWeightedMana(nodeID, weight) mana`: Return `weight` \* `Effective Base Mana 1` + (1-`weight`)+`Effective Base Mana 2`. - `weight` is a number in [0,1] interval. Notice, that `weight` = 1 results in only returning `Effective Base Mana 1`, - and the other way around. Note, that this method also updates `BaseMana` of the node with respect to time. -- `GetMana(nodeID) mana`: Return 0.5*`Effective Base Mana 1` + 0.5*`Effective Base Mana 2` of a particular node. Note, that - this method also updates `BaseMana` of the node with respect to time. -- `update(nodeID, time)`: update `Base Mana 2`, `Effective Base Mana 1` and `Effective Base Mana 2` of a node with respect `time`. -- `updateAll(time)`: update `Base Mana 2`, `Effective Base Mana 1` and `Effective Base Mana 2` of all nodes with respect to `time`. - -`BaseMana` should have the following methods: - -- `pledgeAndUpdate(transaction)`: update `BaseMana` fields and pledge mana with respect to `transaction`. -- `revokeBaseMana1(amount, time)`: update `BaseMana` values with respect to `time` and revoke `amount` `BaseMana1`. -- `update(time)`: update all `BaseMana` fields with respect to `time`. -- `updateEBM1(time)`: update `Effective Base Mana 1` wrt to `time`. -- `updateBM2(time)`: update `Base Mana 2` wrt to `time`. -- `updateEBM2(time)`: update `Effective Base Mana 2` wrt to `time`. - -#### Base Mana Calculation - -There are two cases when the values within `Base Mana Vector` are updated: - -1. A confirmed transaction pledges mana. -2. Any module accesses the `Base Mana Vector`, and hence its values are updated with respect to `access time`. - -First, let's explore the former. - -##### A confirmed transaction pledges mana - -For simplicity, we only describe mana calculation for one of the Base Mana Vectors, namely, the Base Access Mana Vector. 
- -First, a `TransactionConfirmed` event is triggered, therefore `BaseManaVector.BookMana(transaction)` is executed: - -```go -func (bmv *BaseManaVector) BookMana(tx *transaction) { - pledgedNodeID := tx.accessMana - - for input := range tx.inputs { - // search for the nodeID that the input's tx pledged its mana to - inputNodeID := loadPledgedNodeIDFromInput(input) - // save it for proper event trigger - oldMana := bmv[inputNodeID] - // revoke BM1 - bmv[inputNodeID].revokeBaseMana1(input.balance, tx.timestamp) - - // trigger events - Events.ManaRevoked.Trigger(&ManaRevokedEvent{inputNodeID, input.balance, tx.timestamp, AccessManaType}) - Events.ManaUpdated.Tigger(&ManaUpdatedEvent{inputNodeID, oldMana, bmv[inputNodeID], AccessManaType}) - } - - // save it for proper event trigger - oldMana := bmv[pledgedNodeID] - // actually pledge and update - bm1Pledged, bm2Pledged := bmv[pledgedNodeID].pledgeAndUpdate(tx) - - // trigger events - Events.ManaPledged.Trigger(&ManaPledgedEvent{pledgedNodeID, bm1Pledged, bm2Pledged, tx.timestamp, AccessManaType}) - Events.ManaUpdated.Trigger(&ManaUpdatedEvent{pledgedNodeID, oldMana, bmv[pledgedNodeID], AccessManaType}) -} -``` - -`Base Mana 1` is being revoked from the nodes that pledged mana for inputs that the current transaction consumes. -Then, the appropriate node is located in `Base Mana Vector`, and mana is pledged to its `BaseMana`. -`Events` are essential to study what happens within the module from the outside. - -Note, that `revokeBaseMana1` accesses the mana entry of the nodes within `Base Mana Vector`, therefore all values are -updated with respect to `t`. Notice the two conflicts after the condition. When `Base Mana` values had been updated before -the transaction's timestamp, a regular update is carried out. However, if `t` is older, than the transaction timestamp, -an update in the "past" is carried out and values are updated up to `LastUpdated`. 
- -```go -func (bm *BaseMana) revokeBaseMana1(amount float64, t time.Time) { - if t.After(bm.LastUpdated) { - // regular update - n := t.Sub(bm.LastUpdated) - // first, update EBM1, BM2 and EBM2 until `t` - bm.updateEBM1(n) - bm.updateBM2(n) - bm.updateEBM2(n) - - bm.LastUpdated = t - // revoke BM1 at `t` - bm.BaseMana1 -= amount - } else { - // update in past - n := bm.LastUpdated.Sub(t) - // revoke BM1 at `t` - bm.BaseMana1 -= amount - // update EBM1 to `bm.LastUpdated` - bm.EffectiveBaseMana1 -= amount*(1-math.Pow(math.E,-EMA_coeff_1*n)) - } -} -``` - -The same regular and past update scheme is applied to pledging mana too: - -```go -func (bm *BaseMana) pledgeAndUpdate(tx *transaction) (bm1Pledged int, bm2Pledged int){ - t := tx.timestamp - bm1Pledged = sum_balance(tx.inputs) - - if t.After(bm.LastUpdated) { - // regular update - n := t.Sub(bm.LastUpdated) - // first, update EBM1, BM2 and EBM2 until `t` - bm.updateEBM1(n) - bm.updateBM2(n) - bm.updateEBM2(n) - bm.LastUpdated = t - bm.BaseMana1 += bm1Pledged - // pending mana awarded, need to see how long funds sat - for input := range tx.inputs { - // search for the timestamp of the UTXO that generated the input - t_inp := LoadTxTimestampFromOutputID(input) - bm2Add := input.balance * (1 - math.Pow(math.E, -decay*(t-t_inp))) - bm.BaseMana2 += bm2Add - bm2Pledged += bm2Add - } - } else { - // past update - n := bm.LastUpdated.Sub(t) - // update BM1 and BM2 at `t` - bm.BaseMana1 += bm1Pledged - oldMana2 = bm.BaseMana2 - for input := range tx.inputs { - // search for the timestamp of the UTXO that generated the input - t_inp := LoadTxTimestampFromOutputID(input) - bm2Add := input.balance * (1-math.Pow( math.E,-decay*(t-t_inp) ) ) * math.Pow(math.E, -decay*n) - bm.BaseMana2 += bm2Add - bm2Pledged += bm2Add - } - // update EBM1 and EBM2 to `bm.LastUpdated` - bm.EffectiveBaseMana1 += amount*(1-math.Pow(math.E,-EMA_coeff_1*n)) - if EMA_coeff_2 != decay { - bm.EffectiveBaseMana2 += (bm.BaseMana2 - oldMana2) 
*EMA_coeff_2*(math.Pow(math.E,-decay*n)- - math.Pow(math.E,-EMA_coeff_2*n))/(EMA_coeff_2-decay) / math.Pow(math.E, -decay*n) - } else { - bm.EffectiveBaseMana2 += (bm.BaseMana2 - oldMana2) * decay * n - } -} - return bm1Pledged, bm2Pledged -} -``` - -Notice, that in case of `EMA_coeff_2 = decay`, a simplified formula can be used to calculate `EffectiveBaseMana2`. -The same approach is applied in `updateEBM2()`. - -```go -func (bm *BaseMana) updateEBM1(n time.Duration) { - bm.EffectiveBaseMana1 = math.Pow(math.E, -EMA_coeff_1 * n) * bm.EffectiveBaseMana1 + - (1-math.Pow(math.E, -EMA_coeff_1 * n)) * bm.BaseMana1 -} -``` - -```go -func (bm *BaseMana) updateBM2(n time.Duration) { - bm.BaseMana2 = bm.BaseMana2 * math.Pow(math.E, -decay*n) -} -``` - -```go -func (bm *BaseMana) updateEBM2(n time.Duration) { - if EMA_coeff_2 != decay { - bm.EffectiveBaseMana2 = math.Pow(math.E, -emaCoeff2 * n) * bm.EffectiveBaseMana2 + - (math.Pow(math.E, -decay * n) - math.Pow(math.E, -EMA_coeff_2 * n)) / - (EMA_coeff_2 - decay) * EMA_coeff_2 / math.Pow(math.E, -decay * n)*bm.BaseMana2 - } else { - bm.EffectiveBaseMana2 = math.Pow(math.E, -decay * n)*bm.EffectiveBaseMana2 + - decay * n * bm.BaseMana2 - } -} -``` - -##### Any module accesses the Base Mana Vector - -In this case, the accessed entries within `Base Mana Vector` are updated via the method: - -```go -func (bmv *BaseManaVector) update(nodeID ID, t time.Time ) { - oldMana := bmv[nodeID] - bmv[nodeID].update(t) - Events.ManaUpdated.Trigger(&ManaUpdatedEvent{nodeID, oldMana, bmv[nodeID], AccessManaType}) -} -``` - -where `t` is the access time. - -```go -func (bm *BaseMana) update(t time.Time ) { - n := t - bm.LastUpdated - bm.updateEBM1(n) - bm.updateBM2(n) - bm.updateEBM2(n) - - bm.LastUpdated = t -} -``` - -#### Events - -The mana package should have the following events: - -- `Pledged` when mana (`BM1` and `BM2`) was pledged for a node due to new transactions being confirmed. 
- -```go -type PledgedEvent struct { - NodeID []bytes - AmountBM1 int - AmountBM2 int - Time time.Time - Type ManaType // access or consensus -} -``` - -- `Revoked` when mana (`BM1`) was revoked from a node. - -```go -type RevokedEvent struct { - NodeID []bytes - AmountBM1 int - Time time.Time - Type ManaType // access or consensus -} -``` - -- `Updated` when mana was updated for a node due to it being accessed. - -```go -type UpdatedEvent struct { - NodeID []bytes - OldMana BaseMana - NewMana BaseMana - Type ManaType // access or consensus -} -``` - -#### Testing - -- Write unit tests for all methods. -- Test all events and if they are correctly triggered. -- Benchmark calculations in tests to see how heavy it is to calculate EMAs and decays. - -### Mana Plugin - -The `mana plugin` is responsible for: - -- calculating mana from value transactions, -- keeping a log of the different mana values of all nodes, -- updating mana values, -- responding to mana related queries from other modules, -- saving base mana vectors in database when shutting down the node, -- trying to load base mana vectors from database when starting the node. - -The proposed mana plugin should keep track of the different mana values of nodes and handle calculation -updates. Mana values are mapped to `nodeID`s and stored in a `map` data structure. The vector also stores information on -what `Type` of mana it handles. - -```go -type BaseManaVector struct { - vector map[identity.ID]*BaseMana - vectorType Type -} -``` - -`Access Mana` and `Consensus Mana` should have their own respective `BaseManaVector`. - -```go -accessManaVector := BaseManaVector{vectorType: AccesMana} -consensusManaVector := BaseManaVector{vectorType: ConsensusMana} -``` - -In the future, it should be possible to combine `Effective Base Mana 1` and `Effective Base Mana 2` from a `BaseManaVector` -in arbitrary proportions to arrive at a final mana value that other modules use. The `mana package` has these methods -in place. 
Additionally, a parameter could be passed to the `getMana` type of exposed functions to set the proportions. - -#### Methods - -The mana plugin should expose utility functions to other modules: - -- `GetHighestManaNodes(type, n) [n]NodeIdManaTuple`: return the `n` highest `type` mana nodes (`nodeID`,`manaValue`) in - ascending order. Should also update their mana value. -- `GetManaMap(type) map[nodeID]manaValue`: return `type` mana perception of the node. -- `GetAccessMana(nodeID) mana`: access `Base Mana Vector` of `Access Mana`, update its values with respect to time, - and return the amount of `Access Mana` (either `Effective Base Mana 1`, `Effective Base Mana 2`, or some combination - of the two). Trigger `ManaUpdated` event. -- `GetConsensusMana(nodeID) mana`: access `Base Mana Vector` of `Consensus Mana`, update its values with respect to time, - and returns the amount of `Consensus Mana` (either `Effective Base Mana 1`, `Effective Base Mana 2`, or some combination - of the two). Trigger `ManaUpdated` event. -- `GetNeighborsMana(type)`: returns the `type` mana of the nodes neighbors -- `GetAllManaVectors()` Obtaining the full mana maps for comparison with the perception of other nodes. -- `GetWeightedRandomNodes(n)`: returns a weighted random selection of `n` nodes. `Consensus Mana` is used for the weights. -- Obtaining a list of currently known peers + their mana, sorted. Useful for knowing which high mana nodes are online. -- `OverrideMana(nodeID, baseManaVector)`: Sets the nodes mana to a specific value. Can be useful for debugging, setting faucet mana, initialization, etc.. Triggers `ManaUpdated` - -Such utility functions could be used for example to visualize mana distribution in node dashboard, or send neighbor -mana data to the analysis server for further processing. - -#### Booking Mana - -Mana is booked when a transaction is confirmed. 
- -```go -on TransactionConfirmed (tx): - bookAccessMana() - bookConsensusMana() -``` - -#### Synchronization and Mana Calculation - -The mana plugin is responsible to determine when to start calculating mana locally. -Since mana state is an extension to ledger state, it can only depict realistic mana values once the node is in sync. -During syncing, ledger state is constructed from blocks coming from neighbors as described further above. - -In this first iteration, mana plugin relies on `TransactionConfirmed` event of the value transfers plugin, and has no -explicit rules on when to start and stop mana calculation. - -In future, initial mana state (together with the initial ledger state) will be derived from a snapshot file. - -### Mana Toolkit - -In this section, all tools and utility functions for mana will be outlined. - -#### Mana Related API endpoints - -- `/info`: Add own mana in node info response. -- `value/allowedManaPledge`: Endpoint that clients can query to determine which nodeIDs are allowed as part of - `accessMana` and `consensusMana` fields in a transaction. -- `value/sendTransactionByJson`: Add `accessMana`, `consensusMana` and `timestamp` fields to the JSON request. - -Add a new `mana` endpoint route: - -- `/mana`: Return access and consensus mana of the node. -- `/mana/all`: Return whole mana map (mana perception of the node). -- `/mana/access/nhighest`: Return `n` highest access mana holder `nodeIDs` and their access mana values. -- `/mana/consensus/nhighest`: Return `n` highest consensus mana holder `nodeIDs` and their consensus mana values. -- `/mana/percentile`: Return the top percentile the node belongs to relative to the network. For example, if there are 100 nodes in the - network owning mana, and a node is the 13th richest, it means that is part of the top 13% of mana holders, but not the - top 12%. 
- -#### Metrics collection - -To study the mana module, following metrics could be gathered: - -- Amount of consensus and access mana present in the network. (amount varies because of `Base Mana 2`). -- Amount of mana each node holds. -- Number of (and amount of mana) a node was pledged with mana in the last `t` interval. -- Mana development of a particular node over time. -- Mana percentile development of a node over time. -- Average pledge amount of a node. (how much mana it receives on average with one pledge) -- Mean and median mana holdings of nodes in the network. Shows how even mana distribution is. -- Average mana of neighbors. - -#### Visualization - -Each node calculates mana locally, not only for themselves, but for all nodes in the network that it knows. As a result, -mana perception of nodes may not be exactly the same at all times (due to network delay, processing capabilities), but -should converge to the same state. A big question for visualization is which node's viewpoint to base mana visualization on? - -When running a node, operators will be shown the mana perception of their own node, but it also makes sense to -display the perception of high mana nodes as the global mana perception. First, let's look at how local mana perception -is visualized for a node: - -##### Local Perception - -There are two ways to visualize mana in GoShimmer: - -1. Node Local Dashboard -2. Grafana Dashboard - -While `Local Dashboard` gives flexibility in what and how to visualize, `Grafana Dashboard` is better at storing historic -data but can only visualize time series. Therefore, both of these ways will be utilized, depending on which suits the best. - -`Local Dashboard` visualization: - -- Histogram of mana distribution within the network. -- List of `n` richest mana nodes, ordered. -- Mana rank of node. - -`Grafana Dashboard` visualization: - -- Mana of a particular node with respect to time. -- Amount of mana in the network. -- Average pledge amount of a node. 
-- Mean and median mana holdings of nodes. -- Mana rank of the node over time. -- Average mana of neighbors. - -##### Global Perception - -Additionally, the GoShimmer Analyzer (analysis server) could be updated: - -- Autopeering node graph, where size of a node corresponds to its mana value. -- Some previously described metrics could be visualized here as well, to give the chance to people without - a node to take a look. As an input, a high mana node's perception should be used. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/markers.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/markers.md deleted file mode 100644 index cf2f6ac2817..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/markers.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -description: Markers is a tool to efficiently estimate the approval weight of a block and that reduces the portion of the Tangle that needs to be traversed, and which finally results in the confirmation state. -image: /img/protocol_specification/example_1.png -keywords: - - approval weight - - marker - - block - - sequence - - future marker - - new marker - - part marker - - past cone ---- - -# Markers - -## Summary - -Operations that involve traversing the Tangle are very performance intensive and, thus, we need to minimize the amount of traversing to keep algorithms fast. Markers are a tool to infer structural knowledge about the Tangle without actually traversing it. - -:::info Note - -**Markers** are not a core module of the Coordicide project. - -::: - -## Motivation - -_Markers_ are a tool to infer knowledge about the structure of the Tangle, therefore, we use them to keep algorithms fast. Specifically, markers are used for: - -- past/future cone membership; -- approximate approval weight of any block; -- tagging sections of the Tangle (e.g., conflicts) without having to traverse each block individually. 
- -## Definitions - -Let's define the terms related to markers: - -- **Sequence:** A sequence is a chain of markers where each progressing marker contains all preceding markers of the sequence in its past cone. -- **Sequence Identifier (`SID`):** A Sequence Identifier is the unique identifier of a Sequence. -- **Marker Index (`MI`):** A Marker Index is the marker rank in the marker DAG. Throughout the code the marker rank will be called index. -- **marker:** A marker is a pair of numbers: `SID` and `MI` associated to a given block. Markers carrying the same `SID` belong to the same Sequence. -- **future marker (`FM`):** A future marker of a block is the first marker in its future cone from different sequences. -- **past marker (`PM`):** A past marker of a block is a marker in its past cone (can be multiple markers of distinct sequences). For a given sequence it is set to the newest past marker of its parents, that is the one that has the largest `MI`. The past marker of a marker is set to itself. - -## Design - -On a high level, markers provide structural knowledge of the Tangle and each individual block without the need to traverse (aka walking the Tangle). Markers are a form of meta-information (for each block) that each node locally creates when processing blocks. They can be seen as specific, uniquely tainted blocks that, taken together, again build a DAG within the Tangle. We can then utilize this marker DAG to determine structural details. - -![](https://i.imgur.com/3x7H68t.png) - -The above example shows a Tangle with the red blocks being markers in the same sequence (more details on sequences later). A marker is uniquely identified by `sequenceID,index`, where the index is ever-increasing. Any block can be "selected" as a marker if it fulfills a certain set of rules: - -- every n-th block (in the example, each block is tried to be set as a marker) -- latest marker of sequence is in its past cone. 
- -The markers build a chain/DAG and because of the rules it becomes clear that `marker 0,1` is in the past cone of `marker 0,5`. Since markers represent meta-information for the underlying blocks and each block keeps the latest marker in its past cone as _structural information_, we can infer that `block B` (`FM 0,2`) is in the past cone of `block I` (`PM 0,3`) Similarly, it is evident that `block D` is in the past cone of `block J`. - -### Sequences - -A sequence is a chain of markers where each progressing marker contains all preceding markers of the sequence in its past cone. However, this very definition entails a problem: what if there are certain parts of the Tangle that are disparate to each other. Assuming only a single sequence, this would mean that a certain part of the Tangle can't get any markers. In turn, certain operations within this part of the Tangle would involve walking. - -For this reason, we keep track of the _marker distance_, which signals the distance of blocks in the Tangle in a certain past cone where no marker could be assigned. If this distance gets too big, a new sequence is created as is shown in the example below (marker distance to spawn a new sequence = 3). - -![](https://i.imgur.com/Q44XZgk.png) - -The example above shows a side chain starting from `block L` to `block P` where it merges back with the "main Tangle". There can be no new marker assigned as none of the `blocks L-O` have the latest marker of `sequence 0` in their past cone. The marker distance grows and eventually a marker is created at `block N`. Following, a marker can be assigned to `block O` and `block P`. The latter is special because it combines two sequences. This is to be expected as disparate parts of the Tangle should be merged eventually. In case a block has markers from multiple sequences in its past cones the following rules apply: - -- Assign a marker in the highest sequence if possible. If not possible, try to assign a marker in the next lower sequence. 
-- The index is `max(marker1.Index,marker2.Index,...)` - -With these rules in mind, it becomes clear why `block P` has the `marker 1,6` and `block R` has `marker 1,7`. In case of `block Q`, no marker can be assigned to `sequence 1`, and, thus, a new marker in `sequence 0` is created. - -Always continuing the highest seqeuence should result in smaller sequences being discontinued once disparate parts of the Tangle merge and overall a relatively small number of sequences (optimally just one) is expected to be active at any given moment in time. - -### Sequence Graph - -The information that markers yield about past and future cone is only valid for any given sequence individually. However, to relate markers of separate sequences, we need to track dependencies between sequences. -Therefore, sequences build a graph between each other, where relationships between the sequences can be seen. - -Each sequence keeps track of **referenced sequences** and **referencing sequences** at a specific marker index so that bidirectional traversing into the future and past are possible from a sequence is possible. - -Specifically, in our example there are 3 bidirectional references between `sequence 0` and `sequence 1`. -Sequence 0: - -- `0,1`<->`1,2` -- `0,5`<->`1,6` -- `0,6`<->`1,7` - -Sequence 1: - -- `1,2`<->`0,1` -- `1,6`<->`0,5` -- `1,7`<->`0,6` - -![](https://i.imgur.com/EhbJohc.png) - -## Usage - -### Markers Application: Approval Weight Estimation - -To approximate the approval weight of a block, we simply retrieve the approval weight of its `FM` list. Since the block is in the past cone of its `FM`s, the approval weight and the finality will be at least the same as its `FM`s. This will of course be a lower bound (which is the “safe” bound), but if the markers are set frequently enough, it should be a good approximation. -In practice, we propagate the GoF finality to blocks in a marker's past cone until we reach another marker. 
- -For details of managing approval weight of each marker and approval weight calculation thereof please refer to [Approval Weight](consensus_mechanism.md#approval-weight-aw). - -### Conflict Mapping - -Conflicts are introduced to the Tangle when double spends occur and are carried forward (inherited) by blocks until a conflict is resolved (merge to master). As such, each block needs to carry conflict information and if a conflict arises deep within the Tangle, each block would need to be traversed individually, which makes this operation very expensive and thus attackable. - -Therefore, we utilize markers to store conflict information for blocks and store only a **difference** of conflicts (subtracted/added) on each block individually. In that way, propagation of conflicts can happen via structural marker information and not every block needs to be updated. When querying conflict information of a block, first all conflicts of the block's past markers are retrieved and then combined with the diff of the block itself to result in the block's overall conflict. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/overview.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/overview.md deleted file mode 100644 index 7fd95fed302..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/overview.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: High-level description of the interaction between components of the currently implemented GoShimmer protocol. The protocol can be divided into three main elements. A P2P overlay network, an immutable data structure, and a consensus mechanism. 
-image: /img/protocol_specification/layers.png -keywords: - - network layer - - node - - block - - ledger state - - data flow - - past cone - - future cone - - timestamp - - opinion setting - - strong tip - - tip pool ---- - -# Components of the Protocol - -This section provides a high-level description of the interaction between components of the currently implemented GoShimmer protocol. The protocol can be divided into three main elements: a P2P overlay network, an immutable data structure, and a consensus mechanism. We abstract these three elements into layers, where—similarly to other architectures—upper layers build on the functionality provided by the layers below them. The definition of the different layers is merely about the convenience of creating a clear separation of concerns. - -[![Components of the Protocol](/img/protocol_specification/layers.png 'Components of the Protocol')](/img/protocol_specification/layers.png) - -## Network Layer - -The network is maintained by the network layer modules, which can be characterized as a pure P2P overlay network, meaning that it is a system that runs on top of another network (e.g., the internet), and where all nodes have the same roles and perform the same actions (in contrast to client-server systems). GoShimmer's Network Layer consists of three basic modules: the [peer discovery](autopeering.md#peer-discovery) module (which provides a list of nodes actively using the network), and the [neighbor selection](autopeering.md#neighbor-selection) module (also known as autopeering), which actually selects peers. Finally, the P2P Communication manages a node's neighbors, either selected via [autopeering](autopeering.md) or [manual peering](../../tutorials/manual_peering.md). - -## Communication Layer - -The communication layer concerns the information propagated through the network layer, which is contained in objects called blocks. 
This layer forms a DAG with blocks as vertices called the [Tangle](tangle.md): a replicated, shared and distributed data structure that emerges—through a combination of deterministic rules, cooperation, and virtual voting. -Since nodes have finite capabilities, the number of blocks that the network can process is limited. Thus, the network might become overloaded, either simply because of honest heavy usage or because of malicious (spam) attacks. To protect the network from halting or even from getting inconsistent, the rate control (currently a static PoW) and [congestion control](congestion_control.md) modules control when and how many blocks can be gossiped. - -## (Decentralized) Application Layer - -On top of the communication layer lives the application layer. Anybody can develop applications that run on this layer, and nodes can choose which applications to run. Of course, these applications can also be dependent on each other. -There are several core applications that must be run by all nodes, as the value transfer applications, which maintains the [ledger state](ledgerstate.md) (including advanced [output types](advanced_outputs.md)), and a quantity called [Mana](mana.md), that serves as a scarce resource as our Sybil protection mechanism. -Additionally, all nodes must run what we call the consensus applications, which regulate timestamps in the blocks and resolve conflicts. -The consensus mechanism implemented in GoShimmer is leaderless and consists out of multiple components: - -1. [Approval Weight](consensus_mechanism.md#approval-weight-aw) is an objective measure to determine the grade of finality of blocks and conflicts based on [active cMana](consensus_mechanism.md#Active-cMana). -2. The [Modular Conflict Selection Function](consensus_mechanism.md#modular-conflict-selection-function) is an abstraction on how a node sets an initial opinion on conflicts based on the . 
- -## Data Flow - Overview - -The diagram below represents the interaction between the different modules in the protocol ([event driven](../../implementation_design/event_driven_model.md)). Each blue box represents a component of the [Tangle codebase](https://github.com/iotaledger/goshimmer/tree/develop/packages/tangle), which has events (in yellow boxes) that belong to it. Those events will trigger methods (the green boxes), that can also trigger other methods. This triggering is represented by the arrows in the diagram. Finally, the purple boxes represent events that do not belong to the component that triggered them. - -As an example, take the Parser component. The function `ProcessGossipBlock` will trigger the method `Parse`, which is the only entry to the component. There are three possible outcomes to the `Parser`: triggering a `ParsingFailed` event, a `BlockRejected` event, or a `BlockParsed` event. In the last case, the event will trigger the `StoreBlock` method (which is the entry to the Storage component), whereas the first two events do not trigger any other component. - -[![Data Flow - Overview](/img/protocol_specification/data-flow.png 'Data Flow - Overview')](/img/protocol_specification/data-flow.png) - -We call this the data flow, i.e., the [life cycle of a block](../protocol.md), from block reception (meaning that we focus here on the point of view of a node receiving a block issued by another node) up until acceptance in the Tangle. Notice that any block, either created locally by the node or received from a neighbor needs to pass through the data flow. - -### Block Factory - -The IssuePayload function creates a valid payload which is provided to the `CreateBlock` method, along with a set of parents chosen with the Tip Selection Algorithm. Then, the Block Factory component is responsible to find a nonce compatible with the PoW requirements defined by the rate control module. Finally, the block is signed. 
Notice that the block generation should follow the rates imposed by the rate setter, as defined in [rate setting](congestion_control.md#rate-setting). - -### Parser - -The first step after the arrival of the block to the block inbox is the parsing, which consists of the following different filtering processes (meaning that the blocks that don't pass these steps will not be stored): - -**Bytes filter**: - -1. Recently Seen Bytes: it compares the incoming blocks with a pool of recently seen bytes to filter duplicates. -2. PoW check: it checks if the PoW requirements are met, currently set to the block hash starting with 22 zeroes. - -Followed by the bytes filters, the received bytes are parsed into a block and its corresponding payload and [syntactically validated](tangle.md#syntactical-validation). From now on, the filters operate on block objects rather than just bytes. - -**Block filter**: - -1. Signature check: it checks if the block signature is valid. -2. [Timestamp Difference Check for transactions](tangle.md#block-timestamp-vs-transaction-timestamp): it checks if the timestamps of the payload, and the block are consistent with each other - -### Storage - -Only blocks that pass the Parser are stored, along with their metadata. Additionally, new blocks are stored as children of their parents, i.e., a reverse mapping that enables us to walk the Tangle into the future cone of a block. - -### Solidifier - -[Solidification](tangle.md#Solidification) is the process of requesting missing blocks. In this step, the node checks if all the past cone of the block is known; in the case that the node realizes that a block in the past cone is missing, it sends a request to its neighbors asking for that missing block. This process is recursively repeated until all of a block's past cone up to the genesis (or snapshot) becomes known to the node. -This way, the protocol enables any node to retrieve the entire block history, even for nodes that have just joined the network. 
- -### Scheduler - -The scheduler makes sure that the network as a whole can operate with maximum throughput and minimum delays while providing consistency, fairness (according to aMana), and security. It, therefore, regulates the allowed influx of blocks to the network as a [congestion-control mechanism](congestion_control.md). - -### Booker - -After scheduling, the block goes to the booker. This step is different between blocks that contain a transaction payload and blocks that do not contain it. - -In the case of a non-transaction payload, booking into the Tangle occurs after the conflicting parents conflicts check, i.e., after checking if the parents' conflicts contain sets of (two or more) transactions that belong to the same conflict set. In the case of this check not being successful, the block is marked as `invalid` and not booked. - -In the case of a transaction as payload, initially, the following check is done: - -1. UTXO check: it checks if the inputs of the transaction were already booked. If the block does not pass this check, the block is not booked. If it passes the check, it goes to the next block of steps. -2. Balances check: it checks if the sum of the values of the generated outputs equals the sum of the values of the consumed inputs. If the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step. -3. Unlock conditions: checks if the unlock conditions are valid. If the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step. -4. Inputs' conflicts validity check: it checks if all the consumed inputs belong to a valid conflict. If the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step. - -After the objective checks, the following subjective checks are done: - -5. 
Inputs' conflicts rejection check: it checks if all the consumed inputs belong to a non-rejected conflict. Notice that this is not an objective check, so the node is susceptible (even if with a small probability) to have its opinion about rejected conflicts changed by a reorganization. For that reason, if the block does not pass this check, the block is booked into the Tangle and ledger state (even though the balances are not altered by this block, since it will be booked to a rejected conflict). This is what we call "lazy booking", which is done to avoid huge re-calculations in case of a reorganization of the ledger. If it passes the check, it goes to the next step. -6. Double spend check: it checks if any of the inputs is conflicting with a transaction that was already confirmed. As in the last step, this check is not objective and, thus, if the block does not pass this check, it is lazy booked into the Tangle and ledger state, into an invalid conflict. If it passes the check, it goes to the next step. - -At this point, the missing steps are the most computationally expensive: - -7. Inputs' conflicting conflicts check: it checks if the conflicts of the inputs are conflicting. As in the last step, if the block does not pass this check, the block is marked as `invalid` and not booked. If it passes the check, it goes to the next step. -8. Conflict check: it checks if the inputs are conflicting with an unconfirmed transaction. In this step, the conflict to which the block belongs is computed. In both cases (passing the check or not), the transaction is booked into the ledger state and the block is booked into the Tangle, but its conflict ID will be different depending on the outcome of the check. - -[![Booker](/img/protocol_specification/booker.png 'Booker')](/img/protocol_specification/booker.png) - -Finally, after a block is booked, it might become a [marker](markers.md) (depending on the marker policy) and can be gossiped. 
- -### Consensus Mechanism - -A detailed description can be found [here](consensus_mechanism.md). - -### Tip Manager - -The first check done in the tip manager is the eligibility check (i.e., subjective timestamp is ok), after passing it, a block is said to be `eligible` for tip selection (otherwise, it's `not eligible`). -If a block is eligible for [tip selection](tangle.md#tsa) and its payload is `liked`, along with all its weak past cone, the block is added to the strong tip pool and its parents are removed from the strong tip pool. If a block is eligible for tip selection, its payload is `liked` but its conflict is not liked it is added to the weak tip pool. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/tangle.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/tangle.md deleted file mode 100644 index ee66e56c518..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/components/tangle.md +++ /dev/null @@ -1,520 +0,0 @@ ---- -description: The Tangle represents a growing partially-ordered set of blocks, linked with each other through cryptographic primitives, and replicated to all nodes in the peer-to-peer network. It enables the ledger state (i.e., the UTXO-DAG formed by transactions contained in blocks), and the possibility to store data. -image: /img/protocol_specification/tangle.png -keywords: - - block - - strong parents - - node - - transactions - - level of knowledge - - cone - - past - - future - - strong block - - weak block - - approval weight ---- - -# Tangle - -## Data Types - -| Name | Description | -| ------------ | -------------------------------------------------------------------------------------------------------------- | -| uint8 | An unsigned 8 bit integer encoded in Little Endian. | -| uint16 | An unsigned 16 bit integer encoded in Little Endian. | -| uint32 | An unsigned 32 bit integer encoded in Little Endian. 
| -| uint64 | An unsigned 64 bit integer encoded in Little Endian. | -| ByteArray[N] | A static size array of size N. | -| ByteArray | A dynamically sized array. A uint32 denotes its length. | -| string | A dynamically sized array of an UTF-8 encoded string. A uint16 denotes its length. | -| time | Unix time in nanoseconds stored as `int64`, i.e., the number of nanoseconds elapsed since January 1, 1970 UTC. | - -## Subschema Notation - -| Name | Description | -| :------------- | :-------------------------------------------------------- | -| oneOf | One of the listed subschemas. | -| optOneOf | Optionally one of the listed subschemas. | -| anyOf | Any (one or more) of the listed subschemas. | -| `between(x,y)` | Between (but including) x and y of the listed subschemas. | - -## Parameters - -- `MAX_MESSAGE_SIZE=64 KB` The maximum allowed block size. -- `MAX_PAYLOAD_SIZE=65157 B` The maximum allowed payload size. -- `MIN_STRONG_PARENTS=1` The minimum amount of strong parents a block needs to reference. -- `MAX_PARENTS=8` The maximum amount of parents a block can reference. - -## General Concept - -[![The Tangle](/img/protocol_specification/tangle.png)](/img/protocol_specification/tangle.png) - -The Tangle represents a growing partially-ordered set of blocks, linked with each other through cryptographic primitives, and replicated to all nodes in the peer-to-peer network. The Tangle enables the ledger state (i.e., the UTXO-DAG formed by transactions contained in blocks), and the possibility to store data. - -### Terminology - -- **Genesis**: The genesis block is used to bootstrap the Tangle and creates the entire token supply and no other tokens will ever be created. It is the first block and does not have parents. It is marked as solid, eligible and confirmed. -- **Past cone**: All blocks that are directly or indirectly referenced by a block are called its past cone. 
-- **Future cone**: All blocks that directly or indirectly reference a block are called its future cone. -- **Solidity**: A block is marked as solid if its entire past cone until the Genesis (or the latest snapshot) is known. -- **Parents**: A block directly references between 1-8 previous blocks that we call its **parents**. A parent can be either **strong** or **weak** (see [approval switch](#orphanage--approval-switch)). -- **Children**: Parents are approved by their referencing blocks called **children**. It is thus a reverse mapping of parents. As in the parents' definition, an child might be either **strong** or **weak**. -- **Conflict**: A version of the ledger that temporarily coexists with other versions, each spawned by conflicting transactions. - -## Blocks - -Blocks are created and signed by nodes. Next to several fields of metadata, they carry a **payload**. The maximum block size is `MAX_MESSAGE_SIZE`. - -### Block ID - -BLAKE2b-256 hash of the byte contents of the block. It should be used by the nodes to index the blocks and by external APIs. - -### Block structure - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Versionuint8The block version. The schema specified in this RFC is for version 1 only.
Parents blocks countuint8The amount of parents block preceding the current block.
Parents Blocks anyOf -
- Strong Parents Block -
- Defines a parents block containing strong parents references. -
- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Parent Typeuint8 - Set to value 0 to denote a Strong Parents Block. -
Parent Countuint8 - Set to number of parent references in this block. -
Reference between(1,8)ByteArray[32]Reference to a Block ID.
-
-
- Weak Parents Block -
- Defines a parents block containing weak parents references. -
- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Parent Typeuint8 - Set to value 1 to denote a Weak Parents Block. -
Parent Countuint8 - Set to number of parent references in this block. -
Reference between(1,8)ByteArray[32]Reference to a Block ID.
-
-
- Dislike Parents Block -
- Defines a parents block containing dislike parents references. -
- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Parent Typeuint8 - Set to value 2 to denote a Dislike Parents Block. -
Parent Countuint8 - Set to number of parent references in this block. -
Reference between(1,8)ByteArray[32]Reference to a Block ID.
-
-
- Like Parents Block -
- Defines a parents block containing like parents references. -
- - - - - - - - - - - - - - - - - - - - - -
NameTypeDescription
Parent Typeuint8 - Set to value 3 to denote a Like Parents Block. -
Parent Countuint8 - Set to number of parent references in this block. -
Reference between(1,8)ByteArray[32]Reference to a Block ID.
-
-
Issuer public key (Ed25519)ByteArray[32]The public key of the node issuing the block.
Issuing timetimeThe time the block was issued.
Sequence numberuint64The always increasing number of issued blocks of the issuing node.
Payload lengthuint32The length of the Payload. Since its type may be unknown to the node, it must be declared in advance. 0 length means no payload will be attached.
- Payload - -
- Generic Payload -
- An outline of a general payload -
- - - - - - - - - - - - - - - - -
NameTypeDescription
Payload Typeuint32 - The type of the payload. It will instruct the node how to parse the fields that follow. Types in the range of 0-127 are "core types", that all nodes are expected to know. -
Data FieldsANYA sequence of fields, where the structure depends on payload type.
-
-
Nonceuint64The nonce which lets this block fulfill the adaptive Proof-of-Work requirement.
Signature (Ed25519)ByteArray[64]Signature of the issuing node's private key signing the entire block bytes.
- -### Syntactical Validation - -Blocks that do no pass the Syntactical Validation are discarded. Only syntactically valid blocks continue in the data flow, i.e., pass to the Semantic Validation. - -A block is syntactically valid if: - -1. The block length does not exceed `MAX_MESSAGE_SIZE` bytes. -1. When the block parsing is complete, there are not any trailing bytes left that were not parsed. -1. Parents Blocks must be ordered by ASC type with no repetitions. -1. A Strong Parents Block must exist. -1. There must be at least 1 parent per block and no more than 8. -1. Parents in each Parents Block types must be ordered ASC without repetition. -1. Parents must be unique across Parents Blocks. But there may be repetitions across the Strong and Liked blocks. -1. Parents Block Count and Parents Count must match the actual number of blocks and parents respectively. - -### Semantic Validation - -Blocks that do not pass the Semantic Validation are discarded. Only semantically valid blocks continue in the data flow. - -A block is semantically valid if: - -1. The Block PoW Hash contains at least the number of leading 0 defined as required by the PoW. -2. The signature of the issuing node is valid. -3. It passes [parents age checks](#age-of-parents). - -#### Votes Validation - -1. Only one dislike parent is allowed per conflict set. -1. Only one like parent is allowed per conflict set. -1. Every dislike parent must be in the past cone of a strong parent. -1. For each like parent, only one dislike parent must exist pointing to a block containing a transaction within the same conflict set. -1. For every dislike parent and for every conflict set it belongs to, a like parent must also exist pointing to a block within the considered conflict set, provided that such transaction is not already present in the past cone of any strong parent. -1. 
For each referenced conflict set, the references from all parent types must result in support for only a single transaction.
- -In that way, the Tangle enables all nodes to retrieve all of a block's history, even the ones joining the network at a point later in time. - -### Definitions - -- **valid**: A block is considered valid if it passes the following filters from the solidifier and from the block booker: - - solidifier: it checks if parents are valid, - - booker: it checks if the contained transaction is valid. Notice that only blocks containing a transaction are required to perform this check. -- **parents age check**: A check that ensures the timestamps of parents and child are valid, following the details defined in the [Timestamp specification](#age-of-parents). -- **solid**: A block is solid if it passes parents age check and all its parents are stored in the storage, solid and valid. - -### Detailed Design - -During solidification, if a node is missing a referenced block, the corresponding block ID is stored in the `solidification buffer`. A node asks its neighbors for the missing block by sending a `solidification request` containing the block ID. Once the requested block is received from its neighbors, its block ID shall be removed from the `solidification buffer`. The requested block is marked as solid after it passes the standard solidification checks. If any of the checks fails, the block remains unsolid. - -If a block gets solid, it shall walk through the rest of the data flow, then propagate the solid status to its future cone by performing the solidification checks on each of the blocks in its future cone again. - -[![Block solidification specs](/img/protocol_specification/GoShimmer-flow-solidification_spec.png)](/img/protocol_specification/GoShimmer-flow-solidification_spec.png) - -## Orphanage & Approval Switch - -The Tangle builds approval of a given block by directly or indirectly attaching other blocks in its future cone. 
Due to different reasons, such as the TSA not picking up a given block while its timestamp is still _fresh_, or because its past cone has been rejected, a block can become an orphan.
- -We define two categories of eligible blocks: - -- **Strong block**: - - It is eligible - - Its payload is liked with level of knowledge >=2 - - Its conflict is **liked** with level of knowledge >= 2 -- **Weak block**: - - It is eligible - - Its payload is liked with level of knowledge >=2 - - Its conflict is **not liked** with level of knowledge >= 2 - -We call _strong child of x_ (or _strong child of x_) any strong block _y_ approving _x_ via a strong reference. Similarly, we call _weak child of x_ (or _weak child of x_) any strong block _y_ approving _x_ via a weak reference. - -### Tip Pool and Time Since Confirmation Check - -When a block is scheduled, it is gossiped to the node's neighbors and, normally, added to the local tip pool -except in the following situations: - -- A confirmed block shall not be added to the tip pool (it shall be skipped by the scheduler). -- A block that has confirmed or scheduled children shall not be added to the tip pool. - -Additionally, strong parents of a block are removed from the tip pool, when the block is added and unused tips are removed from the tip pool after a certain amount of time. - -When selecting tips from the tip pool an additional check is performed to make sure that the timestamp and the -past cone of a selected block is valid. For the selected tip, the algorithm needs to find a timestamp of the oldest parent of the oldest -unconfirmed block in the past cone of the tip (`TS_oum`). If the difference between current Confirmed Tangle Time `now` and the -timestamp of the oldest confirmed block is greater than a certain threshold (`now - TS_oum > TSC_threshold`), then -the tip cannot be selected and another one needs to be found. The tip stays in the tip pool until it is -automatically removed because of its age. - -The Time Since Confirmation check solves the mention problem of [false positive schedule](congestion_control.md#false-positive-schedule) -by eventually orphaning blocks that were dropped by the network. 
- -### Conflict Management - -A block inherits the conflict of its strong parents, while it does not inherit the conflict of its weak parents. - -#### Approval Weight - -The approval weight of a given block takes into account all of its future cone built over all its strong children. -Let's consider the following example: - -[![Approval Weight](/img/protocol_specification/approval_weight_example.png 'Approval Weight')](/img/protocol_specification/approval_weight_example.png) - -_E_ is a weak block strongly approving _B_ and _D_. When considering the approval weight of _B_, only the strong children of its future cone are used, thus, _D, E, F_. Note that, the approval weight of _E_ would instead be built over _G, H, I_. Therefore, its approval weight does not add up to its own weight (for instance, when looking at the approval weight of _B_). - -### Solidification - -The solidification process does not change, both parent types are used to progress. - -### Test cases - -- block _x_ strongly approves a strong block _y_: ok -- block _x_ weakly approves a strong block _y_: it's weird, counts for approval weight of _y_ but does not affect the tip status of _y_ -- block _x_ strongly approves a weak block _y_: _x_ becomes a weak block -- block _x_ weakly approves a weak block _y_: ok - -## Finality - -Users need to know whether their information will not be orphaned. However, finality is inherently probabilistic. For instance, consider the following scenario: an attacker can trivially maintain a chain of blocks that do not approve any other block. At any given point in time, it is possible that all blocks will be orphaned except this chain. This is incredibly unlikely, but yet still possible. - -Therefore, we introduce [Approval Weight](consensus_mechanism.md#approval-weight-aw) to measure the finality of any given block. Similarly to Bitcoin's 6 block rule, AW describes how deeply buried a block in the Tangle is. 
If a block reaches >50% of active consensus mana approving it, i.e., its future cone contains blocks of nodes that together assert >50% of active consensus mana, it is considered finalized and, thus, confirmed.
Furthermore, transactions also contain a timestamp, which is also signed by the sender of the transaction (user) and thus immutable. We first discuss the rules regarding block timestamps. - -In order for a block to be eligible for tip selection, the timestamp of every block in its past cone (both weak and strong) must satisfy certain requirements. These requirements fall into two categories: objective and subjective. The objective criteria only depend on information written directly in the Tangle and are applied immediately upon solidification. Thus, all nodes immediately have consensus on the objective criteria. In this section, we will discuss these objective criteria. - -The quality of the timestamp is a subjective criterion since it is based on the solidification time of the block. Thus, nodes must use a consensus algorithm, to decide which blocks should be rejected based on subjective criteria. However, currently this feature is not yet implemented in GoShimmer, and we assume all timestamps to be good. - -### Age of parents - -It is problematic when incoming blocks reference extremely old blocks. If any new block may reference any block in the Tangle, then a node will need to keep all blocks readily available, precluding snapshotting. For this reason, we require that the difference between the timestamp of a block, and the timestamp of its parents must be at most `30min`. Additionally, we require that timestamps are monotonic, i.e., parents must have a timestamp smaller than their children's timestamps. - -### Block timestamp vs transaction timestamp - -Transactions contain a timestamp that is signed by the user when creating the transaction. It is thus different from the timestamp in the block which is created and signed by the node. We require - -```go -transaction.timestamp+TW >= block.timestamp >= transaction.timestamp -``` - -where `TW` defines the maximum allowed difference between both timestamps, currently set to `10min`. 
- -If a node receives a transaction from a user with an invalid timestamp it does not create a block but discards the transaction with a corresponding error block to the user. To prevent a user's local clock differences causing issues the node should offer an API endpoint to retrieve its `SyncedTime` according to the network time. - -### Reattachments - -Reattachments of a transaction are possible during the time window `TW`. Specifically, a transaction may be reattached in a new block as long as the condition `block.timestamp-TW >= transaction.timestamp` is fulfilled. If for some reason a transaction is not _picked up_ (even after reattachment) and thus being orphaned, the user needs to create a new transaction with a current timestamp. - -### Age of UTXO - -Inputs to a transaction (unspent outputs) inherit their spent time from the transaction timestamp. Similarly, unspent outputs inherit their creation time from the transaction timestamp as well. For a transaction to be considered valid we require - -```go -transaction.timestamp >= inputs.timestamp -``` - -In other words, all inputs to a transaction need to have a smaller or equal timestamp than the transaction. In turn, all created unspent outputs will have a greater or equal timestamp than all inputs. - -## Tangle Time - -For a variety of reasons, a node needs to be able to determine if it is in sync with the rest of the network, including the following: - -- to signal to clients that its perception is healthy, -- to know when to issue blocks (nodes out of sync should not issue blocks, lest they are added to the wrong part of the Tangle), -- to schedule blocks at the correct rate: out of sync nodes should schedule faster in order to catch up with the network. - -Every DLT is a clock, or more specifically a network of synchronized clocks. This clock has a natural correspondence with "real time". 
If the DLT clock differs significantly from local time, then we can conclude that our DLT clock is off from all the other clocks, and thus the node is out of sync. - -Tangle time is the timestamp of the last confirmed block. It cannot be attacked without controlling enough mana to accept incorrect timestamps, making it a reliable, attack-resistant quantity. - -Typically speaking, `CurrentTime - TangleTime` is, on average, the approximate confirmation time of blocks. Thus, if this difference is too far off, then we can conclude that we do not know which blocks are confirmed and thus we are out of sync. In this spirit, we are able to define the following function. - -```go -func Synced() bool { - if CurrentTime - TangleTime <= SYNC_THRESHOLD { - return true - } - - return false -} -``` - -The following figure displays the Tangle Time visually: -[![Tangle Time](/img/protocol_specification/tangle_time.jpg 'Tangle Time')](/img/protocol_specification/tangle_time.jpg) diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/glossary.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/glossary.md deleted file mode 100644 index 8c7573c4ca7..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/glossary.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -description: Protocol specification glossary. -image: /img/logo/goshimmer_light.png -keywords: - - transactions - - node - - tangle - - weak parents - - past cone - - approval switch - - tip selection - - mana distribution - - value transfer application ---- - -# Glossary - ---- - -## Application Layer - -The IOTA Protocol allows for a host of applications to run on the block tangle. Anybody can design an application, and users can decide which applications to run on their nodes. These applications will all use the communication layer to broadcast and store data. - -### Core Applications - -Applications that are necessary for the protocol to operate. 
These include for example: - -- The value transfer application -- The distributed random number generator (DRNG for short) -- The consensus mechanism, more specifically the Approval Weight manager - -### Faucet - -A test application issuing funds on request. - -### Value Transfer Application - -The application which maintains the ledger state. - ---- - -## Communication Layer - -This layer stores and communicates information. This layer contains the “distributed ledger” or the tangle. The rate control and timestamps are in this layer too. - -### Mana - -The reputation of a node is based on a virtual token called mana. This reputation, working as a Sybil protection mechanism, is important for issuing more transactions (see Module 3) and having a higher influence during the voting process (see Module 5). - -#### Slot - -A time interval that is used for a certain type of consensus mana. At the end of each slot a snapshot of the state of mana distribution in the network is taken. Since this tool employs the timestamp of blocks every node can reach consensus on an slots's mana distribution eventually. - -### Block - -The object that is gossiped between neighbors. All gossiped information is included in a block. The most basic unit of information of the IOTA Protocol. Each block has a type and size and contains data. - -### Block Overhead - -The additional information (metadata) that needs to be sent along with the actual information (data). This can contain signatures, voting, heartbeat signals, and anything that is transmitted over the network but is not the transaction itself. - -### Parent - -A block approved by another block is called a parent to the latter. A parent can be selected as strong or weak parent. If the past cone of the parent is liked the parent is set as strong parent. If the block is liked but its past cone is disliked it is set as a weak parent. This mechanism is called approval switch. - -### Payload - -A field in a block which determines the type. 
Examples are: - -- Value payload (type TransactionType) -- dRNG payload -- Salt declaration payload -- Generic data payload - ---- - -### Transaction - -A block with payload of type TransactionType. It contains the information of a transfer of funds. - -#### Finality - -The property that once a transaction is completed there is no way to revert or alter it. This is the moment when the parties involved in a transfer can consider the deal done. Finality can be deterministic or probabilistic. - -#### History - -The list of transactions directly or indirectly approved by a given transaction. - -#### Orphan - -A transaction (or block) that is not referenced by any succeeding transaction (or block). An orphan is not considered confirmed and will not be part of the consensus. - -#### Reattachment - -Resending a transaction by redoing tip selection and referencing newer tips by redoing PoW. - -#### Solidification Time - -The solidification time is the point at which the entire history of a transaction has been received by a node. - -#### UTXO - -Unspent transaction output. - ---- - -### Tip Selection - -The process of selecting previous blocks to be referenced by a new block. These references are where a block attaches to the existing data structure. IOTA only enforces that a block approves (at least) two other blocks, but the tip selection strategy is left up to the user (with a good default provided by IOTA). - -#### Approval Switch - -When selecting a block as a parent, we can select from the strong or weak tip pool. This mechanism is called approval switch. - -#### Approval Weight - -A block gains mana weight, by blocks approving it directly or indirectly. However, only strong parents can propagate the mana weight to the past, while weak parents obtain the weight from its weak children but don't propagate it. - -#### Local Modifiers - -Custom conditions that nodes can take into account during tip selection. 
In IOTA, nodes do not necessarily have the same view of the Tangle; various kinds of information only locally available to them can be used to strengthen security. - -#### Tip - -A block that has not yet been approved. - ---- - -## Consensus - -Agreement on a specific datum or value in distributed multi-agent systems, in the presence of faulty processes. - -### Blockchain Bottleneck - -As more transactions are issued, the block rate and size become a bottleneck in the system. It can no longer include all incoming transactions promptly. Attempts to speed up block rates will introduce more orphan blocks (blocks being left behind) and reduce the security of the blockchain. - -### Mining Races - -In PoW-based DLTs, competition between nodes to obtain mining rewards and transaction fees are known as mining races. These are undesirable as they favor more powerful nodes, especially those with highly optimized hardware like ASICs. As such, -they block participation by regular or IoT hardware and are harmful for the environment. - -### Nakamoto Consensus - -Named after the originator of Bitcoin, Satoshi Nakamoto, Nakamoto consensus describes the replacement of voting/communication between known agents with a cryptographic puzzle (Proof-of-Work). Completing the puzzle determines which agent is the next to act. - -### Proof of Work - -Data which is difficult (costly, time-consuming) to produce but easy for others to verify. - ---- - -## Coordinator - -A trusted entity that issues milestones to guarantee finality and protect the Tangle against attacks. - -### Milestones - -Milestones are transactions signed and issued by the Coordinator. Their main goal is to help the Tangle to grow healthily and to guarantee finality. When milestones directly or indirectly approve a transaction in the Tangle, nodes mark the state of that transaction and its entire history as confirmed. 
- ---- - -## Markers - -A tool that exists only locally and allows performing certain calculations more efficiently. Such as approval weight calculation or the existence of certain blocks in the past or future cone of another block. - -## Network Layer - -This layer manages the lower layers of internet communication like TCP. It is the most technical, and in some ways the least interesting. In this layer, the connections between nodes are managed by the autopeering and peer discovery modules and the gossip protocol. - -### Eclipse Attack - -A cyber-attack that aims to isolate and attack a specific user, rather than the whole network. - -### Neighbors - -Network nodes that are directly connected and can exchange blocks without intermediate nodes. - -### Node - -A machine which is part of the IOTA network. Its role is to issue new transactions and to validate existing ones. - -### Peering - -The procedure of discovering and connecting to other network nodes. - -### Small World Network - -A network in which most nodes can be reached from every other node by a few intermediate steps. - -### Splitting Attack - -An attack in which a malicious node attempts to split the Tangle into two conflicts. As one of the conflicts grows, the attacker publishes transactions on the other conflict to keep both alive. Splitting attacks attempt to slow down the consensus process or conduct a double spend. - -### Sybil Attack - -An attempt to gain control over a peer-to-peer network by forging multiple fake identities. - ---- - -## Tangle - -An append only block data structure where each block references (at least) two other blocks. - -### Subtangle - -A consistent section of the Tangle (i.e. a subset of blocks), such that each included block also includes its referenced blocks. 
- ---- diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/overview.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/overview.md deleted file mode 100644 index 84531e679f8..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/overview.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: High-level overview of the protocol, and its implemented components. -image: /img/protocol_specification/Protocol_overview_received_block.png -keywords: - - overview - - protocol - - high-level ---- - -# Protocol specification - -:::warning DISCLAIMER - -The following sections describe how things are implemented in GoShimmer. They might not reflect the final IOTA 2.0 specification or implementation. - -::: - -In this chapter we provide an overview of the various protocol components. - -We start with a [high level overview](protocol.md) of the protocol, followed by sections explaining the various implemented components. - -Note, this chapter is still work in progress. diff --git a/docs/maintain/goshimmer/0.9/docs/protocol_specification/protocol.md b/docs/maintain/goshimmer/0.9/docs/protocol_specification/protocol.md deleted file mode 100644 index 7de3fac3a08..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/protocol_specification/protocol.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -description: A high-level overview of the protocol, following the natural life cycle of a block from the Tip Selection module to being considered valid permanently by all nodes. -image: /img/protocol_specification/Protocol_overview_received_block.png -keywords: - - block - - node - - honest node - - rate of issuance - - congestion control - - mana - - access - - rate setter - - consensus ---- - -# Protocol High-level Overview - -To orientate the reader, we provide a high-level overview of the protocol, following the natural life cycle of a block. The first module used—while the block is still being created—, is the **Tip Selection** module. 
-Here, the node must choose a certain number (from two to eight) of other blocks to reference, meaning that the newly created block will be cryptographically attached to these referenced blocks. -An honest node must always choose tips uniformly at random from a tip pool, i.e., from a set of still unreferenced blocks that satisfy a certain set of conditions, as discussed on the [Tangle](components/tangle.md) component. -In the diagram below, the issuance process being described now is represented in the context of the complete protocol. - -Each node in the network has limited bandwidth, CPU, and memory. In order to avoid any node from being overloaded, the right to write in everybody else's Tangle is regulated by the **Rate and Congestion Control Modules**. -The first one dictates the maximum rate of issuance of blocks by the introduction of a small amount of proof of work. -However, if an attacker accelerates the block issuance—effectively flooding the network—, the difficulty of the proof of work for that node will increase exponentially. Eventually, the attacker will be incapable of issuing new blocks. - -Later, the Congestion control module fairly allocates the network resources accordingly to a quantity called **Access Mana**, which acts as a [Sybil protection](https://en.wikipedia.org/wiki/Sybil_attack) mechanism. -We can summarize Access Mana as a scarce resource, that makes an effective Sybil protection mechanism. Thus, each node has the right to issue blocks at a rate proportional to their Access Mana. -This fair rate is not constant (since the utilization of the network may fluctuate), and to correctly set its own individual rate of issuance of blocks, each node uses a mechanism called the **Rate Setter**, that makes the average issuance rate of the node converge to the fair rate guaranteed by Access Mana. -Nodes that do not use the rate Setter will be punished by either the Rate Control or the Congestion Control Module. 
Between the Rate Setter and the actual gossip of the block, several steps will take place, but—for the sake of clarity—we ignore these steps for now and return to this subject later.
**Fair access**: the nodes' blocks will be scheduled at a fair rate according to their Access Mana -3. **Bounded latency**: the processing time of all blocks will be bounded - -Only after passing the scheduler the blocks can be written into the local Tangle. To do that, the nodes perform most of the **semantic validation**, such as the search for irreconcilable conflicts in the block's past cone or (in the case of value transfers) unlock condition checks. -At this point (if the block passes these checks), the block will be **booked** into the **local Tangle** of the node and be gossiped to the rest of the network. -Additionally, in the case of a value transfer, the **ledger state** and two vectors called Access Mana Vector and **Consensus Mana** Vector are updated accordingly. -The Consensus Mana is another Sybil protection mechanism which—since it is applied to different modules than Access Mana—has the need of a different calculation. - -[![Protocol Overview Booking](/img/protocol_specification/Protocol_overview_booking.png 'Protocol Overview Booking')](/img/protocol_specification/Protocol_overview_booking.png) - -After having the block booked, the node is free to **gossip** it, but a crucial step of the protocol is still missing: the **Opinion Setter** and the voting protocol, that deal with the most subjective parts of the consensus mechanism (notice that, until now, the protocol has mostly dealt with objective checks). -The voting protocol used here is the FPC (or **Fast Probabilistic Consensus**), which is a binary voting protocol that allows a large group of nodes to come to a consensus on the value of a single bit. -The FPC begins with each node having an initial opinion, set using the node's local time perception and ordering of the blocks. The nodes must set opinions about two subjects: - -1. 
**The legitimacy of the timestamp of the block**: Whenever a node issues a block, it adds a timestamp to it, which should represent the local time of issuance (as seen by the issuer node). The other nodes will judge if this timestamp is reasonable, by checking if it is too far away from their own local clock. -2. In the case of a value transfer, **whether it is a conflict**: We use the [**FCoB Rule**](components/consensus_mechanism.md#fcob). Roughly, the node will have a positive opinion about a transaction A if and only if all its conflicts arrived later than a certain time interval after A's arrival. - -In each round, nodes randomly choose other nodes to query about their opinions about one of the subjects above. -The querying node changes its own opinion if the number of responses with a different opinion than it is greater than a certain threshold. -In order to prevent liveness attacks, this threshold is determined by a random number issued by a committee of high Consensus Mana nodes via the **dRNG** application. -Without the random threshold, an attacker could lie about its responses in a way that could prevent the protocol from terminating. Finally, a node will finalize on a certain opinion after holding it for a certain number of rounds. - -When selecting which other nodes to query, a node must weight the list of all nodes by Consensus Mana. -Thus, high Consensus Mana nodes are queried more often than low Consensus Mana nodes. This makes it difficult for an attacker to manipulate the outcome of the voting. -Unless the attacker controls more than 1/3 of the Consensus Mana in the system, with high probability, we know that FPC has the following properties: - -1. **Termination**: every honest node will finalize on some opinion before a maximum round number. -2. **Agreement**: all honest nodes will finalize on the same opinion. -3. **Integrity**: if a super majority of nodes—e.g. 
more than 90% weighted by Consensus Mana—, have the same initial opinion, then FPC will terminate with that value. - -[![Protocol Overview Consensus](/img/protocol_specification/Protocol_overview_consensus.png 'Protocol Overview Consensus')](/img/protocol_specification/Protocol_overview_consensus.png) - -Analogously to Bitcoin's [six blocks rule](https://en.bitcoin.it/wiki/Confirmation), our protocol has certain measures of the probability of a certain block being considered valid permanently by all nodes. -This is achieved by the use of the [**Approval Weight**](components/consensus_mechanism.md#approval-weight-aw). -The Approval weight represents the _weight_ of conflicts (and blocks). -Different to the classical Nakamoto consensus, instead of selecting a leader based on a puzzle (PoW) or stake (PoS), it allows every node to express its opinion by simply issuing any block and attaching it in a part of the Tangle it _likes_ based on a `Like switch`. Through the like switch, a block can even reference blocks of conflicts it doesn't like (thereby reduce orphanage), by explicitly expressing which ones it likes instead. -This process is known as On-Tangle-Voting. diff --git a/docs/maintain/goshimmer/0.9/docs/teamresources/analysis_dashboard.md b/docs/maintain/goshimmer/0.9/docs/teamresources/analysis_dashboard.md deleted file mode 100644 index 6a059613209..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/teamresources/analysis_dashboard.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -description: How to use the Dashboard in dev mode and set up hot loading and packaging. -image: /img/logo/goshimmer_light.png -keywords: - - port config - - pkger - - webpack - - build - - change - - npm - - yarn ---- - -# GoShimmer Analysis Dashboard - -Programmed using modern web technologies. - -### Dashboard in Dev Mode - -1. Make sure to set `analysis.dashboard.dev` to true, to enable GoShimmer to serve assets - from the webpack-dev-server. -2. 
Install all needed npm modules via `yarn install`. -3. Run a webpack-dev-server instance by running `yarn start` within the `frontend` directory. -4. Using default port config, you should now be able to access the analysis dashboard under http://127.0.0.1:8000 - -The Analysis Dashboard is hot-reload enabled. - -### Pack Your Changes - -We are using [pkger](https://github.com/markbates/pkger) to wrap all built frontend files into Go files. - -1. [Install `pkger`](https://github.com/markbates/pkger) if not already done. -2. Check that the correct webpack-cli (version v3.3.11) is installed: - - 2.1 `yarn webpack-cli --version` - - 2.2 If a newer version is installed use `yarn remove webpack-cli` and `yarn add webpack-cli@3.3.11` - -3. Build Analysis Dashboard by running `yarn build` within the `frontend` directory. -4. Navigate to the root of the repo. -5. Run `pkger` in the root of the repo. -6. `pkged.go` should have been modified. -7. Done. Now you can build GoShimmer and your Analysis Dashboard changes will be included within the binary. - -The above steps can also be done by running the `scripts/pkger.sh` script from the root folder. diff --git a/docs/maintain/goshimmer/0.9/docs/teamresources/guidelines.md b/docs/maintain/goshimmer/0.9/docs/teamresources/guidelines.md deleted file mode 100644 index 31c16857c61..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/teamresources/guidelines.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Code guidelines on how to contribute to the GoShimmer project. -image: /img/logo/goshimmer_light.png -keywords: - - error - - function call - - stack trace - - assign error - - explicit constant - - sentinel error - - 3rd party libs ---- - -# Code Guidelines - -## General Guidelines - -- Don’t use `log.Fatal()` or `os.Exit()` outside of the main. It immediately terminates the program and all defers are ignored and no graceful shutdown is possible. It can lead to inconsistencies. 
Propagate the error up to the main and let the main function exit instead. Avoid panics as well, almost always use errors. [Example](https://github.com/iotaledger/goshimmer/blob/f75ce47eeaa3bf930b368754ac24b72f768a5964/plugins/autopeering/autopeering.go#L135). -- Don’t duplicate code, reuse it. In tests too. Example: [duplicate1](https://github.com/iotaledger/goshimmer/blob/f75ce47eeaa3bf930b368754ac24b72f768a5964/packages/ledgerstate/conflict_dag.go#L969) and [duplicate2](https://github.com/iotaledger/goshimmer/blob/f75ce47eeaa3bf930b368754ac24b72f768a5964/packages/ledgerstate/conflict_dag.go#L1053) -- Unhandled errors can cause bugs and make it harder to diagnose problems. Try to handle all errors: propagate them to the caller or log them. Even if the function call is used with a defer, and it’s inconvenient to handle the error it returns, still handle it. Wrap the function call in an anonymous function assign error to the upper error like that: - -```go - defer func() { - cerr := f.Close() - if err == nil { - err = errors.Wrap(cerr, "failed to close file") - } - }() -``` - -- Wrap errors with `errors.Wrap()` when returning them to the caller. It adds the stack trace and a custom block to the error. Without that information investigating an issue is very hard. -- Use `errors.Is()` instead of direct errors comparison. This function unwraps errors recursively. [Example](https://github.com/iotaledger/goshimmer/pull/1113/files#diff-05fdc081489a8d5a61224d812f9bbd7bc77edf9769ed00d95ea024d2a44a699aL62). -- Propagate `ctx` and use APIs that accept `ctx`, start exposing APIs that accept `ctx`. Context is a native way for timeouts/cancellation in Go. It allows writing more resilient and fault tolerant code. [Example](https://github.com/iotaledger/goshimmer/pull/1113/files#diff-f2820ed0d3d4d9ea05b78b1dd3978dbcf9401c8caaa8cc40cc1c0342a55379fcL35). -- Don’t shadow builtin functions like copy, len, new etc. 
[Example](https://github.com/iotaledger/goshimmer/pull/1113/files#diff-f07268750a44da26386469c1b1e93574a678c3d418fce9e1f186d5f1991a92eaL14). -- Don’t shadow imported packages. [Example](https://github.com/iotaledger/goshimmer/blob/f75ce47eeaa3bf930b368754ac24b72f768a5964/plugins/webapi/value/sendtransactionbyjson.go#L172). -- Don’t do `[:]` on a slice. It has no effect. [Example](https://github.com/iotaledger/goshimmer/pull/1113/files#diff-299a1ac5fa09739ea07b7c806ee2785d83eea110f8af143dbc853a25e4819116L133). -- Avoid naked returns if the function isn’t very small. It makes the code more readable. -- Define explicit constants for strings that are used 3 times or more. It makes the code more maintainable. -- Define explicit constants for all numbers. It makes the code more readable. -- Don’t write really long and complex functions. Split them into smaller ones. -- Treat comments as regular text/documentation. Start with a capital letter, set space after `//` and end them with a dot. It’s a good habit since Go package docs are generated automatically from the comments and displayed on the godoc site. - -## Error Handling - -We use the new error wrapping API and behavior introduced with Go 1.13 but we use the "github.com/pkg/errors" drop-in replacement which follows the Go 2 design draft and which enables us to have a stack trace for every "wrapping" of the error. - -Errors should always be wrapped and annotated with an additional block at each step. The following example shows how errors are wrapped and turned into the corresponding sentinel errors. 
- -```go -package example - -import ( - "fmt" - "thirdPartyLibrary" - - "github.com/pkg/errors" -) - -// define error variables to make errors identifiable (sentinel errors) -var ErrSentinel = errors.New("identifiable error") - -// turn anonymous 3rd party errors into identifiable ones -func SentinelErrFrom3rdParty() (result interface{}, err error) { - if result, err = thirdPartyLibrary.DoSomething(); err != nil { - err = errors.WithMessagef(ErrSentinel, "failed to do something (%s)", err.Error()) - return - } - - return -} - -// wrap recursive errors at each step -func WrappedErrFromInternalCall() error { - _, err := SentinelErrFrom3rdParty() - return errors.Wrap(err, "wrapped internal error") -} - -// create "new" identifiable internal errors that are not originating in 3rd party libs -func ErrFromInternalCall() error { - return errors.WithMessage(ErrSentinel, "internal error") -} - -// main function -func main() { - err1 := WrappedErrFromInternalCall() - if errors.Is(err1, ErrSentinel) { - fmt.Printf("%v\n", err1) - } - - err2 := ErrFromInternalCall() - if errors.Is(err2, ErrSentinel) { - fmt.Printf("%v\n", err2) - } -} -``` diff --git a/docs/maintain/goshimmer/0.9/docs/teamresources/local_development.md b/docs/maintain/goshimmer/0.9/docs/teamresources/local_development.md deleted file mode 100644 index f59abcbdd34..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/teamresources/local_development.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: How to run and use golangci-lint to lint your code. How to build an image with the buildkit docker engine. -image: /img/logo/goshimmer_light.png -keywords: - - port config - - golang - - lint - - error handling - - golangci-lint - - docker - - buildkit - - image - - configuration json ---- - -# golangci-lint - -## Overview - -We use golangci-lint v1.38.0 to run various types of linters on our codebase. All settings are stored in the `.golangci.yml` file. -golangci-lint is very flexible and customizable. 
Check the docs to see how configuration works https://golangci-lint.run/usage/configuration/ - -## How to Run - -1. Install the golangci-lint program https://golangci-lint.run/usage/install/ -2. In the project root: `golangci-lint run` - -## Dealing With Errors - -Most of the errors that golangci-lint reports are errors from formatting linters like `gofmt`, `goimports`, etc. You can easily auto-fix them with: - -```shell -golangci-lint run --fix -``` - -Here is the full list of linters that support the auto-fix feature: `gofmt`, `gofumpt`, `goimports`, `misspell`, `whitespace`. - -In case it's not a formatting error, do your best to fix it first. If you think it's a false alarm there are a few ways to disable that check in golangci-lint: - -- Exclude the check by the error text regexp. Example: `'Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked'`. -- Exclude the entire linter for that file type. Example: don't run `errcheck` in Go test files. -- Change linter settings to make it more relaxed. -- Disable that particular error occurrence: use a comment with a special `nolint` directive next to the place in code with the error. Example: `// nolint: errcheck`. - -# Docker - -## Building an Image - -We use the new buildkit docker engine to build `iotaledger/goshimmer` image. -The minimum required docker version that supports this feature is `18.09`. -To enable buildkit engine in your local docker add the following to the docker configuration json file: - -```json -{ "features": { "buildkit": true } } -``` - -Check this [article](https://docs.docker.com/develop/develop-images/build_enhancements/#to-enable-buildkit-builds) for details on how to do that. 
- -### Troubleshooting - -If you already enabled the buildkit engine in the configuration json file as described above and docker version is `18.09` or higher, -try to set the following env variables when building the docker image: - -```shell -DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker build -t iotaledger/goshimmer . -``` diff --git a/docs/maintain/goshimmer/0.9/docs/teamresources/release.md b/docs/maintain/goshimmer/0.9/docs/teamresources/release.md deleted file mode 100644 index 750d5eb5cb5..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/teamresources/release.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -description: How to create a GoShimmer release. -image: /img/logo/goshimmer_light.png -keywords: - - github - - release - - banner version - - Changelog - - build - - node - - newest image ---- - -# How to Do a Release - -1. Create a PR into `develop` updating the banner version, database version and network version (`plugins/banner.AppVersion` `plugins/database/versioning.go` `plugins/autopeering/discovery/parameters.go`) and mentioning the changes in `CHANGELOG.md`. -2. Create a PR merging `develop` into `master`: merge **without squashing**. -3. Go to release workflow https://github.com/iotaledger/goshimmer/actions/workflows/release.yml and click the gray "Run workflow" button to configure the release process. -4. In "Conflict" field set `master`, in "Tag name" set current version, in "Release description" paste the changes recently added to `CHANGELOG.md`. Click the green "Run workflow" to trigger the automatic release and deployment process. -5. Check that the binaries are working. -6. Check that the nodes are up and functioning on `devnet`. 
diff --git a/docs/maintain/goshimmer/0.9/docs/tooling/dags_visualizer.md b/docs/maintain/goshimmer/0.9/docs/tooling/dags_visualizer.md deleted file mode 100644 index cef0e32c8b0..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tooling/dags_visualizer.md +++ /dev/null @@ -1,75 +0,0 @@ -# GoShimmer DAGs visualizer - -The DAGs visualizer is our all round tool for visualizing DAGs. Be it Tangle, UTXO-DAG or Conflict-DAG or their interactions. The DAGs visualizer is our go-to tool for visualization. - -## How to run - -DAGs visualizer is already packed into `pkged.go`. -To run it just simply launch a goshimmer node, open browser and go to `http://localhost:8061`. - -:::note - -UTXO-DAG and Conflict-DAG will check if there's any added or removed vertex every 10 seconds and rearrange vertices positions. - -::: - -[![DAGs visualizer Overview](/img/tooling/dags-visualizer.png 'DAGs visualizer overview')](/img/tooling/dags-visualizer.png) - -### Global Functions - -Global functions are used to apply settings across DAGs or interact with them. - -#### Set explorer URL - -Each node in a graph can be selected to see its contained information, and they are navigated to the dashboard explorer for more details. You can change the url to the desired dashboard explorer, default is `http://localhost:8081`. - -#### Search Vertex Within Time Intervals - -You can check how Tangle, UTXO and conflict DAG look like in a given timeframe. -Press "search" button, it will show you numbers of blocks, transactions and conflicts found within the given timeframe. If you want to render them in graphs, push "render" button. - -The conflict DAG shows not just conflicts in the given time interval (colored in orange) but also the full history (colored in blue) to the master conflict. - -:::note - -Drawing a large amount of transactions or conflicts may slow down the browser. 
- -::: - -[![DAGs visualizer Searching](/img/tooling/searching.png 'DAGs visualizer searching')](/img/tooling/searching.png) - -#### Select and center vertex across DAGs - -You can see a selected block/transaction/conflict and its corresponding block/transaction/conflict in other DAGs! Here's an example of sync with the selected transaction, you can see the block and conflict that contains the transaction are highlighted. - -[![DAGs visualizer Syncing with TX](/img/tooling/sync-with-tx.png 'DAGs visualizer sync with TX')](/img/tooling/sync-with-tx.png) - -Another example of sync with the selected conflict: -[![DAGs visualizer Syncing with conflict](/img/tooling/sync-with-conflict.png 'DAGs visualizer sync with conflict')](/img/tooling/sync-with-conflict.png) - -## How to run in dev mode - -Dev mode has only been tested on Linux. - -### Docker - -Run the yarn development server in a container and add it to the docker-network. - -1. Make sure to set `dagsvisualizer.dev` to true, to enable GoShimmer to serve assets. -2. Make sure to set `dagsvisualizer.devBindAddress` to `dagsvisualizer-dev-docker:3000`. -3. Run Goshimmer docker-network. -4. Go to goshimmer root directory and run script `scripts/dags_visualizer_dev_docker.sh`. It will - install all needed npm modules and create a container with a running development server instance. -5. Using default port config, you should now be able to access the DAGs visualizer under http://127.0.0.1:8061 - -To see the changes, you need to manually reload the page. - -## How to pack changes to pkged.go - -We are using [pkger](https://github.com/markbates/pkger) to wrap all built frontend files into Go files. - -1. [Install `pkger`](https://github.com/markbates/pkger#installation) if not already done. -2. Build DAGs visualizer by running `yarn build` within the `frontend` directory. -3. Run `pkger`. -4. `pkged.go` under root directory of goShimmer should have been modified. -5. Done. 
Now you can build goShimmer and your DAGs visualizer changes will be included within the binary. diff --git a/docs/maintain/goshimmer/0.9/docs/tooling/docker_private_network.md b/docs/maintain/goshimmer/0.9/docs/tooling/docker_private_network.md deleted file mode 100644 index c884eecb947..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tooling/docker_private_network.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -description: GoShimmer provides a tool at `tools/docker-network` with which a local test network can be set up locally with docker. -image: /img/tooling/docker-network.png -keywords: - - docker - - docker network - - dashboard - - web api - - host system - - port - - docker compose - - analysis dashboard ---- - -# Docker Private Network - -We provide a tool at `tools/docker-network` with which a local test network can be set up locally with docker. - -[![Docker network](/img/tooling/docker-network.png 'Docker network')](/img/tooling/docker-network.png) - -## How to Use the Tool - -In the docker network run for example - -```shell -./run.sh 5 1 1 -``` - -The command `./run.sh` spins up a GoShimmer network within Docker as schematically shown in the figure above. The first integer input defines the number of `peer_replicas` `N`. The second argument is optional for activating the Grafana dashboard, where - -- default (no argument) or 0: Grafana disabled -- 1: Grafana enabled - -More details on how to set up the dashboard can be found [here](../tutorials/setup.md). - -The peers can communicate freely within the Docker network -while the analysis and visualizer dashboard, as well as the `peer_master's` dashboard and web API are reachable from the host system on the respective ports. - -The settings for the different containers (`peer_master`, `peer_replica`) can be modified in `docker-compose.yml`. - -## How to Use as Development Tool - -Using a standalone throwaway Docker network can be really helpful as a development tool. 
- -Prerequisites: - -- Docker 17.12.0+ -- Docker compose: file format 3.5 - -Reachable from the host system - -- `peer_master's` analysis dashboard (autopeering visualizer): http://localhost:9000 -- `peer_master's` web API: http://localhost:8080 -- `faucet's` dashboard: http://localhost:8081 - - -It is therefore possible to send blocks to the local network via the `peer_master`. Log blocks of a specific container can be followed via - -```shell -docker logs --follow CONTAINERNAME -``` - -## Snapshot Tool - -A snapshot tool is provided in the tools folder. The snapshot file that is created must be moved into the `integration-tests/assets` folder. There, rename and replace the existing bin file (`7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin`). After restarting the docker network the snapshot file will be loaded. - -Docker Compose uses the `SNAPSHOT_FILE` environment variable to determine the location of the snapshot. Once you have a new snapshot you can simply set `SNAPSHOT_FILE` to the location of your new snapshot and Docker Compose will use your snapshot the next time you run `docker compose up`. - -## How to Use Block Approval Check Tool - -`get_approval_csv.sh` script helps you conveniently trigger the block approval checks on all nodes in the docker -network, and gather their results in the `csv` folder. - -Once the network is up and running, execute the script: - -```shell -./get_approval_csv.sh -``` - -Example output: - -``` -Triggering approval analysis on peer_master and 20 replicas... -Triggering approval analysis on peer_master and 20 replicas... DONE -Copying csv files from peer_master and 20 replicas... -Copying csv files from peer_master and 20 replicas... DONE -Copied files are located at ./csv -``` - -The exported csv files are timestamped to the date of request. 
- -``` -csv -├── 210120_16_34_14-docker-network_peer_replica_10.csv -├── 210120_16_34_14-docker-network_peer_replica_11.csv -├── 210120_16_34_14-docker-network_peer_replica_12.csv -... -``` - -Note, that the record length of the files might differ, since the approval check execution time of the nodes might differ. - -## Spammer Tool - -The Spammer tool lets you add blocks to the tangle when running GoShimmer in a Docker network. -In order to start the spammer, you need to send GET requests to a `/spammer` API endpoint with the following parameters: - -- `cmd` - one of two possible values: `start` and `stop`. -- `unit` - Either `mps` or `mpm`. Only applicable when `cmd=start`. -- `rate` - Rate in integer. Only applicable when `cmd=start`. -- `imif` - (_optional_) parameter indicating time interval between issued blocks. Possible values: - - `poisson` - emit blocks modeled with Poisson point process, whose time intervals are exponential variables with mean 1/rate - - `uniform` - issues blocks at constant rate - -Example requests: - -```shell -http://localhost:8080/spammer?cmd=start&rate=10&unit=mps - -http://localhost:8080/spammer?cmd=start&rate=10&unit=mps&imif=uniform -http://localhost:8080/spammer?cmd=stop -``` - -## Tangle Width - -When running GoShimmer locally in a Docker network, the network delay is so small that only 1 tip will be available most of the time. -In order to artificially create a tangle structure with multiple tips you can add a `blockLayer.tangleWidth` property to [config.docker.json](https://github.com/iotaledger/goshimmer/blob/develop/tools/docker-network/config.docker.json) -that specifies the number of tips that nodes should retain. This setting exists only for local testing purposes and should not be used in a distributed testnet. 
- -Here is an example config that can be added: - -```json -{ - "blockLayer": { - "tangleWidth": 10 - } -} -``` - -## Running With `docker compose` Directly - -To get an instance up and running on your machine make sure you have [Docker Compose](https://docs.docker.com/compose/install/) installed. - -Then you just need to run this command: - -```shell -docker compose build -docker compose --profile grafana up -d -``` - -> Note: Docker will build the GoShimmer image which can take several minutes. - -### Base Components - -These services that are created by default with `docker compose up -d`. - -#### Configuration - -- SNAPSHOT_FILE: The full path to the block snapshot file. Defaults to `./goshimmer/assets/7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih.bin` -- GOSHIMMER_TAG: (Optional) The [iotaledger/goshimmer](https://hub.docker.com/r/iotaledger/goshimmer) tag to use. Defaults to `develop`. -- GOSHIMMER_CONFIG: The location of the GoShimmer config file. Defaults to `./config.docker.json`. - -#### Example - -You can set the environment variable configuration inline as seen in this example. - -```shell -GOSHIMMER_TAG=develop docker compose up -d -``` - -#### Peer master - -A node that is used to expose ports via the host and to have a single attachment point for monitoring tools. - -##### Volumes - -Docker Compose creates a `mainnetdb` volume to maintain a tangle even after tearing down the containers. Run `docker compose down -v` to clear the volume. - -##### Ports - -The following ports are exposed on the host to allow for interacting with the Tangle. - -| Port | Service | -| -------- | ------------------ | -| 8080/tcp | Web API | -| 9000/tcp | Analysis dashboard | - -#### Peer replicas - -A node that can be replicated to add more nodes to your local tangle. - -##### Ports - -These expose 0 ports because they are replicas and the host system cannot map a port to multiple containers. - -#### Faucet - -A node that can dispense tokens. 
- -##### Ports - -The following ports are exposed on the host to allow for interacting with the Tangle. - -| Port | Service | -| -------- | --------- | -| 8081/tcp | Dashboard | - - - -### Optional Components - -These services can be added to your deployment through `--profile` flags and can be configured with `ENVIRONMENT_VARIABLES`. - -#### Grafana + Prometheus - -A set of containers to enable dashboards and monitoring. - -##### Profile - -In order to enable these containers you must set the `--profile grafana` flag when running `docker compose`. - -##### Configuration - -- PROMETHEUS_CONFIG: Location of the prometheus config yaml file. Defaults to `./prometheus.yml`. - -##### Example - -You can set the environment variable configuration inline as seen in this example. - -```shell -docker compose --profile grafana up -d -``` - -##### Ports - -The following ports are exposed on the host to allow for interacting with the Tangle. - -| Port | Service | -| -------- | ---------- | -| 3000/tcp | Grafana | -| 9090/tcp | Prometheus | diff --git a/docs/maintain/goshimmer/0.9/docs/tooling/evil_spammer.md b/docs/maintain/goshimmer/0.9/docs/tooling/evil_spammer.md deleted file mode 100644 index 4689ad9bc4d..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tooling/evil_spammer.md +++ /dev/null @@ -1,501 +0,0 @@ -# Evil spammer - -Evil spammer is the cli tool placed in `tools/evil-spammer` that allows to easily spam and stress test the network. It utilises client libraries `evilwallet` and `evilspammer`. Many predefined conflict and non-conflict scenarios are available to use directly with the `evilwallet` package, by command lines arguments of Evil Spammer tool, and by its interactive mode. - -The main goal is to test how the network will handle more complicated spam scenarios and find as many bugs as possible! 
- -**Main features:** - -- easily spam and stress test the GoShimmer network with the predefined scenarios -- ability to enable deep spam mode that reuses outputs created during the spam -- spamming with the command lines -- spamming with the interactive mode - -_If you have any idea on some nice scenarios, do not hesitate to open the PR, and we can extend our list with your ideas! -Also, do not forget to choose the right name for your spam._ - -## How to be evil? - -There are many options, but we encourage you to use our Evil Spammer Tool. It is available in a form of command line tool and in the interactive mode. - -The compiled versions of the tool for Windows, Linux, macOS are available in [goshimmer releases](https://github.com/iotaledger/goshimmer/releases). - -### Evil spammer command line - -The tool starts with the `main.go` file in `tools/evil-spammer`. - -Currently available script names: - -- `basic` -- `quick` -- `interactive` - -Run `go run . --help` to get the list of parameters available for each script and their descriptions. - -**Basic spammer.** -Basic spammer can be run with: - -```shell -cd tools/evil-spammer -go run . basic -``` - -and providing spam parameters with flags. -Below is an example with custom spam: - -```shell -# under tools/evil-spammer -go run . basic --spammer custom --scenario --rate 5 --duration 30s -``` - -It is possible to start multiple spam types at once by providing parameters separated by commas. - -```shell -go run . basic --urls http://localhost:8080 --spammer ds,blk,custom --rate 5,10,2 --duration 20s,20s,20s --tu 1s --scenario peace -``` - -#### Quick Test - -Can be used for fast and intense spamming test. First is transaction spam, next data spam, which should reduce the tip pool size if there was any, and double spend at the end. - -Example usage: - -```shell -# under tools/evil-spammer -go run . 
quick --urls http://localhost:8080,http://localhost:8090 --rate 50 --duration 1m --tu 1s --dbc 100ms -``` - -### Go interactive - -Simply run - -```shell -# under tools/evil-spammer -go run . interactive -``` - -![Interactive mode](/img/tooling/evil_spammer/evil-spammer-interactive.png 'Interactive mode') - -Evil wallet will start with API endpoints configured for the local docker network, -**if you want to play with different nodes on different network you need to update urls** in the config.json file and restart the tool, -or update it directly in the settings menu. -The url for the DevNet is: http://nodes.nectar.iota.cafe - -Some nodes might have double spend filter enabled. In that case, to correctly execute N-spend (a conflict set with size N) in scenarios, you need to provide at least N distinct urls to issue them simultaneously. The evil tool will pop up a warning if more urls are needed. We disabled the double spend filter for now on our nodes - everything should work also with only one url provided, so you don't need to worry about the warning. -E.g. to correctly spam with _`pear`_ you should have 4 clients configured. - -#### Requesting funds - -![Request funds](/img/tooling/evil_spammer/evilwallet-request-funds.png 'Request funds') - -In order to request faucet funds choose "Prepare faucet funds" option, and Evil Spammer will send the faucet request and split the output on the requested number. The fastest is 100 outputs, as we wait only for one transaction to be confirmed, the more outputs you request, the longer you will need to wait. 
- -> :warning: On the DevNet due to higher PoW and congestion in the network, a creation of more than 100 outputs can not always be successful (as it tries to create 100 splitting transactions at once), that's why we encourage you to use 100 option on the DevNet, and play with higher spam rates and requesting large amounts of outputs in the [local docker network](docker_private_network.md). - -You can also enable auto funds requesting, that will trigger funds preparation whenever you'll be short on faucet outputs. -Just go to: `Settings -> Auto funds requesting -> enable`. However, as mentioned above this is recommended only on private networks, where you have enough network throughput share. - -#### Wallet status - -You can check how many outputs is available in the "Evil wallet details". -![Details](/img/tooling/evil_spammer/evilwallet-details.png 'Details') - -- faucet outputs are outputs created from the faucet requests -- reuse outputs are the outputs available for the deep spam, you can collect them by changing reuse spam options to enable in - `New Spam -> Update spam options`. Later if you enable the deep spam in `Update spam options` they will be used as the batch inputs and will create deep DAG structures. -- and the statistics about spammed data blocks, value blocks and whole scenarios. - -#### Other things worth to know - -- Saving the evil wallet states is not supported. But don't worry you still can request more fresh Faucet outputs with just one click! -- Wallet will generate a `config.json` file if it did not exist. You can use it to set up your favorite settings or webAPI urls. -- We encourage you to see the results of your spams and structures in the DAGs Visualizer that by default can be accessed on port `8061`. -- Spammer allows for max 5 concurrently running spams, you can check currently running spams and cancel them at any time. 
-- Spammer tool keeps track of your last spams history, so you can check the times of the spam and render a specific period with the visualizer. -- In spam options you can enable `deep` spam, in which the spammer will reuse outputs generated by the current spam, previous spams with `reuse` option enabled, and previous deep spams' outputs. -- By default, the spam rate is set to mps, but you can change the time unit in the config file, e.g. `"timeUnit": "1m"` for block per minute. - -## Predefined scenarios - -Below you can find a list of predefined scenarios. - -- in the client library they can be accessed by the function `GetScenario(scenarioName string) (batch EvilBatch, ok bool)` -- in the evil spammer tool with command line you can use `basic` option and `scenario` flag to choose the scenario by name. -- in the evil spammer tool with interactive mode simply go to `New Spam -> Change scenario` and select from the list. - -In the below diagrams, the white box represents a transaction, the yellow box is an output, the green box is an input, and the numbers in yellow and green boxes are aliases for inputs and outputs. 
- -##### No conflicts - -- `single-tx` - -![Single transaction](/img/tooling/evil_spammer/evil-scenario-tx.png 'Single transaction') - -- `peace` - -![Peace](/img/tooling/evil_spammer/evil-scenario-peace.png 'Peace') - -##### Conflicts - -- `ds` - -![Double spend](/img/tooling/evil_spammer/evil-scenario-ds.png 'Double spend') - -- `conflict-circle` - -![Conflict circle](/img/tooling/evil_spammer/evil-scenario-conflict-circle.png 'Conflict circle') - -- `guava` - -![Guava](/img/tooling/evil_spammer/evil-scenario-guava.png 'Guava') - -- `orange` - -![Orange](/img/tooling/evil_spammer/evil-scenario-orange.png 'Orange') - -- `mango` - -![Mango](/img/tooling/evil_spammer/evil-scenario-mango.png 'Mango') - -- `pear` - -![Pear](/img/tooling/evil_spammer/evil-scenario-pear.png 'Pear') - -- `lemon` - -![Lemon](/img/tooling/evil_spammer/evil-scenario-lemon.png 'Lemon') - -- `banana` - -![Banana](/img/tooling/evil_spammer/evil-scenario-banana.png 'Banana') - -- `kiwi` - -![Kiwi](/img/tooling/evil_spammer/evil-scenario-kiwi.png 'Kiwi') - -## Evil Wallet and Evil spammer lib - -> :warning: This section is a guide for the users that wants to create their own tools or scenarios - - with the `evilwallet` and `evilwallet` library. - If you simply want to spam, you can use the evil spammer tool and its interactive mode described above. - -The wallet library was designed with the focus on the spamming use cases. -The evil wallet is a collection of many wallets (many seeds) that can be provided by the user, build from the faucet requests or are created during the spam. - -While creating the wallet we can provide the nodes webAPI urls, that will be ordered to spam. Otherwise, it will use default endpoints for the local docker network. - -```go -// provide webAPI urls -evilWallet := evilwallet.NewEvilWallet("http://localhost:1234", "http://localhost:2234") - -// automatically adds docker network as endpoints. 
-evilWallet := evilwallet.NewEvilWallet() -``` - -### Request funds from the faucet - -Then in order to send transactions, we need to request funds from the Faucet. -The evil wallet sends the request and splits the received funds on requested number of outputs that are further used as inputs for the spamming batches. - -Evil spammer does not care about the value of sent transactions, -it simply splits the input value equally among the outputs during the spam. -Below are presented all possibilities for requesting funds. -Requesting more allows you to spam harder, but you need to wait more for outputs preparation. - -```go -// 100 ouptuts -evilwallet.RequestFreshFaucetWallet() - -// 10k outputs -evilwallet.RequestFreshBigFaucetWallet() - -// x * 10k outputs -evilwallet.RequestFreshBigFaucetWallets(x) -``` - -### Create and send a transaction - -The evil wallet allows you to easily build a transaction by providing a list of options, such as inputs/outputs and issuer, see `evilwallet/options` for more options. - -There are 2 ways to assign **inputs** of a transaction: - -- alias(es) -- unspent outputs ID(s) - By assigning alias to an output will come in handy when you want to spend the specific output without knowing its actual output ID, and the evil wallet will handle the mapping for you. - -There are 2 ways to assign **outputs** of a transaction in `OutputOption`: - -```go -type OutputOption struct { - aliasName string - color ledgerstate.Color - amount uint64 -} -``` - -- with alias - - if amount is not specified, all balances will be sent to provided output alias(es) -- without alias - - if amount is less than the balances of input, remainder will be taken care of. - -The default color is `IOTA` if not specified. - -> :warning: You need to register an alias for the output if inputs are provided with alias and the other way around. Currently, evil wallet does not accept the mixing usage, for example, `in:alias -> out:without alias`. 
- -Examples: - -```go -// invalid, mixing usage: in:alias -> out:without alias -txA, err := evilwallet.CreateTransaction(WithInputs("1"), WithOutput(&OutputOption{amount: 1000000}), WithIssuer(initWallet)) - -// valid, Create Transaction will send all balances from input to output. -txB, err := evilwallet.CreateTransaction(WithInputs("1"), WithOutput(&OutputOption{aliasName: "2"}), WithIssuer(initWallet)) - -// valid, CreateTransaction will send 1000000 to `2`, and prepare a remainder if needed. -txC, err := evilwallet.CreateTransaction(WithInputs("1"), WithOutput(&OutputOption{aliasName: "2", amount: 1000000}), WithIssuer(initWallet)) -``` - -To send a transaction, you need to get client(s) from the evil wallet and send it: - -```go -clients := evilwallet.GetClients(1) - -clients[0].PostTransaction(txC) -``` - -### Compose your own scenario - -The most exciting part of evil wallet is to create whatever scenario easily! - -The custom spend is constructed in `[]ConflictSlice`, here's an example of `guava`: - -```go -err = evilwallet.SendCustomConflicts([]ConflictSlice{ - { - // A - []Option{WithInputs("1"), WithOutputs([]*OutputOption{{aliasName: "2"}, {aliasName: "3"}}), WithIssuer(wallet)}, - }, - { - // B - []Option{WithInputs("2"), WithOutput(&OutputOption{aliasName: "4"})}, - []Option{WithInputs("2"), WithOutput(&OutputOption{aliasName: "5"})}, - }, - { - // C - []Option{WithInputs("3"), WithOutput(&OutputOption{aliasName: "6"})}, - []Option{WithInputs("3"), WithOutput(&OutputOption{aliasName: "7"})}, - }, - { - // D - []Option{WithInputs([]string{"5", "6"}), WithOutput(&OutputOption{aliasName: "8"})}, - }, -}) -``` - -Each element in the `ConflictSlice` (`A`, `B`, `C` and `D`) contains 1 or more `[]Option`, which is options of a transaction to create, that is `A` contains 1 transaction, and `B` contains 2 transactions, etc. 
Transactions are issued by order (`A` -> `B` -> `C` -> `D`), but they are issued simultaneously in the same `ConflictSlice` element in order to create double spends. - -Below is an runnable example to send `guava` scenario: - -```go -evilwallet := NewEvilWallet() - -wallet, err := evilwallet.RequestFundsFromFaucet(WithOutputAlias("1")) - -err = evilwallet.SendCustomConflicts([]ConflictSlice{ - { - // A - []Option{WithInputs("1"), WithOutputs([]*OutputOption{{aliasName: "2"}, {aliasName: "3"}}), WithIssuer(wallet)}, - }, - { - // B - []Option{WithInputs("2"), WithOutput(&OutputOption{aliasName: "4"})}, - []Option{WithInputs("2"), WithOutput(&OutputOption{aliasName: "5"})}, - }, - { - // C - []Option{WithInputs("3"), WithOutput(&OutputOption{aliasName: "6"})}, - []Option{WithInputs("3"), WithOutput(&OutputOption{aliasName: "7"})}, - }, - { - // D - []Option{WithInputs([]string{"5", "6"}), WithOutput(&OutputOption{aliasName: "8"})}, - }, -}) -``` - -## Evil spammer library - -To use the evil spammer, you need to: - -1. prepare an evil wallet and request funds, -2. prepare evil scenario if any, -3. prepare evil spammer options, such as duration, spam rate, etc., -4. create a spammer and start spamming. - -The behaviour of the spammer is controlled by: - -- Spam options -- Evil Scenario - -Example of the simple spam with double spends: - -```go -evilWallet := evilwallet.NewEvilWallet() -err := evilWallet.RequestFreshFaucetWallet() - -scenarioDs := evilwallet.NewEvilScenario( - evilwallet.WithScenarioCustomConflicts(evilwallet.DoubleSpendBatch(5)), -) - -options := []Options{ - WithSpamRate(5, time.Second), - WithSpamDuration(time.Second * 10), - WithEvilWallet(evilWallet), - WithEvilScenario(scenarioDs), -} - -dsSpammer := NewSpammer(dsOptions...) -dsSpammer.Spam() -``` - -The spammer will treat the provided spamming custom conflicts as a single batch, which will be sent with the provided rate. 
-So if you use `guava` scenario and rate 5 mps per batch you will be spamming 30 mps on average -(as the `guava` creates 6 distinct transactions). - -### Spam options - -- To change the spamming rate use - -```go -WithSpamRate(rate int, timeUnit time.Duration) Options -``` - -- Duration of the spam can be controlled by either providing duration time or specifying how many batches should be sent. - -```go -WithSpamDuration(maxDuration time.Duration) Options -WithBatchesSent(maxBatchesSent int) Options -``` - -- If you want to create multiple spams and use the same Evil Wallet instance you can provide it with - -```go -WithEvilWallet(evilWallet), -``` - -- To customize the spamming batch and spam behavior, provide EvilScenario - -```go -WithEvilScenario(scenario *evilwallet.EvilScenario) Options -``` - -- By default spammer uses batch spamming function, but you can also spam with data blocks by using: - -```go -WithSpammingFunc(evilspammer.DataSpammingFunction) -``` - -### Evil Scenario - -There are several scenario batches in `evilwallet/customscenarios` already, which are shown in previous section. -Besides, you are able to define your own spamming scenario with alias in `EvilBatch`, which is similar to the `ConflictBatch` in evil wallet but rather simple. Only aliases for inputs and outputs are needed, then the evil spammer will find valid unspent outputs automatically, match outputs to provided aliases and start issuing transactions. Finally, make your defined scenario (`[]EvilBatch`) an option with `WithScenarioCustomConflicts` and pass it to `NewEvilScenario`. 
- -Below is the `guava` scenario: - -```go -EvilBatch{ - []ScenarioAlias{ - {Inputs: []string{"1"}, Outputs: []string{"2", "3"}}, - }, - []ScenarioAlias{ - {Inputs: []string{"2"}, Outputs: []string{"4"}}, - {Inputs: []string{"2"}, Outputs: []string{"5"}}, - }, - []ScenarioAlias{ - {Inputs: []string{"3"}, Outputs: []string{"6"}}, - {Inputs: []string{"3"}, Outputs: []string{"7"}}, - }, - []ScenarioAlias{ - {Inputs: []string{"6", "5"}, Outputs: []string{"8"}}, - }, -} -``` - -#### Deep spamming - -Besides the basic functionality to customize spam batches and set the rate and duration, the Evil Spammer also allows for deep spamming. - -To create a deep conflict and UTXO structure, you need to enable the deep spam with an option - -```go -evilwallet.WithScenarioDeepSpamEnabled() -``` - -The spammer will reuse outputs that it remembers from previous spams, or, if you provide one, outputs from a specific input `RestrictedReuse` wallet generated during some previous spam. -If you want to save outputs from the spam for a specific usage in the future, and you don't want the Evil Wallet to remember them and use them automatically, you need to provide a `RestrictedReuse` wallet. -After the spam ends, you can use this wallet in the next deep spam. -In the example below, we first save outputs from a simple `tx` spam and use the outputs later in a controlled manner to create a deep spam with level 2. 
- -```go -evilWallet := evilwallet.NewEvilWallet() - -evilWallet.RequestFreshFaucetWallet() - -// outputs from tx spam will be saved here, this wallet can be later reused as an input wallet for deep spam -restrictedOutWallet := evilWallet.NewWallet(evilwallet.RestrictedReuse) - -// transaction spam is the default one, no need to provide custom scenario batch -scenarioTx := evilwallet.NewEvilScenario( - evilwallet.WithScenarioReuseOutputWallet(restrictedOutWallet), -) -guava, _ := GetScenario("guava") -customScenario := evilwallet.NewEvilScenario( - evilwallet.WithScenarioDeepSpamEnabled(), - evilwallet.WithScenarioInputWalletForDeepSpam(restrictedOutWallet), - evilwallet.WithScenarioCustomConflicts(guava), -) - -options := []Options{ - WithSpamRate(5, time.Second), - WithBatchesSent(50), - WithEvilWallet(evilWallet), -} -txOptions := append(options, WithEvilScenario(scenarioTx)) -customOptions := append(options, WithEvilScenario(customScenario)) - -txSpammer := NewSpammer(txOptions...) -customDeepSpammer := NewSpammer(customOptions...) - -txSpammer.Spam() -customDeepSpammer.Spam() -``` - -If you want to use the outputs generated within the same spam you can instruct the spammer to save the outputs to the `Reuse` wallet and make it the input wallet for the spam at the same time, like in the example below: - -```go -evilWallet := evilwallet.NewEvilWallet() - -evilWallet.RequestFreshFaucetWallet() - -outWallet := evilWallet.NewWallet(evilwallet.Reuse) - -customScenario := evilwallet.NewEvilScenario( - evilwallet.WithScenarioDeepSpamEnabled(), - evilwallet.WithScenarioInputWalletForDeepSpam(outWallet), - evilwallet.WithScenarioReuseOutputWallet(outWallet), - evilwallet.WithScenarioCustomConflicts(evilwallet.Scenario1()), -) - -options := []Options{ - WithSpamRate(1, time.Second), - WithBatchesSent(50), - WithEvilWallet(evilWallet), -} -customOptions := append(options, WithEvilScenario(customScenario)) - -customDeepSpammer := NewSpammer(customOptions...) 
- -customDeepSpammer.Spam() -``` diff --git a/docs/maintain/goshimmer/0.9/docs/tooling/integration_tests.md b/docs/maintain/goshimmer/0.9/docs/tooling/integration_tests.md deleted file mode 100644 index 17918cdd8fe..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tooling/integration_tests.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -description: Running the integration tests spins up a `tester` container within which every test can specify its own GoShimmer network with Docker. -image: /img/tooling/integration-testing.png -keywords: - - integration test - - tester - - network - - docker - - peer - - docker compose - - linux - - macOS ---- - -# Integration Tests with Docker - -[![Integration testing](/img/tooling/integration-testing.png 'Integration testing')](/img/tooling/integration-testing.png) - -Running the integration tests spins up a `tester` container within which every test can specify its own GoShimmer -network with Docker as schematically shown in the figure above. - -Peers can communicate freely within their Docker network and this is exactly how the tests are run using the `tester` -container. Test can be written in regular Go style while the framework provides convenience functions to create a new -network, access a specific peer's web API or logs. - -## How to Run - -Prerequisites: - -- Docker 17.12.0+ -- Docker compose: file format 3.5 - -```shell -# Mac & Linux -cd tools/integration-tests -./runTests.sh -``` - -To run only selected tests provide their names as a parameter. - -```shell -./runTests.sh 'value mana' -``` - -The tests produce `*.log` files for every networks' peer in the `logs` folder after every run. - -On GitHub logs of every peer are stored as artifacts and can be downloaded for closer inspection once the job finishes. - -## Creating Tests - -Tests can be written in regular Go style. Each tested component should reside in its own test file -in `tools/integration-tests/tester/tests`. 
-`main_test` with its `TestMain` function is executed before any test in the package and initializes the integration test -framework. - -Each test has to specify its network where the tests are run. This can be done via the framework at the beginning of a -test. - -```go -// create a network with name 'testnetwork' with 6 peers and wait until every peer has at least 3 neighbors -n := f.CreateNetwork("testnetwork", 6, 3) -// must be called to create log files and properly clean up -defer n.Shutdown() -``` - -### Using Custom Snapshots - -When creating a test's network, you can specify a set of `Snapshots` in the `CreateNetworkConfig` struct. The framework will proceed to create the snapshots and make them available to the peers. -An example of a snapshot used in the code is as follows: - -``` -var ConsensusSnapshotDetails = framework.SnapshotInfo{ - FilePath: "/assets/dynamic_snapshots/consensus_snapshot.bin", - // node ID: 2GtxMQD9 - MasterSeed: "EYsaGXnUVA9aTYL9FwYEvoQ8d1HCJveQVL7vogu6pqCP", - GenesisTokenAmount: 800_000, // pledged to peer master - // peer IDs: jnaC6ZyWuw, iNvPFvkfSDp - PeersSeedBase58: []string{ - "Bk69VaYsRuiAaKn8hK6KxUj45X5dED3ueRtxfYnsh4Q8", - "HUH4rmxUxMZBBtHJ4QM5Ts6s8DP3HnFpChejntnCxto2", - }, - PeersAmountsPledged: []uint64{1_600_000, 800_000}, -} -``` - -The last parameter to the `CreateNetwork` function can be used to alter peers' configuration to use a generated snapshot file (e.g. `conf.BlockLayer.Snapshot.File = snaphotInfo.FilePath`). - -The `CommonSnapshotConfigFunc` function can be used for the average scenario: it will use the same `SnapshotInfo` for all peers. - -## Nodes' Debug Tools - -Every node in the test's network has its ports exposed on the host as follows: `service_port + 100*n` where `n` is the index of the peer you want to connect to. 
- -Service ports: - -- API `8080` -- Dashboard `8081` -- DAGs Visualizer `8061` -- Delve Debugger `40000` - -For example for `peer_replica_2` the following ports are exposed: - -- API [http://localhost:8280](http://localhost:8280) -- Dashboard [http://localhost:8261](http://localhost:8261) -- DAGs Visualizer [http://localhost:8281](http://localhost:8281) -- Delve Debugger [http://localhost:40200](http://localhost:40200) - -## Debugging tests - -Tests can be run defining a `DEBUG=1` (e.g. `DEBUG=1 ./runTests.sh`) environment variable. The main container driving the tests will be run under a Delve Go debugger listening -on `localhost:50000`. -The following launch configuration can be used from the VSCode IDE to attach to the debugger and step through the code: - -``` -{ - "version": "0.2.0", - "configurations": [ - { - "name": "Connect to Integration tester", - "type": "go", - "request": "attach", - "mode": "remote", - "port": 50000, - "host": "127.0.0.1" - } - ] -} -``` - -> When the tester container gets connected to the test network the debugger will suffer a sudden disconnection: it is a caveat of Docker's way of doing networking. Just attach the debugger again and you are ready to go again. - -### Preventing Network shutdown - -When the test completes for either a PASS or a FAIL, the underlying test network is destroyed. To prevent this and give you a chance to do your thing you will have to place the breakpoint on the `tests.ShutdownNetwork` method. - -## Other Tips - -Useful for development is to only execute the test you're currently building. 
For that matter, simply modify the `docker-compose.yml` file as follows: - -```yaml -entrypoint: go test ./tests -run -v -mod=readonly -``` diff --git a/docs/maintain/goshimmer/0.9/docs/tooling/overview.md b/docs/maintain/goshimmer/0.9/docs/tooling/overview.md deleted file mode 100644 index c66b36360fa..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tooling/overview.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: GoShimmer comes with a docker private network, integration tests and a CLI wallet to test the stability of the protocol. -image: /img/logo/goshimmer_light.png -keywords: - - tools - - docker - - private network - - integration test - - cli - - wallet - - cli wallet - - dags visualizer ---- - -# Tooling - -GoShimmer comes with some tools to test the stability of the protocol. - -We provide a documentation for the following tools: - -- The [docker private network](docker_private_network.md) with which a local test network can be set up locally with docker. -- The [integration tests](integration_tests.md) spins up a `tester` container within which every test can specify its own GoShimmer network with Docker. -- The [cli-wallet](../tutorials/wallet_library.md) is described as part of the tutorial section. -- The [DAGs Visualizer](dags_visualizer.md) is the all-round tool for visualizing DAGs. -- The [rand-seed and rand-address](rand_seed_and_rand_address.md) to randomly generate a seed, with the relative public key, or a random address. diff --git a/docs/maintain/goshimmer/0.9/docs/tooling/rand_seed_and_rand_address.md b/docs/maintain/goshimmer/0.9/docs/tooling/rand_seed_and_rand_address.md deleted file mode 100644 index ea5028e524c..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tooling/rand_seed_and_rand_address.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: 'You can use the rand-address and rand-seed tools to generate random seeds and addresses through a simple command.' 
-keywords: - - address - - seed - - public key - - private key - - generate - - generation ---- - -# Rand Seed and Rand Address - -You can use the [`rand-address`](#rand-address) and [`rand-seed`](#rand-seed) tools to generate addresses and seeds in a single command. - -## Rand Address - -You can use the `rand-address` tool to generate a random address by running the following command from the `tools/rand-address` directory: - -```shell -cd tools/rand-address -go run main.go -``` - -### Expected Output - -The script will output a Base58 string representing the newly generated address, for example: - -```shell -13n6HnqiLQVaE2sp8BExM51C2z1BLw7SrFjNAUK439YCC -``` - -## Rand Seed - -You can use the `rand-seed` tool to generate a text file with the following: - -- A [seed](../tutorials/send_transaction.md#seed), represented in Base64 and Base58. -- The seed's relative identity, as a Base58 string. -- The relative identity's public key, in Base58. - -```shell -cd tools/rand-seed -go run main.go -``` - -### Expected Output - -The script will generate a `random-seed.txt` file in the current working directory, for example: - -```plaintext -base64:ri9C8oAT3IPsus2j+IllMbW2B3nOqe4uC56zfr344zY= -base58:CiwjnjMRwEbCGiATWjNsrVptBTNH13AHrVNmG31KK9cy -Identity - base58:BCUnRc6c -Identity - base58:BCUnRc6cv4YVnB3Rw5DDfdsFuVVUW97MyLEBzWxHqfQj -Public Key - base58:Ht9VR8qAgmruDPzsQbak3AJvXcJY6q6Mxyaz4pDicDEw -``` diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/dApp.md b/docs/maintain/goshimmer/0.9/docs/tutorials/dApp.md deleted file mode 100644 index aff0dd7136a..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/dApp.md +++ /dev/null @@ -1,277 +0,0 @@ ---- -description: Learn how to write simple dApps as GoShimmer plugins such as a chat dApp and a network delay dApp. 
-image: /img/logo/goshimmer_light.png -keywords: - - chat - - payload - - block - - bytes layout - - web api endpoint ---- - -# How to Create dApps - -:::info - -This guide is meant for developers familiar with the Go programming language. - -::: - -:::warning DISCLAIMER - -GoShimmer is a rapidly evolving prototype software. As such, the described steps here will likely change in the future. Specifically, we are envisioning to ease the process of dApp creation and installation for node owners. Furthermore, the current approach is in no way hardened and should be seen as purely experimental. Do not write any software for actual production use. - -::: - -## Introduction - -Throughout this tutorial we will learn how to write simple dApps as GoShimmer plugins. We provide two different examples: a chat dApp and a network delay dApp. Hope you enjoy the reading! - -## Chat dApp - -In this guide we are going to explain how to write a very simple chat dApp so that anyone, connected to a GoShimmer node, could write a short block and read what is being written into the Tangle. - -The complete source code of the application can be found [in the repository](https://github.com/iotaledger/goshimmer/tree/develop/plugins/chat). - -### Overview - -Our chat dApp can be implemented in a few simple steps: - -1. A node sends a special block containing a chat payload via the Tangle. -2. Upon receipt, every other node in the network processes this block and - if the chat dApp/plugin is enabled - triggers an event that a chat block has been received. - -Within GoShimmer we need 3 components to realize this undertaking. First, we need to **define and register a chat payload type**. Second, we need a way to **initiate a block** with a chat payload via the web API. And lastly, we need to **listen** for chat payloads and take appropriate action. 
- -If a node does not have our chat dApp installed and activated, the chat block will be simply treated as a raw data block without any particular meaning. In general that means that in order for a dApp to be useful, node owners need to explicitly install it. In our case we simply ship it with GoShimmer as a [plugin](../implementation_design/plugin.md). - -### Define & Register The Chat Payload - -First, we need to decide what data our chat payload should contain and define the byte layout accordingly. -In our case we need a `From` field to identify the sender of the block (e.g., a nickname, the ID of the node); a `To` field to identify an optional recipient of the block (e.g., a chat room ID, a nickname); a `Block` field containing the actual chat block. -Therefore, we can define the byte layout as follows: - -``` -length // every payload has to have this -type // every payload has to have this -From -To -Block -``` - -Next, we need to fulfill the `Payload` interface and provide the functionality to read/write a payload from/to bytes. The [`hive.go/marshalutil`](https://github.com/iotaledger/hive.go/tree/master/marshalutil) package simplifies this step tremendously. - -```Go -// Payload represents the generic interface for a payload that can be embedded in Blocks of the Tangle. -type Payload interface { - // Type returns the Type of the Payload. - Type() Type - - // Bytes returns a marshaled version of the Payload. - Bytes() []byte - - // String returns a human readable version of the Payload. - String() string -} -``` - -Finally, we need to create and register our chat payload type so that it can be properly unmarshalled. - -```Go -// Type represents the identifier which addresses the chat payload type. 
-var Type = payload.NewType(payloadType, PayloadName, func(data []byte) (payload payload.Payload, err error) { - var consumedBytes int - payload, consumedBytes, err = FromBytes(data) - if err != nil { - return nil, err - } - if consumedBytes != len(data) { - return nil, errors.New("not all payload bytes were consumed") - } - return -}) -``` - -### Create The Web API Endpoints - -In order to issue a block with our newly created chat payload, we need to create a web API endpoint. Here we simply bind a json request containing the necessary fields: `from`, `to` and `block` and then issue it into the Tangle with `blocklayer.Tangle().IssuePayload(chatPayload)`. This plugin takes care of all the specifics and employs the `BlockFactory` to, i.a., select tips and sign the block. - -```Go -webapi.Server().POST("chat", SendChatBlock) - -// SendChatBlock sends a chat block. -func SendChatBlock(c echo.Context) error { - req := &Request{} - if err := c.Bind(req); err != nil { - return c.JSON(http.StatusBadRequest, jsonmodels.NewErrorResponse(err)) - } - chatPayload := NewPayload(req.From, req.To, req.Block) - - blk, err := blocklayer.Tangle().IssuePayload(chatPayload) - if err != nil { - return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) - } - - return c.JSON(http.StatusOK, Response{BlockID: blk.ID().Base58()}) -} -``` - -### Listen for Chat Payloads - -Every dApp listens for blocks from the _communication layer_ and when its payload type is detected, takes appropriate action. For us that means listening for chat payload type and triggering an event if we encounter any. In this case the event will contain information about the chat block and also the `BlockID` in terms of a Tangle block as well as its issuance timestamp. 
- -```Go -func onReceiveBlockFromBlockLayer(blockID tangle.BlockID) { - var chatEvent *ChatEvent - blocklayer.Tangle().Storage.Block(blockID).Consume(func(block *tangle.Block) { - if block.Payload().Type() != Type { - return - } - - chatPayload, _, err := FromBytes(block.Payload().Bytes()) - if err != nil { - app.LogError(err) - return - } - - chatEvent = &ChatEvent{ - From: chatPayload.From, - To: chatPayload.To, - Block: chatPayload.Block, - Timestamp: block.IssuingTime(), - BlockID: block.ID().Base58(), - } - }) - - if chatEvent == nil { - return - } - - app.LogInfo(chatEvent) - Events.BlockReceived.Trigger(chatEvent) -} -``` - -## Network Delay dApp - -In this guide we are going to explain how to write a very simple dApp based on an actual dApp we are using in GoShimmer to help us measure the network delay, i.e., how long it takes for every active node in the network to receive a block. - -The complete source code of the application can be found [in the repository](https://github.com/iotaledger/goshimmer/tree/develop/plugins/networkdelay). - -### Overview - -Our network delay dApp should help us to identify the time it takes for every active node to receive and process a block. That can be done in a few simple steps: - -1. A (known) node sends a special block containing a network delay payload. -2. Upon receipt, every other node in the network answers to the special block by posting its current time to our remote logger. -3. For simplicity, we gather the information in an [ELK stack](https://www.elastic.co/what-is/elk-stack). This helps us to easily interpret and analyze the data. - -Within GoShimmer we need 3 components to realize this undertaking. First, we need to **define and register a network delay payload type**. Second, we need a way to **initiate a block** with a network delay payload via the web API. And lastly, we need to **listen** for network delay payloads and take appropriate action. 
- -If a node does not have our dApp installed and activated, the block will be simply treated as a raw data block without any particular meaning. In general that means that in order for a dApp to be useful, node owners need to explicitly install it. In our case we simply ship it with GoShimmer as a [plugin](../implementation_design/plugin.md). - -### Define & Register The Network Delay Object - -First, we need to decide what data our network delay payload should contain and define the byte layout accordingly. -In our case we need an `ID` to identify a network delay block and the `sent time` of the initiator. -Therefore, we can define the byte layout as follows: - -``` -length // every payload has to have this -type // every payload has to have this -id<32bytes> -sentTime -``` - -Next, we need to fulfill the `Payload` interface and provide the functionality to read/write a payload from/to bytes. The [`hive.go/marshalutil`](https://github.com/iotaledger/hive.go/tree/master/marshalutil) package simplifies this step tremendously. - -```Go -// Payload represents the generic interface for a payload that can be embedded in Blocks of the Tangle. -type Payload interface { - // Type returns the Type of the Payload. - Type() Type - - // Bytes returns a marshaled version of the Payload. - Bytes() []byte - - // String returns a human readable version of the Payload. - String() string -} -``` - -Finally, we need to create and register our network delay payload type so that it can be properly unmarshalled. - -```Go -// Type represents the identifier which addresses the network delay Object type. 
-var Type = payload.NewType(189, ObjectName, func(data []byte) (payload payload.Payload, err error) { - var consumedBytes int - payload, consumedBytes, err = FromBytes(data) - if err != nil { - return nil, err - } - if consumedBytes != len(data) { - return nil, errors.New("not all payload bytes were consumed") - } - return -}) -``` - -### Create The Web API Endpoints - -In order to issue a block with our newly created network delay payload, we need to create a web API endpoint. Here we simply create a random `ID` and the `sentTime` and then issue a block with `issuer.IssuePayload()`. This plugin takes care of all the specifics and employs the `BlockFactory` to, i.a., select tips and sign the block. - -```Go -webapi.Server.POST("networkdelay", broadcastNetworkDelayObject) - -func broadcastNetworkDelayObject(c echo.Context) error { - // generate random id - rand.Seed(time.Now().UnixNano()) - var id [32]byte - if _, err := rand.Read(id[:]); err != nil { - return c.JSON(http.StatusInternalServerError, Response{Error: err.Error()}) - } - - blk, err := issuer.IssuePayload(NewObject(id, time.Now().UnixNano())) - if err != nil { - return c.JSON(http.StatusBadRequest, Response{Error: err.Error()}) - } - return c.JSON(http.StatusOK, Response{ID: blk.Id().String()}) -} -``` - -### Listen for Network Delay Payloads - -Every dApp listens for blocks from the _communication layer_ and when its data type is detected, takes appropriate action. For us that means listening for network delay payloads and sending blocks to our remote logger if we encounter any. Of course in this context, we only want to react to network delay payloads which were issued by our analysis/entry node server. Therefore, matching the block signer's public key with a configured public key lets us only react to the appropriate network delay payloads. 
- -```Go -func onReceiveBlockFromBlockLayer(blockID tangle.BlockID) { - blocklayer.Tangle().Storage.Block(blockID).Consume(func(solidBlock *tangle.Block) { - blockPayload := solidBlock.Payload() - if blockPayload.Type() != Type { - return - } - - // check for node identity - issuerPubKey := solidBlock.IssuerPublicKey() - if issuerPubKey != originPublicKey || issuerPubKey == myPublicKey { - return - } - - networkDelayObject, ok := blockPayload.(*Object) - if !ok { - app.LogInfo("could not cast payload to network delay payload") - return - } - - now := clock.SyncedTime().UnixNano() - - // abort if block was sent more than 1min ago - // this should only happen due to a node resyncing - if time.Duration(now-networkDelayObject.sentTime) > time.Minute { - app.LogDebugf("Received network delay block with >1min delay\n%s", networkDelayObject) - return - } - - sendToRemoteLog(networkDelayObject, now) - }) -} -``` diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/manual_peering.md b/docs/maintain/goshimmer/0.9/docs/tutorials/manual_peering.md deleted file mode 100644 index 4cc26a29d51..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/manual_peering.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: Manual peering enables node operators to exchange their nodes' information and let them peer with each other, orthogonal to autopeering. -image: /img/logo/goshimmer_light.png -keywords: - - peering - - manual - - node - - node operator - - known peer - - json config - - web api - - public key ---- - -# Manual Peering - -Manual peering enables node operators to exchange their nodes' information and let them peer with each other, orthogonal to autopeering. It can be an additional protection against eclipse attacks as the manual peering is completely in the hands of the node operator based on real world trust. Furthermore, it allows to operate nodes without exposing their IP address to the network. 
-There are two ways to configure the list of known peers of the node: - -1. Add known peers using the JSON config file -2. Add/View/Delete via the web API of the node - -## How Manual Peering Works - -When the user provides the list of known peers, which looks like a list of IP addresses with ports and public keys of peers, -the node remembers it and starts a background process that is trying to connect with every peer from the list. To establish -the connection with a peer, the other peer should have our local peer in its list of known peers. So the condition for -peers to connect is that they should have each other in their known peers lists. In case of network failure the node -will keep reconnecting with known peers until it succeeds. - -In other words, the only thing that users have to do to be connected via manual peering is to -exchange their IP address with port and public key and set that information to known peers of their nodes and machines will do the rest. - -## How to Set Known Peers via Config File - -Add the following record to the root of your JSON config file that you are using to run the node. - -### Config Record Example - -```json -{ - "manualPeering": { - "knownPeers": "[{\"publicKey\": \"CHfU1NUf6ZvUKDQHTG2df53GR7CvuMFtyt7YymJ6DwS3\", \"address\": \"127.0.0.1:14666\"}]" - } -} -``` - -### Config Record Description - -| Field | Description | -| :---------- | :------------------------------------------------- | -| `publicKey` | Public key of the peer. | -| `address` | IP address of the peer's node and its gossip port. | - -## How to Manage Known Peers Via Web API - -See manual peering API docs [page](../apis/manual_peering.md) -for information on how to manage the known peers list via web API. 
diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/monitoring.md b/docs/maintain/goshimmer/0.9/docs/tutorials/monitoring.md deleted file mode 100644 index d4307ab3a28..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/monitoring.md +++ /dev/null @@ -1,379 +0,0 @@ ---- -description: Node operators who wish to have more insights into what is happening within their node have the option to enable a Prometheus exporter plugin that gathers important metrics about their node. To visualize these metrics, a Grafana Dashboard is utilized. -image: /img/logo/goshimmer_light.png -keywords: - - monitoring - - dashboard - - prometheus - - grafana - - set up - - VPS ---- - -# Setting up Monitoring Dashboard - -## Motivation - -GoShimmer is shipped with its internal node dashboard that you can reach at `127.0.0.1:8081` by default. While this dashboard provides some basic metrics information, its main functionality is to provide a graphical interface to interact with your node. - -Node operators who wish to have more insights into what is happening within their node have the option to enable a [Prometheus](https://prometheus.io/) exporter plugin that gathers important metrics about their node. To visualize these metrics, a [Grafana Dashboard](https://grafana.com/oss/grafana/) is utilized. - -# Setting Up (Run GoShimmer From a VPS) - -To enable the **Monitoring Dashboard** for a GoShimmer node running from a VPS as described [here](setup.md), you need to carry out some additional steps. - -1. Edit `docker-compose.yml` - TODO -2. Create Prometheus config. - TODO -3. Create Grafana config. - TODO -4. Run `docker compose up`. - TODO - -# Setting Up (Run GoShimmer From Your Home Machine) - -Depending on how you run your GoShimmer node, there are different ways to set up the **Monitoring Dashboard**. - -## Docker - -One of the easiest ways to run a node is to use [Docker](https://www.docker.com/). 
To automatically launch GoShimmer and the Monitoring Dashboard with docker, follow these steps: - -1. [Install docker](https://docs.docker.com/get-docker/). On Linux, make sure you install both the [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/). -2. Clone the GoShimmer repository. - - ```shell - git clone git@github.com:iotaledger/goshimmer.git - ``` - -3. Create a `config.json` from the provided `config.default.json`. - - ```shell - cd goshimmer - cp config.default.json config.json - ``` - - Make sure, that following entry is present in `config.json`: - - ```json - { - "prometheus": { - "bindAddress": "127.0.0.1:9311" - } - } - ``` - -4. From the root of the repo, start GoShimmer with: - - ```shell - docker compose up - ``` - -You should be able to reach the Monitoring Dashboard via browser at [localhost:3000](http://localhost:3000). Default login credentials are: - -- `username` : admin -- `password` : admin - -After initial login, you will be prompted to change your password. -You can experiment with the dashboard, change layout, add panels and discover metrics. Your changes will be saved into a Grafana database located in the repo at `tools/monitoring/grafana/grafana.db`. - -## Binary - -If you run the [released binaries](https://github.com/iotaledger/goshimmer/releases), or build GoShimmer from source, you need to setup Prometheus and Grafana separately, furthermore, you have to configure GoShimmer to export data. - -### GoShimmer Configuration - -1. Make sure that the `prometheus.bindAddress` config parameter is set in your `config.json`: - - ```json - { - "prometheus": { - "bindAddress": "127.0.0.1:9311" - } - } - ``` - -2. 
Make sure, that the `prometheus` plugin is enabled in your `config.json`: - - ```json - { - "node": { - "disablePlugins": [], - "enablePlugins": ["prometheus"] - } - } - ``` - -### Install and Configure Prometheus - -First, we take a look on how to configure and run Prometheus as a standalone application. Then, we setup a Linux system service that automatically runs Prometheus in the background. - -#### Prometheus as a standalone app - -1. [Download](https://prometheus.io/download/) the latest release of Prometheus for your system. -2. Unpack the downloaded file: - - ```shell - tar xvfz prometheus-*.tar.gz - cd prometheus-* - ``` - -3. Create a `prometheus.yml` in the unpacked directory with the following content: - - ```yaml - scrape_configs: - - job_name: goshimmer_local - scrape_interval: 5s - static_configs: - - targets: - # goshimmer prometheus plugin export - - 127.0.0.1:9311 - ``` - -4. Start Prometheus from the unpacked folder: - - ```shell - # By default, Prometheus stores its database in ./data (flag --storage.tsdb.path). - $ ./prometheus --config.file=prometheus.yml - ``` - -5. You can access the prometheus server at [localhost:9090](http://localhost:9090). -6. (Optional) Prometheus server is running, but observe that [localhost:9090/targets](http://localhost:9090/targets) shows the target being `DOWN`. Run GoShimmer with the configuration from the previous stage, and you will soon see the `goshimmer_local` target being `UP`. - -#### Prometheus as a system service (Linux) - -Note: you have to have root privileges with your user to carry out the following steps. - -1. Create a Prometheus user, directories, and set this user as the owner of those directories. - - ```shell - sudo useradd --no-create-home --shell /bin/false prometheus - sudo mkdir /etc/prometheus - sudo mkdir /var/lib/prometheus - sudo chown prometheus:prometheus /etc/prometheus - sudo chown prometheus:prometheus /var/lib/prometheus - ``` - -2. Download Prometheus source, extract and rename. 
- - ```shell - wget https://github.com/prometheus/prometheus/releases/download/v2.19.1/prometheus-2.19.1.linux-amd64.tar.gz - tar xvfz prometheus-2.19.1.linux-amd64.tar.gz - mv prometheus-2.19.1.linux-amd64.tar.gz prometheus-files - ``` - -3. Copy Prometheus binaries to `/bin` and change their ownership - - ```shell - sudo cp prometheus-files/prometheus /usr/local/bin/ - sudo cp prometheus-files/promtool /usr/local/bin/ - sudo chown prometheus:prometheus /usr/local/bin/prometheus - sudo chown prometheus:prometheus /usr/local/bin/promtool - ``` - -4. Copy Prometheus console libraries to `/etc` and change their ownership. - - ```shell - sudo cp -r prometheus-files/consoles /etc/prometheus - sudo cp -r prometheus-files/console_libraries /etc/prometheus - sudo chown -R prometheus:prometheus /etc/prometheus/consoles - sudo chown -R prometheus:prometheus /etc/prometheus/console_libraries - ``` - -5. Create Prometheus config file, define targets. - To create and open up the config file: - - ```shell - sudo nano /etc/prometheus/prometheus.yml - ``` - - Put the following content into the file: - - ```yaml - scrape_configs: - - job_name: goshimmer_local - scrape_interval: 5s - static_configs: - - targets: - # goshimmer prometheus plugin export - - 127.0.0.1:9311 - ``` - - Save and exit the editor. -6. Change ownership of the config file. - - ```shell - sudo chown prometheus:prometheus /etc/prometheus/prometheus.yml - ``` - -7. Create a Prometheus service file. 
- - ```shell - sudo nano /etc/systemd/system/prometheus.service - ``` - - Copy the following content into the file: - - ``` - [Unit] - Description=Prometheus GoShimmer Server - Wants=network-online.target - After=network-online.target - - [Service] - User=prometheus - Group=prometheus - Type=simple - ExecStart=/usr/local/bin/prometheus \ - --config.file /etc/prometheus/prometheus.yml \ - --storage.tsdb.path /var/lib/prometheus/ \ - --web.console.templates=/etc/prometheus/consoles \ - --web.console.libraries=/etc/prometheus/console_libraries - - [Install] - WantedBy=multi-user.target - ``` - -8. Reload `systemd` service to register the prometheus service. - - ```shell - sudo systemctl daemon-reload - sudo systemctl start prometheus - ``` - -9. Check if the service is running. - - ```shell - sudo systemctl status prometheus - ``` - -10. You can access the prometheus server at [localhost:9090](http://localhost:9090). -11. (Optional) Prometheus server is running, but observe that [localhost:9090/targets](http://localhost:9090/targets) shows the target being `DOWN`. Run GoShimmer with the configuration from the previous stage, and you will soon see the `goshimmer_local` target being `UP`. - -+1. When you want to stop the service, run: - -```shell -sudo systemctl stop prometheus -``` - -Prometheus now collects metrics from your node, but we need to setup Grafana to visualize the collected data. - -### Install and Configure Grafana - -Head over to [Grafana Documentation](https://grafana.com/docs/grafana/latest/installation/) and install Grafana. For Linux, the OSS Release is recommended. - -#### Grafana as standalone app - -Depending on where you install Grafana from, the configuration directories will change. For clarity, we will proceed with the binary install here. - -1. [Download Grafana](https://grafana.com/grafana/download) binary and extract it into a folder. 
- For example: - - ```shell - wget https://dl.grafana.com/oss/release/grafana-7.0.4.linux-amd64.tar.gz - tar -zxvf grafana-7.0.4.linux-amd64.tar.gz - ``` - -2. We will need couple files from the GoShimmer repository. Here we suppose, that you have the repository directory `goshimmer` on the same level as the extracted `grafana-7.0.4` directory: - - ``` - ├── grafana-7.0.4 - │ ├── bin - │ ├── conf - │ ├── LICENSE - │ ├── NOTICE.md - │ ├── plugins-bundled - │ ├── public - │ ├── README.md - │ ├── scripts - │ └── VERSIO - ├── goshimmer - │ ├── CHANGELOG.md - │ ├── client - │ ├── config.default.json - ... - ``` - - We copy a couple configuration files from the repository into Grafana's directory: - - ```shell - cp -R goshimmer/tools/monitoring/grafana/dashboards/local_dashboard.json grafana-7.0.4/public/dashboards/ - cp goshimmer/tools/monitoring/grafana/provisioning/datasources/datasources.yaml grafana-7.0.4/conf/provisioning/datasources/datasources.yaml - cp goshimmer/tools/monitoring/grafana/provisioning/dashboards/dashboards.yaml grafana-7.0.4/conf/provisioning/dashboards/dashboards.yaml - ``` - -3. Run Grafana. - - ```shell - cd grafana-7.0.4/bin - ./grafana-server - ``` - -4. Open Moitoring Dashboard at [localhost:3000](http://localhost:3000). - -Default login credentials are: - -- `username` : admin -- `password` : admin - -#### Grafana as a system service (Linux) - -Instead of running the `grafana-server` app each time we can create a service that runs in the background. 
- -When you install Grafana from - -- [APT repository](https://grafana.com/docs/grafana/latest/installation/debian/#install-from-apt-repository) or `.deb` [package](https://grafana.com/docs/grafana/latest/installation/debian/#install-deb-package) (Ubuntu or Debian), -- [YUM repository](https://grafana.com/docs/grafana/latest/installation/rpm/#install-from-yum-repository) or `.rpm` [package](https://grafana.com/docs/grafana/latest/installation/rpm/#install-with-rpm) (CentOS, Fedora, OpenSuse, RedHat), - -then Grafana is configured to run as a system service without any modification. All you need to do is copy config files from the GoShimmer repository: - -1. Copy [datasource yaml config](https://github.com/iotaledger/goshimmer/blob/develop/tools/monitoring/grafana/provisioning/datasources/datasources.yaml) to `/etc/grafana`: - (assuming you are at the root of the cloned GoShimmer repository) - - ```shell - sudo cp tools/monitoring/grafana/provisioning/datasources/datasources.yaml /etc/grafana/provisioning/datasources - ``` - -2. Copy [dashboard yaml config](https://github.com/iotaledger/goshimmer/blob/develop/tools/monitoring/grafana/provisioning/dashboards/dashboards.yaml) to `/etc/grafana`: - - ```shell - sudo cp tools/monitoring/grafana/provisioning/dashboards/dashboards.yaml /etc/grafana/provisioning/dashboards - ``` - -3. Copy [GoShimmer Local Metrics](https://github.com/iotaledger/goshimmer/blob/develop/tools/monitoring/grafana/dashboards/local_dashboard.json) dashboard to `/var/lib/grafana/`: - - ```shell - sudo cp -R tools/monitoring/grafana/dashboards /var/lib/grafana/ - ``` - -4. Reload daemon and start Grafana. - - ```shell - sudo systemctl daemon-reload - sudo systemctl start grafana-server - ``` - -5. Open Monitoring Dashboard at [localhost:3000](http://localhost:3000). 
- -Default login credentials are: - -- `username` : admin -- `password` : admin - -#### Grafana config via GUI - -If you successfully installed Grafana and would like to set it up using its graphical interface, here are the steps you need to take: - -1. Run Grafana. -2. Open [localhost:3000](http://localhost:3000) in a browser window. - Default login credentials are: - - `username` : admin - - `password` : admin -3. On the left side, open **Configuration -> Data Sources**. Click on **Add data source** and select **Prometheus** core plugin. -4. Fill the following fields: - - `URL`: http://localhost:9090 - - `Scrape interval`: 5s -5. Click on **Save & Test**. If you have a running Prometheus server, everything should turn green. If the URL can't be reached, try changing the **Access** field to `Browser`. -6. On the left side panel, click on **Dashboards -> Manage**. -7. Click on **Import**. Paste the content of [local_dashboard.json](https://github.com/iotaledger/goshimmer/blob/develop/tools/monitoring/grafana/dashboards/local_dashboard.json) in the **Import via panel json**, or download the file and use the **Upload .json file** option. -8. Now you can open **GoShimmer Local Metrics** dashboard under **Dashboards**. Don't forget to start your node and run Prometheus! 
diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/obtain_tokens.md b/docs/maintain/goshimmer/0.9/docs/tutorials/obtain_tokens.md deleted file mode 100644 index c4ae374f77c..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/obtain_tokens.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -description: You can obtain tokens using the Faucet dApp, using the Go Client Library, the HTTP API and the Pollen Wallet -image: /img/tutorials/request_funds/pollen_wallet.png -keywords: - - faucet - - proof of work - - client library - - wallet - - dApp - - pollen wallet ---- - -# How to Obtain Tokens From the Faucet - -## The Faucet dApp - -The faucet is a dApp built on top of the [value and communication layer](../apis/communication.md). It sends IOTA tokens to addresses by listening to faucet request blocks. A faucet block is a Block containing a special payload with an address encoded in Base58, the aManaPledgeID, the cManaPledgeID and a nonce as a proof that some Proof Of Work has been computed. The PoW is just a way to rate limit and avoid abuse of the Faucet. The Faucet has an additional protection by means of granting a request to a given address only once. That means that, in order to receive funds from the Faucet multiple times, the address must be different. - -After sending a faucet request block, you can check your balances via [`GetAddressUnspentOutputs()`](../apis/ledgerstate.md). - -## Obtain Tokens From the Faucet - -There are 3 ways to send a faucet request block to obtain IOTA tokens: - -1. Via the Go client library -2. Via the HTTP API directly -3. Via the wallet - -### Via the Go Client Library - -Follow the instructions in [Use the API](../apis/client_lib.md) to set up the API instance. 
- -Example: - -```go -// provide your Base58 encoded destination address, -// the proof of work difficulty, -// the optional aManaPledgeID (Base58 encoded), -// the optional cManaPledgeID (Base58 encoded) -blockID, err := goshimAPI.SendFaucetRequest("JaMauTaTSVBNc13edCCvBK9fZxZ1KKW5fXegT1B7N9jY", 22, "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5") - ----- or - -// invoke go get github.com/iotaledger/goshimmer/client/wallet for wallet usage -// get the given address from a wallet instance and -connector := wallet.GenericConnector(wallet.NewWebConnector("http://localhost:8080")) -addr := wallet.New(connector).ReceiveAddress() -// use String() to get base58 representation -// the proof of work difficulty, -// the optional aManaPledgeID (Base58 encoded), -// the optional cManaPledgeID (Base58 encoded) -blockID, err := goshimAPI.SendFaucetRequest(addr.Base58(), 22, "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5", "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5") -``` - -### Via the Wallet - -Currently, there is one cli-wallet that you can refer to the tutorial [Command Line Wallet -](./wallet_library.md) and two GUI wallets to use. One from the community member [Dr-Electron ElectricShimmer](https://github.com/Dr-Electron/ElectricShimmer) and another from the foundation [pollen-wallet](https://github.com/iotaledger/pollen-wallet/tree/master). You can request funds from the faucet with these two implementations. - -As for pollen-wallet, follow the instructions in [pollen-wallet](https://github.com/iotaledger/pollen-wallet/tree/master) to build and execute the wallet, or download executable file directly in [GoShimmer wallet release](https://github.com/iotaledger/pollen-wallet/releases). - -You can request funds by pressing the `Request Funds` in the wallet. - -**Note**: You need to create a wallet first before requesting funds. 
- -![Pollen Wallet](/img/tutorials/request_funds/pollen_wallet.png 'Pollen Wallet') - -This may take a while to receive funds: - -![Pollen Wallet requesting funds](/img/tutorials/request_funds/pollen_wallet_requesting_funds.png 'Pollen Wallet requesting funds') - -When the faucet request is successful, you can check the received balances: - -![Pollen Wallet transfer success](/img/tutorials/request_funds/pollen_wallet_transfer_success.png 'Pollen Wallet requesting transfer success') diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/send_transaction.md b/docs/maintain/goshimmer/0.9/docs/tutorials/send_transaction.md deleted file mode 100644 index 8f6628e45a9..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/send_transaction.md +++ /dev/null @@ -1,323 +0,0 @@ ---- -description: The simplest easiest way to create a transaction is to use ready solutions, such as GUI wallets. But you can also create transactions using the Go client library. -image: /img/logo/goshimmer_light.png -keywords: - - transaction - - send - - sign - - create - - seed - - funds - - post - - wallet - - web API ---- - -# How to Send a Transaction - -The simplest and easiest way for creating transaction is to use ready solution, such us GUI wallets: [pollen-wallet](https://github.com/iotaledger/pollen-wallet/tree/master) and [Dr-Electron ElectricShimmer](https://github.com/Dr-Electron/ElectricShimmer) -or command line wallet [Command Line Wallet](wallet_library.md). However, there is also an option to create a transaction directly with the Go client library, which will be main focus of this tutorial. - -For code examples you can go directly to [Code examples](send_transaction.md#code-examples). - -## Funds - -To create a transaction, firstly we need to be in possession of tokens. We can receive them from other network users or request them from the faucet. For more details on how to request funds, see [this](obtain_tokens.md) tutorial. 
- -## Preparing the Transaction - -A transaction is built from two parts: a transaction essence, and the unlock blocks. The transaction essence contains, among other information, the amount, the origin and where the funds should be sent. The unlock block makes sure that only the owner of the funds being transferred is allowed to successfully perform this transaction. - -### Seed - -In order to send funds we need to have a private key that can be used to prove that we own the funds and consequently unlock them. If you want to use an existing seed from one of your wallets, just use the backup seed showed during a wallet creation. With this, we can decode the string with the `base58` library and create the `seed.Seed` instance. That will allow us to retrieve the wallet addresses (`mySeed.Address()`) and the corresponding private and public keys (`mySeed.KeyPair()`). - -```Go -seedBytes, _ := base58.Decode("BoDjAh57RApeqCnoGnHXBHj6wPwmnn5hwxToKX5PfFg7") // ignoring error -mySeed := walletseed.NewSeed(seedBytes) -``` - -Another option is to generate a completely new seed and addresses. - -```Go -mySeed := walletseed.NewSeed() -fmt.Println("My secret seed:", myWallet.Seed.String()) -``` - -We can obtain the addresses from the seed by providing their index, in our example it is `0`. Later we will use the same index to retrieve the corresponding keys. - -```Go -myAddr := mySeed.Address(0) -``` - -Additionally, we should make sure that unspent outputs we want to use are already confirmed. -If we use a wallet, this information will be available along with the wallet balance. We can also use the dashboard and look up for our address in the explorer. To check the confirmation status with Go use `PostAddressUnspentOutputs()` API method to get the outputs and check their inclusion state. 
- -```Go -resp, _ := goshimAPI.PostAddressUnspentOutputs([]string{myAddr.Base58()}) // ignoring error -for _, output := range resp.UnspentOutputs[0].Outputs { - fmt.Println("outputID:", output.Output.OutputID.Base58, "confirmed:", output.ConfirmationState.Confirmed) -} -``` - -### Transaction Essence - -The transaction essence can be created with: -`NewTransactionEssence(version, timestamp, accessPledgeID, consensusPledgeID, inputs, outputs)` -We need to provide the following arguments: - -```Go -var version TransactionEssenceVersion -var timestamp time.Time -var accessPledgeID identity.ID -var consensusPledgeID identity.ID -var inputs ledgerstate.Inputs -var outputs ledgerstate.Outputs -``` - -#### Version and Timestamp - -We use `0` for a version and provide the current time as a timestamp of the transaction. - -```Go -version = 0 -timestamp = time.Now() -``` - -#### Mana pledge IDs - -We also need to specify the nodeID to which we want to pledge the access and consensus mana. We can use two different nodes for each type of mana. -We can retrieve an identity instance by converting base58 encoded node ID as in the following example: - -```Go -pledgeID, err := mana.IDFromStr(base58encodedNodeID) -accessPledgeID = pledgeID -consensusPledgeID = pledgeID -``` - -or discard mana by pledging it to the empty nodeID: - -```Go -accessPledgeID = identity.ID{} -consensusPledgeID = identity.ID{} -``` - -#### Inputs - -As inputs for the transaction we need to provide unspent outputs. -To get unspent outputs of the address we can use the following example. - -```Go -resp, _ := goshimAPI.GetAddressUnspentOutputs(myAddr.Base58()) // ignoring error -// iterate over unspent outputs of an address -for _, output := range resp.Outputs { - var out ledgerstate.Output - out, _ = output.ToLedgerstateOutput() // ignoring error -} -``` - -To check the available output's balance use `Balances()` method and provide the token color. We use the default, IOTA color. 
- -```Go -balance, colorExist := out.Balances().Get(ledgerstate.ColorIOTA) -fmt.Println(balance, colorExist) -``` - -or iterate over all colors and balances: - -```Go -out.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool { - fmt.Println("Color:", color.Base58()) - fmt.Println("Balance:", balance) - return true - }) -``` - -At the end we need to wrap the selected output to match the interface of the inputs: - -```Go -inputs = ledgerstate.NewInputs(ledgerstate.NewUTXOInput(out)) -``` - -#### Outputs - -To create the most basic type of output use -`ledgerstate.NewSigLockedColoredOutput()` and provide it with a balance and destination address. Important is to provide the correct balance value. The total balance with the same color has to be equal for input and output. - -```Go -balance := ledgerstate.NewColoredBalances(map[ledgerstate.Color]uint64{ - ledgerstate.ColorIOTA: uint64(100), - }) -outputs := ledgerstate.NewOutputs(ledgerstate.NewSigLockedColoredOutput(balance, destAddr.Address())) -``` - -The same as in case of inputs we need to adapt it with `ledgerstate.NewOutputs()` before passing to the `NewTransactionEssence` function. - -### Signing a Transaction - -After preparing the transaction essence, we should sign it and put the signature to the unlock block part of the transaction. -We can retrieve private and public key pairs from the seed by providing it with indexes corresponding to the addresses that holds the unspent output that we want to use in our transaction. - -```Go -kp := *mySeed.KeyPair(0) -txEssence := NewTransactionEssence(version, timestamp, accessPledgeID, consensusPledgeID, inputs, outputs) -``` - -We can sign the transaction in two ways: with ED25519 or BLS signature. The wallet seed library uses `ed25519` package and keys, so we will use `Sign()` method along with `ledgerstate.ED25519Signature` constructor to sign the transaction essence bytes. -Next step is to create the unlock block from our signature. 
- -```Go -signature := ledgerstate.NewED25519Signature(kp.PublicKey, kp.PrivateKey.Sign(txEssence.Bytes())) -unlockBlock := ledgerstate.NewSignatureUnlockBlock(signature) -``` - -Putting it all together, now we are able to create transaction with previously created transaction essence and adapted unlock block. - -```Go -tx := ledgerstate.NewTransaction(txEssence, ledgerstate.UnlockBlocks{unlockBlock}) -``` - -## Sending a Transaction - -There are two web API methods that allows us to send the transaction: -`PostTransaction()` and `IssuePayload()`. The second one is a more general method that sends the attached payload. We are going to use the first one that will additionally check the transaction validity before issuing and wait with sending the response until the block is booked. -The method accepts a byte array, so we need to call `Bytes()`. -If the transaction will be booked without any problems, we should be able to get the transaction ID from the API response. - -```Go -resp, err := goshimAPI.PostTransaction(tx.Bytes()) -if err != nil { - return -} -fmt.Println("Transaction issued, txID:", resp.TransactionID) -``` - -## Code Examples - -### Create the Transaction - -Constructing a new `ledgerstate.Transaction`. - -```go -import ( - "fmt" - "net/http" - "time" - - "github.com/iotaledger/goshimmer/client" - walletseed "github.com/iotaledger/goshimmer/client/wallet/packages/seed" - "github.com/iotaledger/goshimmer/packages/ledgerstate" - "github.com/iotaledger/goshimmer/packages/mana" -) - -func buildTransaction() (tx *ledgerstate.Transaction, err error) { - // node to pledge access mana. - accessManaPledgeIDBase58 := "2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5" - accessManaPledgeID, err := mana.IDFromStr(accessManaPledgeIDBase58) - if err != nil { - return - } - - // node to pledge consensus mana. 
- consensusManaPledgeIDBase58 := "1HzrfXXWhaKbENGadwEnAiEKkQ2Gquo26maDNTMFvLdE3" - consensusManaPledgeID, err := mana.IDFromStr(consensusManaPledgeIDBase58) - if err != nil { - return - } - - /** - N.B to pledge mana to the node issuing the transaction, use empty pledgeIDs. - emptyID := identity.ID{} - accessManaPledgeID, consensusManaPledgeID := emptyID, emptyID - **/ - - // destination address. - destAddressBase58 := "your_base58_encoded_address" - destAddress, err := ledgerstate.AddressFromBase58EncodedString(destAddressBase58) - if err != nil { - return - } - - // output to consume. - outputIDBase58 := "your_base58_encoded_outputID" - out, err := ledgerstate.OutputIDFromBase58(outputIDBase58) - if err != nil { - return - } - inputs := ledgerstate.NewInputs(ledgerstate.NewUTXOInput(out)) - - // UTXO output. - output := ledgerstate.NewSigLockedColoredOutput(ledgerstate.NewColoredBalances(map[ledgerstate.Color]uint64{ - ledgerstate.ColorIOTA: uint64(1337), - }), destAddress) - outputs := ledgerstate.NewOutputs(output) - - // build tx essence. - txEssence := ledgerstate.NewTransactionEssence(0, time.Now(), accessManaPledgeID, consensusManaPledgeID, inputs, outputs) - - // sign. - seed := walletseed.NewSeed([]byte("your_seed")) - kp := seed.KeyPair(0) - sig := ledgerstate.NewED25519Signature(kp.PublicKey, kp.PrivateKey.Sign(txEssence.Bytes())) - unlockBlock := ledgerstate.NewSignatureUnlockBlock(sig) - - // build tx. - tx = ledgerstate.NewTransaction(txEssence, ledgerstate.UnlockBlocks{unlockBlock}) - return -} -``` - -### Post the Transaction - -There are 2 available options to post the created transaction. 
- -- GoShimmer client lib -- Web API - -#### Post via client lib - -```go -func postTransactionViaClientLib() (res string , err error) { - // connect to goshimmer node - goshimmerClient := client.NewGoShimmerAPI("http://127.0.0.1:8080", client.WithHTTPClient(http.Client{Timeout: 60 * time.Second})) - - // build tx from previous step - tx, err := buildTransaction() - if err != nil { - return - } - - // send the tx payload. - res, err = goshimmerClient.PostTransaction(tx.Bytes()) - if err != nil { - return - } - return -} -``` - -#### Post via web API - -First, get the transaction bytes. - -```go -// build tx from previous step -tx, err := buildTransaction() -if err != nil { - return -} -bytes := tx.Bytes() - -// print bytes -fmt.Println(string(bytes)) -``` - -Then, post the bytes. - -```shell -curl --location --request POST 'http://localhost:8080/ledgerstate/transactions' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "tx_bytes": "bytes..." -}' -``` diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/setup.md b/docs/maintain/goshimmer/0.9/docs/tutorials/setup.md deleted file mode 100644 index f698165b93a..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/setup.md +++ /dev/null @@ -1,644 +0,0 @@ ---- -description: How to set up your own GoShimmer node in the GoShimmer testnet with Docker -image: /img/tutorials/setup/dashboard.png -keywords: - - node - - set up - - docker - - http API - - tcp - - dashboard - - prometheus - - grafana ---- - -# Setting up a GoShimmer node - -This page describes how to set up your own GoShimmer node in the GoShimmer testnet with Docker. - -:::warning DISCLAIMER -**Note that there will be breaking changes frequently (approx. bi-weekly) where the entire network needs to upgrade. 
If you don't have time to continuously monitor and upgrade your node, then running a GoShimmer node might not be for you.** - -We want to emphasize that running a GoShimmer node requires proper knowledge in Linux and IT related topics such as networking and so on. It is not meant as a node to be run by people with little experience in the mentioned fields. **Do not plan to run any production level services on your node/network.** -::: - -## Why You Should Run a Node - -Running a node in the GoShimmer testnet helps us in the following ways: - -- It increases the amount of nodes in the network and thus lets it form a more realistic network. -- Your node will be configured to send debug log blocks to a centralized logger from which we can assess and debug research questions and occurring problems. -- Your node is configured to send metric data to a centralized analysis server where we store information such as resource consumption, traffic, and so on. This data helps us further fostering the development of GoShimmer and assessing network behavior. -- If you expose your HTTP API port, you provide an entrypoint for other people to interact with the network. - -:::note - -Any metric data is anonymous. - -::: - -## Installing GoShimmer with Docker - -#### Hardware Requirements - -:::note - -We do not provide a Docker image or binaries for ARM based systems such as Raspberry Pis. - -::: - -We recommend running GoShimmer on a x86 VPS with following minimum hardware specs: - -- 2 cores / 4 threads -- 4 GB of memory -- 40 GB of disk space - -A cheap [CX21 Hetzner instance](https://www.hetzner.de/cloud) is thereby sufficient. - -If you plan on running your GoShimmer node from home, please only do so if you know how to properly configure NAT on your router, as otherwise your node will not correctly participate in the network. 
- ---- - -:::info - -In the following sections we are going to use a CX21 Hetzner instance with Ubuntu 20.04 while being logged in as root - -::: - -Lets first upgrade the packages on our system: - -```shell -apt update && apt dist-upgrade -y -``` - -#### Install Docker - -Follow the official [Docker installation guide](https://docs.docker.com/engine/install/ubuntu/) - -On windows-subsystem for Linux (WSL2) it may be necessary to start docker seperately: - -``` -/etc/init.d/docker start -``` - -Note, this may not work on WSL1. - -Check whether docker is running by executing `docker ps`: - -```shell -docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -### Install Docker Compose - -Docker compose gives us the ability to define our services with `docker-compose.yml` files instead of having to define all container parameters directly on the CLI. - -Download docker compose: - -```shell -apt-get install docker-compose-plugin -``` - -Check that docker compose works: - -```shell -docker compose --version -Docker Compose version v2.10.2 -``` - -### Define the docker-compose.yml - -First, lets create a user defined bridged network. Unlike the already existing `bridge` network, the user defined one will have container name DNS resolution for containers within that network. This is useful if later we want to setup additional containers which need to speak with the GoShimmer container. 
- -```shell -docker network create --driver=bridge goshimmer -c726034d295c3df66803b92c71ca517a0cf0e3c65c1c6d84ee5fa34ae76cbcd4 -``` - -Lets create a folder holding our `docker-compose.yml`: - -```shell -mkdir /opt/goshimmer -``` - -Lets create a folder holding our database: - -```shell -cd /opt/goshimmer -sudo mkdir mainnetdb && sudo chown 65532:65532 mainnetdb -sudo mkdir peerdb && sudo chown 65532:65532 peerdb -``` - -Finally, lets create our `docker-compose.yml`: - -```shell -nano docker-compose.yml -``` - -and add following content: - -```yaml -version: '3.3' - -networks: - outside: - external: - name: goshimmer - -services: - goshimmer: - image: iotaledger/goshimmer:latest - container_name: goshimmer - hostname: goshimmer - stop_grace_period: 2m - volumes: - - './mainnetdb:/app/mainnetdb:rw' - - './peerdb:/app/peerdb:rw' - - '/etc/localtime:/etc/localtime:ro' - ports: - # Autopeering - - '0.0.0.0:14626:14626/udp' - # Gossip - - '0.0.0.0:14666:14666/tcp' - # HTTP API - - '0.0.0.0:8080:8080/tcp' - # Dashboard - - '0.0.0.0:8081:8081/tcp' - # pprof profiling - - '0.0.0.0:6061:6061/tcp' - environment: - - ANALYSIS_CLIENT_SERVERADDRESS=analysisentry-01.devnet.shimmer.iota.cafe:21888 - - AUTOPEERING_BINDADDRESS=0.0.0.0:14626 - - DASHBOARD_BINDADDRESS=0.0.0.0:8081 - - GOSSIP_BINDADDRESS=0.0.0.0:14666 - - WEBAPI_BINDADDRESS=0.0.0.0:8080 - - PROFILING_BINDADDRESS=0.0.0.0:6061 - - NETWORKDELAY_ORIGINPUBLICKEY=9DB3j9cWYSuEEtkvanrzqkzCQMdH1FGv3TawJdVbDxkd - - PROMETHEUS_BINDADDRESS=0.0.0.0:9311 - command: > - --skip-config=true - --autoPeering.entryNodes=2PV5487xMw5rasGBXXWeqSi4hLz7r19YBt8Y1TGAsQbj@analysisentry-01.devnet.shimmer.iota.cafe:15626,5EDH4uY78EA6wrBkHHAVBWBMDt7EcksRq6pjzipoW15B@entry-0.devnet.tanglebay.com:14646,CAB87iQZR6BjBrCgEBupQJ4gpEBgvGKKv3uuGVRBKb4n@entry-1.devnet.tanglebay.com:14646 - --node.disablePlugins=portcheck - --node.enablePlugins=remotelog,networkdelay,spammer,prometheus - --database.directory=/app/mainnetdb - --node.peerDBDirectory=/app/peerdb 
- --logger.level=info - --logger.disableEvents=false - --logger.remotelog.serverAddress=metrics-01.devnet.shimmer.iota.cafe:5213 - networks: - - outside -``` - -:::info - -If performance is a concern, you can also run your containers with `network_mode: "host"`, however, you must then adjust the hostnames in the configs for the corresponding containers and perhaps also create some iptable rules to block traffic from outside accessing your services directly. - -:::warning INFO - -If your home network is IPv6-only (as is common for some ISPs in a few countries like Germany), make sure your docker installation is configured to support IPv6 as this is not always the default setting. If your ports and firewalls are configured correctly and your GoShimmer node does start but does not seem to find any neighbors even after a little while, this might be the solution to your problem. Find the very short guide to enable IPv6 support for docker in the [Docker documentation](https://docs.docker.com/config/daemon/ipv6/). - -::: - -Note how we are setting up NATs for different ports: - -| Port | Functionality | Protocol | -| ----- | -------------- | -------- | -| 14626 | Autopeering | UDP | -| 14666 | Gossip | TCP | -| 8080 | HTTP API | TCP/HTTP | -| 8081 | Dashboard | TCP/HTTP | -| 6061 | pprof HTTP API | TCP/HTTP | - -It is important that the ports are correctly mapped so that the node can gain inbound neighbors. - -:::warning INFO - -If the UDP NAT mapping is not configured correctly, GoShimmer will terminate with an error block stating to check the NAT configuration - -::: - -## Running the GoShimmer Node - -Within the `/opt/goshimmer` folder where the `docker-compose.yml` resides, simply execute: - -```shell -docker compose up -d -Pulling goshimmer (iotaledger/goshimmer:0.2.0)... -... -``` - -to start the GoShimmer node. 
- -You should see your container running now: - -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -687f52b78cb5 iotaledger/goshimmer:0.2.0 "/run/goshimmer --sk…" 19 seconds ago Up 17 seconds 0.0.0.0:6061->6061/tcp, 0.0.0.0:8080-8081->8080-8081/tcp, 0.0.0.0:10895->10895/tcp, 0.0.0.0:14666->14666/tcp, 0.0.0.0:14626->14626/udp goshimmer -``` - -You can follow the log output of the node via: - -```shell -docker logs -f --since=1m goshimmer -``` - -### Syncing - -When the node starts for the first time, it must synchronize its state with the rest of the network. GoShimmer currently uses the Tangle Time to help nodes determine their synced status. - -#### Dashboard - -The dashboard of your GoShimmer node should be accessible via `http://:8081`. If your node is still synchronizing, you might see a higher inflow of BPS. - -[![GoShimmer Dashboard](/img/tutorials/setup/dashboard.png)](/img/tutorials/setup/dashboard.png) - -After a while, your node's dashboard should also display up to 8 neighbors: -[![GoShimmer Dashboard Neighbors](/img/tutorials/setup/dashboard_neighbors.png)](/img/tutorials/setup/dashboard_neighbors.png) - -#### HTTP API - -GoShimmer also exposes an HTTP API. To check whether that works correctly, you can access it via `http://:8080/info` which should return a JSON response in the form of: - -```json -{ - "version": "v0.6.2", - "networkVersion": 30, - "tangleTime": { - "blockID": "6ndfmfogpH9H8C9X9Fbb7Jmuf8RJHQgSjsHNPdKUUhoJ", - "time": 1621879864032595415, - "synced": true - }, - "identityID": "D9SPFofAGhA5V9QRDngc1E8qG9bTrnATmpZMdoyRiBoW", - "identityIDShort": "XBgY5DsUPng", - "publicKey": "9DB3j9cWYSuEEtkvanrzqkzCQMdH1FGv3TawJdVbDxkd", - "solidBlockCount": 74088, - "totalBlockCount": 74088, - "enabledPlugins": [ - ... - ], - "disabledPlugins": [ - ... 
- ], - "mana": { - "access": 1, - "accessTimestamp": "2021-05-24T20:11:05.451224937+02:00", - "consensus": 10439991680906, - "consensusTimestamp": "2021-05-24T20:11:05.451228137+02:00" - }, - "manaDelegationAddress": "1HMQic52dz3xLY2aeDXcDhX53LgbsHghdfD8eGXR1qVHy", - "mana_decay": 0.00003209, - "scheduler": { - "running": true, - "rate": "5ms", - "nodeQueueSizes": {} - }, - "rateSetter": { - "rate": 20000, - "size": 0 - } -} -``` - -## Managing the GoShimmer node lifecycle - -### Stopping the Node - -```shell -docker compose stop -``` - -### Resetting the Node - -```shell -docker compose down -``` - -### Upgrading the Node - -**Ensure that the image version in the `docker-compose.yml` is `latest`** then execute following commands: - -```shell -docker compose down -rm db/* -docker compose pull -docker compose up -d -``` - -### Following Log Output - -```shell -docker logs -f --since=1m goshimmer -``` - -### Create a log.txt - -```shell -docker logs goshimmer > log.txt -``` - -### Update Grafana Dashboard - -If you set up the Grafana dashboard for your node according to the next section "Setting up the Grafana dashboard", the following method will help you to update when a new version is released. - -You have to manually copy the new [dashboard file](https://github.com/iotaledger/goshimmer/blob/develop/tools/docker-network/grafana/dashboards/local_dashboard.json) into `/opt/goshimmer/grafana/dashboards` directory. 
-Supposing you are at `/opt/goshimmer/`: - -```shell -wget https://raw.githubusercontent.com/iotaledger/goshimmer/develop/tools/docker-network/grafana/dashboards/local_dashboard.json -cp local_dashboard.json grafana/dashboards -``` - -Restart the grafana container: - -```shell -docker restart grafana -``` - -## Setting up the Grafana dashboard - -### Add Prometheus and Grafana Containers to `docker-compose.yml` - -Append the following to the previously described `docker-compose.yml` file (**make sure to also copy the space in front of "prometheus"/the entire whitespace**): - -```yaml -prometheus: - image: prom/prometheus:latest - container_name: prometheus - restart: unless-stopped - ports: - - '9090:9090/tcp' - command: - - --config.file=/etc/prometheus/prometheus.yml - volumes: - - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro - - ./prometheus/data:/prometheus:rw - depends_on: - - goshimmer - networks: - - outside - -grafana: - image: grafana/grafana:latest - container_name: grafana - restart: unless-stopped - environment: - # path to provisioning definitions can only be defined as - # environment variables for grafana within docker - - GF_PATHS_PROVISIONING=/var/lib/grafana/provisioning - ports: - - '3000:3000/tcp' - user: '472' - volumes: - - ./grafana:/var/lib/grafana:rw - networks: - - outside -``` - -#### Create Prometheus config - -1. Create a `prometheus/data` directory in `/opt/goshimmer`: - -```shell -cd /opt/goshimmer -mkdir -p prometheus/data -``` - -2. Create a `prometheus.yml` in `prometheus` directory: - -```shell -nano prometheus/prometheus.yml -``` - -The content of the file should be: - -```yaml -scrape_configs: - - job_name: goshimmer_local - scrape_interval: 5s - static_configs: - - targets: - - goshimmer:9311 -``` - -3. Add permissions to `prometheus` config directory: - -```shell -chmod -R 777 prometheus -``` - -#### Create Grafana configs - -1. Create necessary config dirs in `/opt/goshimmer/`. 
- -```shell -mkdir -p grafana/provisioning/datasources grafana/provisioning/dashboards grafana/provisioning/notifiers grafana/provisioning/plugins -mkdir -p grafana/dashboards -``` - -2. Create a datasource configuration file in `grafana/provisioning/datasources`: - -```shell -nano grafana/provisioning/datasources/datasources.yaml -``` - -With the following content: - -```yaml -apiVersion: 1 - -datasources: - - name: Prometheus - type: prometheus - # access mode. proxy or direct (Server or Browser in the UI). Required - access: proxy - orgId: 1 - url: http://prometheus:9090 - jsonData: - graphiteVersion: '1.1' - timeInterval: '1s' - # json object of data that will be encrypted. - secureJsonData: - # database password, if used - password: - # basic auth password - basicAuthPassword: - version: 1 - # allow users to edit datasources from the UI. - editable: true -``` - -3. Create a dashboard configuration file in `grafana/provisioning/dashboards`: - -```shell -nano grafana/provisioning/dashboards/dashboards.yaml -``` - -With the following content: - -```yaml -apiVersion: 1 - -providers: - - name: 'GoShimmer Local Metrics' - orgId: 1 - folder: '' - type: file - disableDeletion: false - editable: true - updateIntervalSeconds: 10 - allowUiUpdates: true - options: - path: /var/lib/grafana/dashboards -``` - -4. Add predefined GoShimmer Local Metrics Dashboard. - -Head over to the GoShimmer repository and download [local_dashboard.json](https://github.com/iotaledger/goshimmer/blob/develop/tools/docker-network/grafana/dashboards/local_dashboard.json). - -```shell -wget https://raw.githubusercontent.com/iotaledger/goshimmer/develop/tools/docker-network/grafana/dashboards/local_dashboard.json -cp local_dashboard.json grafana/dashboards -``` - -5. Add permissions to Grafana config folder - -```shell -chmod -R 777 grafana -``` - -#### Run GoShimmer with Prometheus and Grafana - -```shell -docker compose up -d -``` - -The Grafana dashboard should be accessible at `http://:3000`. 
- -Default login credentials are: - -- `username`: admin -- `password`: admin - -## Installing Goshimmer by Building From Source - -### Software Requirements - -Upgrade your systems' packages by running the following command: - -```shell -apt update && apt dist-upgrade -y -``` - -#### Installing RocksDB Compression Libraries - -GoShimmer uses RocksDB as its underlying database engine. That requires installing its compression libraries. Please use the tutorial from RocksDB's Github: - -```shell -https://github.com/facebook/rocksdb/blob/main/INSTALL.md -``` - -#### GCC and G++ - -GCC and G++ are required for the compilation to work properly. You can install them by running the following command: - -```shell -sudo apt install gcc g++ -``` - -#### Installing Golang-go - -In order for the build script to work later on, we have to install the programming language Go. Which version you need to install is specified in: - -```shell -https://github.com/iotaledger/goshimmer/blob/4e3ff2d23d65ddd31053f195fb40d530ef62acf3/go.mod#L3 -``` - -Use apt to install: - -```shell -apt install golang-go -``` - -Check the go version: - -```shell -go version -``` - -If apt did not install the correct go version, use the tutorial provided by the go.dev page: - -```shell -https://go.dev/doc/install -``` - -Use `go version` to check if it successfully installed golang-go. - -### Clone the Repository - -Once you have installed the [software requirements](#software-requirements), you should clone the [GoShimmer repository](https://github.com/iotaledger/goshimmer/) into the `/opt` directory. 
You can do so by running the following commands: - -```shell -cd /opt -git clone https://github.com/iotaledger/goshimmer.git -``` - -### Download the Snapshot - -You can download the latest snapshot by running the following command from the goshimmer directory you created when you [cloned the repository](#clone-the-repository): - -```shell -sudo wget -O snapshot.bin https://dbfiles-goshimmer.s3.eu-central-1.amazonaws.com/snapshots/nectar/snapshot-latest.bin -``` - -### Making the Node Dashboard Accessible - -You will need to modify your goshimmer configuration file to make the Node Dashboard accessible. Below we described a method using the nano text editor, but you can use your text editor of choice. - -```shell -nano config.default.json -``` - -In the config file where it says **dashboard**, change the **bindAddress** from `"127.0.0.1:8081"` to `"0.0.0.0:8081"`. - -Rename the file to config.json and save your changes. - -:::note -If you do not save the file as `config.json`, the node dashboard will not be accessible through your browser. -::: - -### Run the GoShimmer Node - -You can now run the build script for the goshimmer binary with the following command: - -```shell -./scripts/build.sh -``` - -:::tip -You can use the `screen` command to keep the node running if you terminate your current ssh session. -::: - -You can now run the GoShimmer binary to start your node: - -```shell -./goshimmer -``` - -You can "detach" from the GoShimmer screen by pressing your `CTRL+A+D` keys. This will remove the GoShimmer window, but it will still be running. - -You need the number from the start of the window name to reattach it. 
If you forget it, you can always use the `-ls` (list) option, as shown below, to get a list of the detached windows: - -```shell -screen -ls -``` - -You can use the -r (reattach) option and the number of the session to reattach it, like so: - -```shell -screen -r (your session id) -``` - -### Stopping the Node - -To stop a screen session and your GoShimmer node press `CTRL+A+K` inside the running window. This will stop your screen session. diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/static_identity.md b/docs/maintain/goshimmer/0.9/docs/tutorials/static_identity.md deleted file mode 100644 index f957fbaf2c4..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/static_identity.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: Create a static GoShimmer identity using a random 32byte autopeering seed, open ssl or rand-seed. -image: /img/logo/goshimmer_light.png -keywords: - - seed - - static - - identity - - tools - - base64 ---- - -# Create a Static Identity - -To create a static GoShimmer identity, you will need to generate a random 32 byte seed. You can use `openssl` or the `rand-seed` tool we provide under the GoShimmer folder `tools/rand-seed`. -For example, by running: - -- `openssl rand -base64 32`: generates a random 32 byte sequence encoded in base64. The output should look like: `gP0uRLhwBG2yJJmnLySX4S4R5G250Z3dbN9yBR6VSyY=` -- `go run main.go` under the GoShimmer folder `tools/rand-seed`: generates a random 32 byte sequence encoded in both base64 and base58. 
The output is written into the file `random-seed.txt` and should look like: - -``` -base64:nQW9MhNSLpIqBUiZe90XI320g680zxFoB1UIK09Acus= -base58:BZx5tDLymckUV5wiswXJtajgQrBEzTBBRR4uGfr1YNGS -``` - -You can now copy one of that strings (together with the encoding type prefix) and paste it into the GoShimmer `config.json` under the `node` section: - -```json -"node": { - "seed":"base64:gP0uRLhwBG2yJJmnLySX4S4R5G250Z3dbN9yBR6VSyY=", - "disablePlugins": [], - "enablePlugins": [] -}, -``` - -Or if you are using docker and prefer to set this with a command, you can define the same by changing the GoShimmer docker-compose.yml: - -```yaml -goshimmer: - network_mode: host - image: iotaledger/goshimmer - build: - context: ./ - dockerfile: Dockerfile - container_name: iota_goshimmer - command: > - --node.enablePlugins=prometheus - --node.seed="base64:gP0uRLhwBG2yJJmnLySX4S4R5G250Z3dbN9yBR6VSyY=" - # Mount volumes: - # make sure to give read/write access to the folder ./mainnetdb (e.g., chmod -R 777 ./mainnetdb) - # optionally, you can mount a config.json into the container - volumes: - - ./mainnetdb/:/app/mainnetdb/:rw - - ./config.json:/app/config.json:ro - # Expose ports: - # gossip: - "14666:14666/tcp" - # autoPeering: - "14626:14626/udp" - # webAPI: - "8080:8080/tcp" - # dashboard: - "8081:8081/tcp" - ports: - - "14666:14666/tcp" - - "14626:14626/udp" - - "9311:9311/tcp" # prometheus exporter - - "8080:8080/tcp" # webApi - - "8081:8081/tcp" # dashboard -``` diff --git a/docs/maintain/goshimmer/0.9/docs/tutorials/wallet_library.md b/docs/maintain/goshimmer/0.9/docs/tutorials/wallet_library.md deleted file mode 100644 index 18e2b496a97..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/tutorials/wallet_library.md +++ /dev/null @@ -1,1182 +0,0 @@ ---- -description: The main features in the wallet are requesting tokens from the faucet, sending tokens or assets to addresses, creating digital assets, creating-, transferring- or destroying NFTs, managing NFT owned tokens or 
assets and delegating tokens or digital assets -image: /img/tutorials/wallet_library/created_nft.png -keywords: - - wallet - - bash - - devnet - - tokens - - digital assets - - creating - - NFT - - managing - - requesting ---- - -# Command Line Wallet - -This section describes how to use the command line wallet (cli-wallet). - -GoShimmer ships with a basic (golang) wallet library so that developers and testers can use it to move tokens around, create digital assets, NFTs or delegate funds. - -The cli-wallet is built by using this wallet library to demonstrate the capabilities of the protocol. - -The main features in the wallet are: - -- [Requesting tokens from the faucet](#requesting-tokens) -- [Sending tokens or assets to addresses](#sending-tokens-and-assets) -- [Creating digital assets](#creating-digital-assets) -- [Creating](#creating-nfts), [transferring](#transferring-nfts) or [destroying](#destroying-nfts) Non-Fungible Tokens (NFTs) -- [Managing NFT owned tokens or assets](#managing-nft-owned-assets) -- [Delegating tokens or digital assets](#delegating-assets) - -:::info -The command line wallet and this tutorial are aimed at a developer audience, you should least be familiar with using a terminal to use it. -::: - -## Initializing the Wallet - -1. Download the latest cli-wallet for the system of your choice from the [GoShimmer GitHub Releases](https://github.com/iotaledger/goshimmer/releases) page. -2. If needed, make the downloaded binary executable. If you are using linux you can run: - - ```shell - chmod +x - ``` - -:::info -For simplicity, we renamed the binary to `cli-wallet` in this tutorial. -::: - -You will need to initialize the wallet the first time you start it. This involves generating a secret seed that is used to generate addresses and sign transactions. The wallet will automatically persist the seed in `wallet.dat` after the first run. 
- -You can configure the wallet by creating a `config.json` file in the directory of the executable: - -```json -{ - "WebAPI": "http://127.0.0.1:8080", - "basicAuth": { - "enabled": false, - "username": "goshimmer", - "password": "goshimmer" - }, - "reuse_addresses": false, - "faucetPowDifficulty": 25, - "assetRegistryNetwork": "nectar" -} -``` - -- The `WebAPI` tells the wallet which node API to communicate with. Set it to the url of a node API. -- If the node has basic authentication enabled, you may configure your wallet with a username and password. -- The `resuse_addresses` option specifies if the wallet should treat addresses as reusable, or whether it should try to spend from any wallet address only once. -- The `faucetPowDifficulty` option defines the difficulty of the faucet request POW the wallet should do. -- The `assetRegistryNetwork` option defines which asset registry network to use for pushing/fetching asset metadata to/from the registry. By default, the wallet chooses the `nectar` network. - -You can initialize your wallet by running the `init` command: - -```shell -./cli-wallet init -``` - -If successful, you'll see the generated seed (encoded in base58) on your screen: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 -GENERATING NEW WALLET ... [DONE] - -================================================================ -!!! PLEASE CREATE A BACKUP OF YOUR SEED !!! -!!! !!! -!!! ExzYy6wS2k59dPh19Q9JiAf6z1jyDq1hieDEMmbUzkbE !!! -!!! !!! -!!! PLEASE CREATE A BACKUP OF YOUR SEED !!! -================================================================ - -CREATING WALLET STATE FILE (wallet.dat) ... [DONE] -``` - -## Requesting Tokens - -You can request testnet tokens by executing the `request-funds` command: - -```shell -./cli-wallet request-funds -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Requesting funds from faucet ... [PERFORMING POW] (this can take a while) -Requesting funds from faucet ... 
[DONE] -``` - -Once you have executed the `request-funds` command, you can check the balance of your wallet by running the `balance` command: - -```shell -./cli-wallet balance -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[PEND] 1000000 I IOTA IOTA -``` - -Until the network has confirmed the transaction, the status of your token balance will be pending (`[PEND]`). Pending token balances can not be spent, wait until status becomes `[ OK ]`. You can call the `balance` command again to check for status changes. - -## Creating Digital Assets - -Digital assets are tokens with a special attribute, namely a color. A color is a sequence of 32 bytes represented as a base58 encoded string. The color of a token is derived from the unique transaction that created the asset. Therefore, it is not possible to create assets with the same color in a subsequent transaction. - -The transaction "minting" the assets can specify the amount of tokens to be created with the unique color. - -You can create assets with the cli-wallet executing the `create-asset` command: - -```shell -./cli-wallet create-asset -name MyUniqueToken -symbol MUT -amount 1000 -``` - -- The `name` flag specifies the name of the asset. -- The `symbol` flag specifies the symbol of the asset. -- The `amount` flag specifies the amount of asset tokens to create. - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Creating 1000 tokens with the color 'HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn' ... 
[DONE] -``` - -If you execute the `balance` command shortly after, you will notice that the wallet balances have changed: - -```shell -./cli-wallet balance -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[PEND] 999000 IOTA IOTA -[PEND] 1000 HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -``` - -To create `myUniqueToken`, the wallet has tagged 1000 IOTA tokens with the color `HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn`. The IOTA balance has decreased, but we have received assets in return for the used IOTAs. The created asset tokens behave exactly like other IOTA tokens, they can be transferred without fees to any address. - -### Fetching Information of a Digital Asset - -In the [previous example](#creating-digital-assets), we have created a digital asset called `MyUniqueToken`. The wallet knows it's name, symbol and initial supply as we provided this input while creating it. The network however does not store this information, it only knows its unique identifier, the assetID (or color). - -To help others discover an asset's attributes, when you create an asset the `cli-wallet` will automatically send this information to a metadata registry service. - -When you receive a locally unknown asset to your wallet, it queries this registry service for the metadata. 
You can also query this metadata yourself by running the `asset-info` command in the wallet: - -```shell -./cli-wallet asset-info -id HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Asset Info - -PROPERTY VALUE ------------------------ -------------------------------------------- -Name MyUniqueToken -Symbol MUT -AssetID(color) HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn -Initial Supply 1000 -Creating Transaction G7ergf7YzVUSqQMS69jGexYtihbhpsvELEsPHWToYtKj -Network test -``` - -## Sending Tokens and Assets - -Funds in IOTA are tied to addresses. Only the owner of the private key behind the address is able to spend (move) the funds, let them be IOTA tokens or digital assets. In previous sections, you have [requested funds](#requesting-tokens) from the faucet, which actually sent -these tokens to an address provided by your wallet. When you created `MyUniqueToken`, the wallet internally generated a new address -to hold the assets. You may examine the addresses used by the wallet by executing the `address -list` command: - -```shell -./cli-wallet address -list -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -INDEX ADDRESS SPENT ------ -------------------------------------------- ----- -0 19ZD79gRvVzXpQV4QfpY5gefqgrBA4gp11weeyqbY89FK true -1 1BbywJFGFtDFXpZidmjN39d8cVWUskT2MhbFqSrmVs3qi false -``` - -Consequently, when you wish to send tokens, you need to provide an address where to send the tokens to. - -### Simple Send - -The `send-funds` command can be used to send IOTA or colored tokens to any address. 
You can run the following command to see what options you have: - -```shell -./cli-wallet send-funds -help -``` - -Expected output: - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet send-funds [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -amount int - the amount of tokens that are supposed to be sent - -color string - (optional) color of the tokens to transfer (default "IOTA") - -consensus-mana-id string - node ID to pledge consensus mana to - -dest-addr string - destination address for the transfer - -fallb-addr string - (optional) fallback address that can claim back the (unspent) sent funds after fallback deadline - -fallb-deadline int - (optional) unix timestamp after which only the fallback address can claim the funds back - -help - show this help screen - -lock-until int - (optional) unix timestamp until which time the sent funds are locked from spending -``` - -You can ignore the mana pledge options, as your wallet can derive pledge IDs automatically. The most important options are: - -- `amount` is the amount of token you want to send. -- `color` is an optional flag to send digital assets with a certain color. When not specified, it defaults to the color of the IOTA token. -- `dest-addr` is the destination address for the transfer. You will have to set this to the address you wish to transfer tokens to. -- `fallb-addr` and `fallb-deadline` are optional flags to initiate a conditional transfer. A conditional transfer has a fallback deadline set, after which, only the `fallback-address` can unlock the funds. Before the fallback deadline, it is only the receiver of the funds who can spend the funds. Therefore, conditional transfers have to be claimed by the receiving party before the deadline expires. -- `lock-until` is an optional flag for a simple time locking mechanism. Before the time lock expires, the funds are locked and can not be spent by the owner. 
- -To send 500 `MyUniqueTokens` to the address `1E5Q82XTF5QGyC598br9oCj71cREyjD1CGUk2gmaJaFQt`, you have to tell the wallet that `MyUniqueTokens` are of color `HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn`, as shown in the following command: - -```shell -./cli-wallet send-funds -amount 500 -color HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn -dest-addr 1E5Q82XTF5QGyC598br9oCj71cREyjD1CGUk2gmaJaFQt -``` - -### Time Locked Sending - -If you don't want the receiver to be able to spend the tokens you have sent right away, you should execute the `send-funds` command with the `-lock-until` flag. The `-lock-until` flag expects a unix timestamp. For example, on linux, you can get a unix timestamp 7 days in the future by executing: - -```shell -date -d "+7 days" +%s -1621426409 -``` - -Once you have a unix timestamp, you can execute the transfer by running: - -```shell -./cli-wallet send-funds -amount 500 -dest-addr 1E5Q82XTF5QGyC598br9oCj71cREyjD1CGUk2gmaJaFQt -lock-until 1621426409 -``` - -### Conditional Sending - -You have the option to specify a fallback unlocking mechanism on the tokens that you send. If the recipient doesn't claim the funds before the fallback deadline you specify expires, the fallback address can essentially take back the tokens. - -If you want to send some IOTAs, but if the receiver doesn't claim them for a week you want to -have them back you should use the `-fallb-addr` and `-fallb-dealine` flags when executing the `send-funds` command. 
- -If you want to use your own wallet as fallback address, you can get your wallet's receive by running `./cli-wallet address -receive`: - -```shell -./cli-wallet address -receive -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Latest Receive Address: 17KoEZbWoBLRjBsb6oSyrSKVVqd7DVdHUWpxfBFbHaMSm -``` - -Once you have the desired fallback address, you can execute a transfer with a fallback address by running: - -```shell -./cli-wallet send-funds -amount 500 -dest-addr 1E5Q82XTF5QGyC598br9oCj71cREyjD1CGUk2gmaJaFQt \ --fallb-addr 17KoEZbWoBLRjBsb6oSyrSKVVqd7DVdHUWpxfBFbHaMSm --fallb-deadline 1621426409 -``` - -When you receive conditional funds, they will be displayed on the balance page in the wallet: - -```shell -./cli-wallet balance -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 500 I IOTA IOTA - -Conditional Token Balances - execute `claim-conditional` command to sweep these funds into wallet - -STATUS OWNED UNTIL BALANCE COLOR TOKEN NAME ------- ------------------------------ --------------- -------------------------------------------- ------------------------- -[ OK ] 2021-05-19 14:13:29 +0200 CEST 500 I IOTA IOTA -``` - -As the output suggests, you need to execute the `claim-conditional` command to claim these funds: - -```shell -./cli-wallet claim-conditional -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Claiming conditionally owned funds... 
[DONE] -``` - -After claiming the funds, you can see your balance has been updated by running: - -```shell -./cli-wallet balance -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[PEND] 500 IOTA IOTA -``` - -## Creating NFTs - -NFTs are non-fungible tokens that have unique properties. In IOTA, NFTs are represented as non-forkable, uniquely identifiable outputs. When you spend an NFT, the transaction will only be considered valid if it satisfies the constraints defined in the outputs. For example, the immutable data attached to the output can not change. Therefore, we can create an NFT and record immutable metadata in its output. - -You can list the option for the `create-nft` command by running `./cli-wallet create-nft -help`: - -```shell -./cli-wallet create-nft -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet create-nft [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -color string - color of the tokens that should be deposited into the nft upon creation (on top of the minimum required) (default "IOTA") - -consensus-mana-id string - node ID to pledge consensus mana to - -help - show this help screen - -immutable-data string - path to the file containing the immutable data that shall be attached to the nft - -initial-amount int - the amount of tokens that should be deposited into the nft upon creation (on top of the minimum required) -``` - -None of the flags are strictly required to mint an NFT, so we could just execute the command as it is. However, in most cases, you will want to attach immutable metadata to it, which is only possible during creation. Each NFT must have some IOTAs backing it (locked into its output) to prevent bloating the ledger database. 
Currently, the minimum requirement is 100 IOTA tokens, but bear in mind that it might change in the future. On top of the minimum required amount IOTAs, you can lock any additional funds into the NFT. You can use the `-initial-amount` and `-color` flags to do so. - -To attach immutable data to the NFT, you should define a path to a file that holds the metadata. The wallet will read the byte content of the file, and attach it to the NFT. Currently, the maximum allowed metadata file size is 4 kilobytes. You should use the `-immutable-data` flag to specify a path to a file that holds the metadata. - -For example, you can create a `nft_metadata.json` file in the directory of the cli-wallet with the following content: - -```json -{ - "title": "Asset Metadata", - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "MyFirstNFT" - }, - "description": { - "type": "string", - "description": "My very first NFT that has this metadata attached to it." - }, - "image": { - "type": "string", - "description": "" - } - } -} -``` - -The above JSON file is just a template, you can define any binary data that fits the size limit to be attached to the NFT. - -After you have created your data file, you can create the NFT by executing: - -```shell -./cli-wallet create-nft -immutable-data nft_metadata.json -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Created NFT with ID: gSfeBrWp1HwDLwSL7rt1qEMM59YBFZ4iBgAqHuqaQHo5 -Creating NFT ... [DONE] -``` - -The created NFT's unique identifier is `gSfeBrWp1HwDLwSL7rt1qEMM59YBFZ4iBgAqHuqaQHo5`, which is also a valid IOTA address. Navigate to a node dashboard/explorer and search for the address. On a node dashboard, you would see something like this: - -[![Created NFT Example](/img/tutorials/wallet_library/created_nft.png)](/img/tutorials/wallet_library/created_nft.png) - -The immutable data field contains the attached binary metadata (encoded in base64 in the node dashboard). 
- -The NFT is also displayed on the balance page of the cli-wallet: - -```shell -./cli-wallet balance -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -[ OK ] 996200 I IOTA IOTA - -Owned NFTs (Governance Controlled Aliases) - -STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] gSfeBrWp1HwDLwSL7rt1qEMM59YBFZ4iBgAqHuqaQHo5 100 IOTA IOTA -``` - -## Transferring NFTs - -You can use the `transfer-nft` command to send NFT. You can run the `transfer-nft` command with the `-help` flagTo view the available options. - -```shell -./cli-wallet transfer-nft -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet transfer-nft [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -consensus-mana-id string - node ID to pledge consensus mana to - -dest-addr string - destination address for the transfer - -help - show this help screen - -id string - unique identifier of the nft that should be transferred - -reset-delegation - defines whether to reset the delegation status of the alias being transferred - -reset-state-addr - defines whether to set the state address to dest-addr -``` - -There are 2 mandatory flags that you will need to provide for a valid transfer: `-id` and `-dest-addr`. `-id` is unique identifier of the NFT that you wish to transfer, `-dest-addr` is the destination address. 
- -The following command will send some of the `MyUniqueTokens` created in the previous example to `1E5Q82XTF5QGyC598br9oCj71cREyjD1CGUk2gmaJaFQt`: - -```shell -./cli-wallet transfer-nft -id gSfeBrWp1HwDLwSL7rt1qEMM59YBFZ4iBgAqHuqaQHo5 -dest-addr 1E5Q82XTF5QGyC598br9oCj71cREyjD1CGUk2gmaJaFQt -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Transferring NFT... [DONE] -``` - -## Destroying NFTs - -The owner of an NFT has the ability to destroy it. When an NFT is destroyed, all of its balance will be transferred to the NFT's current owner, and the alias output representing the NFT will be spent without creating a corresponding next alias output. - -You can use the `destroy-nft` command to destroy a NFT. You can run the `destroy-nft` command with the `-help` flag to view the available options. - -```shell -./cli-wallet destroy-nft -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet destroy-nft [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -consensus-mana-id string - node ID to pledge consensus mana to - -help - show this help screen - -id string - unique identifier of the nft that should be destroyed - -``` - -The following example shows how to create a NFT, and destroy it right after. - -1. Create the NFT - - ```shell - ./cli-wallet create-nft - ``` - - Expected output: - - ``` - IOTA 2.0 DevNet CLI-Wallet 0.2 - - Created NFT with ID: bdrvyKvaE6CZUEbdRDK57oBCRb2SLUyE8padFGxrV3zg - Creating NFT ... [DONE] - ``` - -2. 
Check the balance page shows that the NFT status is `OK`: - - ```shell - ./cli-wallet balance - ``` - - Expected output: - - ```shell - IOTA 2.0 DevNet CLI-Wallet 0.2 - - Available Token Balances - - STATUS BALANCE COLOR TOKEN NAME - ------ --------------- -------------------------------------------- ------------------------- - [ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken - [ OK ] 996100 I IOTA IOTA - - Owned NFTs (Governance Controlled Aliases) - - STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME - ------ -------------------------------------------- --------------- -------------------------------------------- ------------------------- - [ OK ] bdrvyKvaE6CZUEbdRDK57oBCRb2SLUyE8padFGxrV3zg 100 IOTA IOTA - ``` - -3. Destroy the nft: - - ```shell - ./cli-wallet destroy-nft -id bdrvyKvaE6CZUEbdRDK57oBCRb2SLUyE8padFGxrV3zg - ``` - - Expected output: - - ``` - IOTA 2.0 DevNet CLI-Wallet 0.2 - - Destroying NFT... [DONE] - ``` - -## Managing NFT Owned Assets - -An NFT is not only a valid IOTA address via its NFT ID, but it is also stored as an output in the ledger. Therefore, the NFT is not only capable of receiving funds to its address, but the owner can directly manage the funds held in the NFT output. - -- The owner may deposit assets into an NFT, or withdraw assets from there, essentially using it as a standalone wallet. -- Other users in the network can send any asset to the NFT address, that will be owned by the NFT. The owner might choose to deposit those funds into the NFT, or sweep them into their own wallet. - -### Deposit Assets Into Owned NFT - -You can use the `deposit-to-nft` command to transfer tokens to a NFT. You can run the `deposit-to-nft` command with the `-help` flag to view the available options. 
- -```shell -./cli-wallet deposit-to-nft -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet deposit-to-nft [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -amount int - the amount of tokens that are supposed to be deposited - -color string - color of funds to deposit (default "IOTA") - -consensus-mana-id string - node ID to pledge consensus mana to - -help - show this help screen - -id string - unique identifier of the nft to deposit to -``` - -To deposit some previously created `MyUniqueTokens` into the NFT, we need to specify the following flags: - -- `-id` the NFT ID to deposit to. -- `-amount` amount of assets to deposit. -- `-color` asset color to deposit. - -You can check your balance before the transfer by running: - -```shell -./cli-wallet balance -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 996300 I IOTA IOTA -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken - -Owned NFTs (Governance Controlled Aliases) - -STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg 100 IOTA IOTA -``` - -You can run the following command to deposit 500 `MyUniqueTokens` to the nft with id `f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg`: - -```shell -./cli-wallet deposit-to-nft -id f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg -amount 500 -color HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 -Depositing funds into NFT ... 
[DONE] -``` - -After the transfer is successful, you can recheck your balance, and it should show your NFT now has 500 `MyUniqueTokens`: - -```shell -./cli-wallet balance -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 996300 I IOTA IOTA - -Owned NFTs (Governance Controlled Aliases) - -STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg 100 IOTA IOTA - 500 HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -``` - -### Withdrawing Assets From NFT - -You can use the `withdraw-from-nft` command to withdraw tokens from a NFT. If the withdrawal leaves less than the minimum required funds in the NFT, the transaction will fail. You can run the `withdraw-from-nft` command with the `-help` flag to view the available options. 
- -```shell -./cli-wallet withdraw-from-nft -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet withdraw-from-nft [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -amount int - the amount of tokens that are supposed to be withdrew - -color string - color of funds to withdraw (default "IOTA") - -consensus-mana-id string - node ID to pledge consensus mana to - -dest-addr string - (optional) address to send the withdrew tokens to - -help - show this help screen - -id string - unique identifier of the nft to withdraw from -``` - -You can execute the following command to withdraw the previously deposited `MyUniqueTokens`: - -```shell -./cli-wallet withdraw-from-nft -id f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg -amount 500 -color HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Withdrawing funds from NFT... [DONE] -``` - -Once the transaction has been confirmed, you will see the updated balance: - -```shell -./cli-wallet balance -``` - -Expected output: - -```shell -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -[ OK ] 996300 I IOTA IOTA - -Owned NFTs (Governance Controlled Aliases) - -STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg 100 IOTA IOTA -``` - -### Sweep NFT Owned Funds - -You can use the `sweep-nft-owned-fun` command to collect all funds currently stored in a NFT. You can run the `sweep-nft-owned-fun` command with the `-help` flag to view the available options. 
- -```shell -./cli-wallet sweep-nft-owned-funds -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet sweep-nft-owned-funds [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -consensus-mana-id string - node ID to pledge consensus mana to - -help - show this help screen - -id string - unique identifier of the nft that should be checked for outputs with funds - -to string - optional address where to sweep -``` - -The only mandatory flag is `-id`, as it specifies which NFT ID (address) to scan for funds. - -In the following example, a sender has sent token to our NFT `f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg` with a normal `send-funds` command: - -```shell -./senders-wallet send-funds -amount 1000000 -dest-addr f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg -``` - -You can execute the `sweep-nft-owned-funds` command to transfer these funds into our wallet: - -```shell -./cli-wallet sweep-nft-owned-funds -id f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg -``` - -Expected output: - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Sweeping NFT owned funds... [DONE] -``` - -If you check the balance, it should be updated. So the wallet contains 1 IOTA more: - -```shell -./cli-wallet balance -``` - -Expected output: - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -[ OK ] 1996300 I IOTA IOTA - -Owned NFTs (Governance Controlled Aliases) - -STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg 100 IOTA IOTA -``` - -### Sweep NFT Owned NFTs - -NFTs can own other NFTs, that in turn can own other NFTs and so on... wow, NFTception! 
-Let's say your friend created an NFT, and transferred it to your NFT's ID `f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg`. - -```shell -./your-friends-wallet create-nft -``` - -Expected output: - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Created NFT with ID: faf9tkdBfcTv2AgPm3Zt8duX4iUGKjqbEyrdBYsUb2hi -Creating NFT ... [DONE] -``` - -``` -./your-friends-wallet transfer-nft -id faf9tkdBfcTv2AgPm3Zt8duX4iUGKjqbEyrdBYsUb2hi -dest-addr f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Transferring NFT... [DONE] -``` - -Your NFT `f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg` now owns NFT `faf9tkdBfcTv2AgPm3Zt8duX4iUGKjqbEyrdBYsUb2hi`. -To sweep the owned NFT into your wallet, execute the `sweep-nft-owned-nft` command: - -```shell -./cli-wallet sweep-nft-owned-nfts -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet sweep-nft-owned-nfts [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -consensus-mana-id string - node ID to pledge consensus mana to - -help - show this help screen - -id string - unique identifier of the nft that should be checked for owning other nfts - -to string - optional address where to sweep -``` - -All you need to specify is the `-id` of your NFT that you would like to check for owned NFTs: - -```shell -./cli-wallet sweep-nft-owned-nfts -id f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Swept NFT faf9tkdBfcTv2AgPm3Zt8duX4iUGKjqbEyrdBYsUb2hi into the wallet -Sweeping NFT owned NFTs... [DONE] -``` - -That's it, your wallet owns `faf9tkdBfcTv2AgPm3Zt8duX4iUGKjqbEyrdBYsUb2hi` now. If this NFT owned other funds or NFTs, -you would be able to sweep them into your wallet just like you did for `f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg`. 
- -```shell -./cli-wallet balance -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 1996300 I IOTA IOTA -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken - -Owned NFTs (Governance Controlled Aliases) - -STATUS NFT ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] f1BW8jcdDn3staviCVbVz54NqVwsshb5gpNLqY6Rrgrg 100 IOTA IOTA -[ OK ] faf9tkdBfcTv2AgPm3Zt8duX4iUGKjqbEyrdBYsUb2hi 100 IOTA IOTA -``` - -## Delegating Assets - -The primary use case of fund delegation in Coordicide is to enable refreshing a node's access mana without requiring -the use of a master key that has full control over the funds. A delegated key can not spend the funds, but can -"refresh" the outputs holding the funds in a transaction that can pledge mana to any arbitrary nodes. - -A token holder can therefore keep their funds in secure cold storage, while delegating them to a node or third party -to utilize the mana generated by the funds. Assuming there is demand for access mana in the network, the holder of the -assets can then sell the generated mana to realize return on their assets. - -Delegating funds via the cli-wallet is rather simple: you just need to execute the `delegate-funds` command. By default, -the cli-wallet will delegate funds to the node that the wallet is connected to, unless you specify a delegation -address via the `-del-addr` flag. -specify a valid IOTA address where to delegate to. 
- -```shell -./cli-wallet delegate-funds -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet delegate-funds [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -amount int - the amount of tokens that should be delegated - -color string - color of the tokens that should delegated (default "IOTA") - -consensus-mana-id string - node ID to pledge consensus mana to - -del-addr string - address to delegate funds to. when omitted, wallet delegates to the node it is connected to - -help - show this help screen - -until int - unix timestamp until which the delegated funds are timelocked -``` - -- Mandatory parameter is only the `-amount`. -- Use the `-del-addr` flag to delegate to arbitrary address. -- You may specify a delegation deadline via the `-until` flag. If this is set, the delegated party can not unlock - the funds for refreshing mana after the deadline expired, but the neither can the owner reclaim the funds before - that. If the `-until` flag is omitted, the delegation is open-ended, the owner can reclaim the delegated funds at - any time. -- You can specify a certain asset to be delegated (`-color`), default is IOTA. - -Let's delegate some funds to an address provided by a node in the network, `1EqJf5K1LJ6bVMCrxxxdZ6VNYoBTvEoXgxnbLJe7aqajc`: - -```shell -./cli-wallet delegate-funds -amount 1000000 -del-addr 1EqJf5K1LJ6bVMCrxxxdZ6VNYoBTvEoXgxnbLJe7aqajc -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Delegating to address 1EqJf5K1LJ6bVMCrxxxdZ6VNYoBTvEoXgxnbLJe7aqajc -Delegation ID is: tGoTKjt2y277ssKax9stsZXfLGdf8bPj3TZFaUDcAEwK -Delegating funds... [DONE] -``` - -If we omitted the `-del-addr` flag and its value, the wallet would have asked the node it is connected to, to provide -a delegation address. You can get this delegation address yourself as well by running the `server-status` command in -the wallet, or querying the `/info` endpoint of a node through the webapi. 
- -```shell -./cli-wallet server-status -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Server ID: 2GtxMQD94KvDH1SJPJV7icxofkyV1njuUZKtsqKmtux5 -Server Synced: true -Server Version: v0.5.9 -Delegation Address: 1HG9Z5NSiWTmT1HG65JLmn1jxQj7xUcVppKKi2vHAZLmr -``` - -By running the `balance` command, we can see the delegated funds: - -```shell -./cli-wallet balance -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -[ OK ] 996500 I IOTA IOTA - -Delegated Funds - -STATUS DELEGATION ID (ALIAS ID) BALANCE COLOR TOKEN NAME ------- -------------------------------------------- --------------- -------------------------------------------- ------------------------- -[ OK ] tGoTKjt2y277ssKax9stsZXfLGdf8bPj3TZFaUDcAEwK 1000000 IOTA IOTA -``` - -To be able to reclaim the delegated funds, we will need the delegation ID of the delegated funds. - -## Reclaiming Delegated Assets - -To reclaim delegated funds, you have to tell the cli-wallet the delegation ID that is displayed on the balance page. 
-Use the `reclaim-delegated` command once you got the delegation ID: - -```shell - ./cli-wallet reclaim-delegated -help -IOTA 2.0 DevNet CLI-Wallet 0.2 - -USAGE: - cli-wallet reclaim-delegated [OPTIONS] - -OPTIONS: - -access-mana-id string - node ID to pledge access mana to - -consensus-mana-id string - node ID to pledge consensus mana to - -help - show this help screen - -id string - delegation ID that should be reclaimed - -to-addr string - optional address where to send reclaimed funds, wallet receive address by default -``` - -To reclaim the funds delegated in the previous section, simply run: - -```shell -./cli-wallet reclaim-delegated -id tGoTKjt2y277ssKax9stsZXfLGdf8bPj3TZFaUDcAEwK -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Reclaimed delegation ID is: tGoTKjt2y277ssKax9stsZXfLGdf8bPj3TZFaUDcAEwK -Reclaiming delegated fund... [DONE] -``` - -The balance should appear in the `Available Balances` section of the balance page: - -```shell -./cli-wallet balance -``` - -``` -IOTA 2.0 DevNet CLI-Wallet 0.2 - -Available Token Balances - -STATUS BALANCE COLOR TOKEN NAME ------- --------------- -------------------------------------------- ------------------------- -[ OK ] 500 MUT HJdkZkn6MKda9fNuXFQZ8Dzdzu1wvuSUQp8QX1AMH4wn MyUniqueToken -[ OK ] 1996500 I IOTA IOTA -``` - -## Common Flags - -As you may have noticed, there are some universal flags in many commands, namely: - -- `-help` that brings up the command usage and help information, -- `access-mana-id` that is the nodeID to which the transaction should pledge access mana to, and -- `consensus-mana-id` that is the nodeID to which the transaction should pledge consensus mana to. - -The latter teo are determined by default by your wallet depending on which node you connect it to. However, if that node -doesn't filter user submitted transactions based on the mana pledge IDs, you are free to define which node to pledge -mana to. - -## Command Reference - -### balance - -Show the balances held by this wallet. 
- -### send-funds - -Initiate a transfer of tokens or assets (funds). - -### consolidate-funds - -Consolidate all available funds to one wallet address. - -### claim-conditional - -Claim (move) conditionally owned funds into the wallet. - -### request-funds - -Request funds from the testnet-faucet. - -### create-asset - -Create an asset in the form of colored coins. - -### delegate-funds - -Delegate funds to an address. - -### reclaim-delegated - -Reclaim previously delegated funds. - -### create-nft - -Create an NFT as an unforkable alias output. - -### transfer-nft - -Transfer the ownership of an NFT. - -### destroy-nft - -Destroy an NFT. - -### deposit-to-nft - -Deposit funds into an NFT. - -### withdraw-from-nft - -Withdraw funds from an NFT. - -### sweep-nft-owned-funds - -Sweep all available funds owned by NFT into the wallet. - -### sweep-nft-owned-nfts - -weep all available NFTs owned by NFT into the wallet. - -### address - -Start the address manager of this wallet. - -### init - -Generate a new wallet using a random seed. - -### server-status - -Display the server status. - -### pending-mana - -Display current pending mana of all outputs in the wallet grouped by address. - -### pledge-id - -Query nodeIDs accepted as pledge IDs in transaction by the node (server). - -### help - -Display this help screen. diff --git a/docs/maintain/goshimmer/0.9/docs/welcome.md b/docs/maintain/goshimmer/0.9/docs/welcome.md deleted file mode 100644 index 49b6b8aa818..00000000000 --- a/docs/maintain/goshimmer/0.9/docs/welcome.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -description: GoShimmer is a prototype node software exploring the implementation of IOTA 2.0. This wiki helps the reader to understand the basic concepts and ideas behind Coordicide, and how its modules are implemented in GoShimmer. -image: /img/logo/goshimmer_light.png -keywords: - - welcome - - goshimmer - - discord - - github ---- - -# Welcome - -Welcome to the GoShimmer docs! 
- -GoShimmer is a prototype node software exploring the implementation of [IOTA 2.0](https://github.com/iotaledger/IOTA-2.0-Research-Specifications). This wiki helps the reader to understand the basic concepts and ideas behind the Coordicide, and how its modules are implemented in GoShimmer. - -Due to the prototypical nature of the project, things written today may reflect how things are tomorrow. We seek to keep the docs as updated as possible, since it is also used as guide for our team. If you find any inconsistencies, feel free to contact us on [Discord](https://discord.iota.org/) or even better, [create an issue in this repository](https://github.com/iotaledger/goshimmer/issues/new/choose). diff --git a/docs/maintain/goshimmer/0.9/sidebars.js b/docs/maintain/goshimmer/0.9/sidebars.js deleted file mode 100644 index 78feaeef83b..00000000000 --- a/docs/maintain/goshimmer/0.9/sidebars.js +++ /dev/null @@ -1,339 +0,0 @@ -/** - * Creating a sidebar enables you to: - - create an ordered group of docs - - render a sidebar for each doc of that group - - provide next/previous navigation - - The sidebars can be generated from the filesystem, or explicitly defined here. - - Create as many sidebars as you want. 
- */ - -module.exports = { - // But you can create a sidebar manually - docs: [ - { - type: 'doc', - label: 'Welcome', - id: 'welcome', - }, - { - type: 'doc', - label: 'FAQ', - id: 'faq', - }, - { - type: 'category', - label: 'Tutorials', - items: [ - { - type: 'doc', - label: 'Set up a node', - id: 'tutorials/setup', - }, - { - type: 'doc', - label: 'Obtain tokens', - id: 'tutorials/obtain_tokens', - }, - { - type: 'doc', - label: 'Wallet library', - id: 'tutorials/wallet_library', - }, - - { - type: 'doc', - label: 'Write a dApp', - id: 'tutorials/dApp', - }, - - { - type: 'doc', - label: 'Manual peering', - id: 'tutorials/manual_peering', - }, - - { - type: 'doc', - label: 'Create a static identity', - id: 'tutorials/static_identity', - }, - - { - type: 'doc', - label: 'Set up the Monitoring Dashboard', - id: 'tutorials/monitoring', - }, - - { - type: 'doc', - label: 'How to create and send transactions', - id: 'tutorials/send_transaction', - }, - ], - }, - - { - type: 'category', - label: 'Implementation design', - items: [ - { - type: 'doc', - label: 'Event driven model', - id: 'implementation_design/event_driven_model', - }, - { - type: 'doc', - label: 'Packages and plugins', - id: 'implementation_design/packages_plugins', - }, - - { - type: 'doc', - label: 'Plugin', - id: 'implementation_design/plugin', - }, - - { - type: 'doc', - label: 'Configuration parameters', - id: 'implementation_design/configuration_parameters', - }, - - { - type: 'doc', - label: 'Object storage', - id: 'implementation_design/object_storage', - }, - ], - }, - { - type: 'category', - label: 'Protocol Specification', - items: [ - { - type: 'doc', - label: 'Protocol Specification', - id: 'protocol_specification/overview', - }, - { - type: 'doc', - label: 'Protocol High Level Overview', - id: 'protocol_specification/protocol', - }, - - { - type: 'category', - label: 'Components', - items: [ - { - type: 'doc', - label: 'Overview', - id: 'protocol_specification/components/overview', - }, 
- - { - type: 'doc', - label: 'Tangle', - id: 'protocol_specification/components/tangle', - }, - - { - type: 'doc', - label: 'Autopeering', - id: 'protocol_specification/components/autopeering', - }, - - { - type: 'doc', - label: 'Mana', - id: 'protocol_specification/components/mana', - }, - - { - type: 'doc', - label: 'Congestion Control', - id: 'protocol_specification/components/congestion_control', - }, - - { - type: 'doc', - label: 'Consensus Mechanism', - id: 'protocol_specification/components/consensus_mechanism', - }, - - { - type: 'doc', - label: 'UTXO and Ledgerstate', - id: 'protocol_specification/components/ledgerstate', - }, - - { - type: 'doc', - label: 'Advanced Outputs (Experimental)', - id: 'protocol_specification/components/advanced_outputs', - }, - - { - type: 'doc', - label: 'Markers', - id: 'protocol_specification/components/markers', - }, - ], - }, - { - type: 'doc', - label: 'Glossary', - id: 'protocol_specification/glossary', - }, - ], - }, - { - type: 'category', - label: 'API', - items: [ - { - type: 'doc', - label: 'Client Lib', - id: 'apis/client_lib', - }, - - { - type: 'doc', - label: 'WebAPI', - id: 'apis/webAPI', - }, - - { - type: 'doc', - label: 'Node Info', - id: 'apis/info', - }, - - { - type: 'doc', - label: 'Autopeering', - id: 'apis/autopeering', - }, - - { - type: 'doc', - label: 'Manual Peering', - id: 'apis/manual_peering', - }, - - { - type: 'doc', - label: 'Communication Layer', - id: 'apis/communication', - }, - - { - type: 'doc', - label: 'Ledgerstate', - id: 'apis/ledgerstate', - }, - - { - type: 'doc', - label: 'Mana', - id: 'apis/mana', - }, - - { - type: 'doc', - label: 'Snapshot', - id: 'apis/snapshot', - }, - - { - type: 'doc', - label: 'Faucet', - id: 'apis/faucet', - }, - - { - type: 'doc', - label: 'Spammer', - id: 'apis/spammer', - }, - ], - }, - { - type: 'category', - label: 'Tooling', - items: [ - { - type: 'doc', - label: 'Overview', - id: 'tooling/overview', - }, - - { - type: 'doc', - label: 'Docker 
Private Network', - id: 'tooling/docker_private_network', - }, - - { - type: 'doc', - label: 'Integration Tests', - id: 'tooling/integration_tests', - }, - - { - type: 'doc', - label: 'DAGs Visualizer', - id: 'tooling/dags_visualizer', - }, - - { - type: 'doc', - label: 'Evil Spammer', - id: 'tooling/evil_spammer', - }, - - { - type: 'doc', - label: 'Rand Seed and Rand Address', - id: 'tooling/rand_seed_and_rand_address', - }, - ], - }, - { - type: 'category', - label: 'Team Resources', - items: [ - { - type: 'doc', - label: 'How To Do a Release', - id: 'teamresources/release', - }, - - { - type: 'doc', - label: 'Code Guidelines', - id: 'teamresources/guidelines', - }, - - { - type: 'doc', - label: 'Local Development', - id: 'teamresources/local_development', - }, - - { - type: 'doc', - label: 'Modify the Analysis Dashboard', - id: 'teamresources/analysis_dashboard', - }, - ], - }, - { - type: 'link', - label: 'Release Notes', - href: 'https://github.com/iotaledger/goshimmer/releases', - }, - ], -}; diff --git a/package.json b/package.json index ba76a1f712d..7919f81d260 100644 --- a/package.json +++ b/package.json @@ -27,7 +27,7 @@ "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "@algolia/client-search": "^4.14.1", + "@algolia/client-search": "^4.20.0", "@docusaurus/core": "2.4.1", "@docusaurus/plugin-client-redirects": "2.4.1", "@docusaurus/preset-classic": "2.4.1", @@ -57,7 +57,7 @@ "react-image-gallery": "^1.3.0", "react-player": "^2.11.2", "react-popper": "^2.3.0", - "react-select": "^5.7.4", + "react-select": "^5.7.7", "rehype-jargon": "3.0.0", "rehype-katex": "4", "rehype-lodash-template": "^0.2.1", @@ -91,7 +91,7 @@ "@tsconfig/docusaurus": "^1.0.6", "@types/react": "18.2.24", "@types/react-dom": "18.2.7", - "@types/webpack-env": "^1.16.3", + "@types/webpack-env": "^1.18.2", "@typescript-eslint/eslint-plugin": "^5.9.1", "@typescript-eslint/parser": "^5.9.1", "eslint": "^8.48.0", diff --git a/src/icons/iota/index.ts 
b/src/icons/iota/index.ts index a293356ca85..5b096a14155 100644 --- a/src/icons/iota/index.ts +++ b/src/icons/iota/index.ts @@ -1,7 +1,6 @@ import { default as Chronicle } from '@site/static/icons/iota/chronicle.svg'; import { default as Chrysalis } from '@site/static/icons/iota/chrysalis.svg'; import { default as GettingStarted } from '@site/static/icons/iota/getting_started.svg'; -import { default as GoShimmer } from '@site/static/icons/iota/go_shimmer.svg'; import { default as Hornet } from '@site/static/icons/iota/hornet.svg'; import { default as Identity } from '@site/static/icons/iota/identity.svg'; import { default as IntegrationServices } from '@site/static/icons/iota/integration_services.svg'; @@ -25,7 +24,6 @@ export default { Chronicle, Chrysalis, GettingStarted, - GoShimmer, Hornet, Identity, IntegrationServices, diff --git a/theme/package.json b/theme/package.json index eff72213841..ce17a559b92 100644 --- a/theme/package.json +++ b/theme/package.json @@ -19,7 +19,7 @@ "@iota-wiki/plugin-docs": "workspace:^", "@metamask/providers": "^10.2.1", "clsx": "^1.2.1", - "html-react-parser": "^4.0.0", + "html-react-parser": "^4.2.2", "react-markdown": "6" }, "devDependencies": { diff --git a/tutorials/docusaurus.config.js b/tutorials/docusaurus.config.js index a202c9bdf53..619aaf50e08 100644 --- a/tutorials/docusaurus.config.js +++ b/tutorials/docusaurus.config.js @@ -181,17 +181,6 @@ module.exports = { ], }, ], - [ - '@iota-wiki/plugin-tutorial', - { - title: 'Guide - Deploy a Solidity Smart Contract Using Hardhat', - description: - 'In this tutorial you will learn how to deploy a solidity smart contract on EVM using hardhat.', - preview: '/IOTA-Smart-Contract-Tutorials-F.jpg', - route: '/shimmer/smart-contracts/guide/evm/tooling#hardhat', - tags: ['text', 'video'], - }, - ], [ '@iota-wiki/plugin-tutorial', { diff --git a/tutorials/pages/shimmerevm-hardhat.md b/tutorials/pages/shimmerevm-hardhat.md index c2ebca0a219..baa75398e07 100644 --- 
a/tutorials/pages/shimmerevm-hardhat.md +++ b/tutorials/pages/shimmerevm-hardhat.md @@ -220,7 +220,7 @@ etherscan: { Then you can verify by running: ```sh -npx hardhat verify --network shimmerevm-testnet
+npx hardhat verify --network shimmerevm
``` :::tip Address and unlock time diff --git a/versionedConfig.js b/versionedConfig.js index 1a3d2425c5d..d371e249fd1 100644 --- a/versionedConfig.js +++ b/versionedConfig.js @@ -253,17 +253,4 @@ exports.maintainPluginsConfig = [ }, ], }, - { - id: 'goshimmer', - label: 'GoShimmer', - description: 'Research node implementation for IOTA 2.0', - icon: 'GoShimmer', - subsection: 'maintain-layer-1', - versions: [ - { - label: '0.9', - badges: ['IOTA 2.0'], - }, - ], - }, ]; diff --git a/yarn.lock b/yarn.lock index 5ed1080c1f0..74474bc7f6c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -78,6 +78,13 @@ __metadata: languageName: node linkType: hard +"@algolia/cache-common@npm:4.20.0": + version: 4.20.0 + resolution: "@algolia/cache-common@npm:4.20.0" + checksum: a46377de8a309feea109aae1283fc9157c73766a4c51e3085870a1fc49f6e33698814379f3bbdf475713fa0663dace86fc90f0466e64469b1b885a0538abace4 + languageName: node + linkType: hard + "@algolia/cache-in-memory@npm:4.18.0": version: 4.18.0 resolution: "@algolia/cache-in-memory@npm:4.18.0" @@ -120,6 +127,16 @@ __metadata: languageName: node linkType: hard +"@algolia/client-common@npm:4.20.0": + version: 4.20.0 + resolution: "@algolia/client-common@npm:4.20.0" + dependencies: + "@algolia/requester-common": 4.20.0 + "@algolia/transporter": 4.20.0 + checksum: 88a27b5f8bba38349e1dbe47634e2ee159a413ff1a3baf6a65fbf244835f8d368e9f0a5ccce8bfe94ec405b38608be5bed45bcb140517f3aba6fe3b7045db373 + languageName: node + linkType: hard + "@algolia/client-personalization@npm:4.18.0": version: 4.18.0 resolution: "@algolia/client-personalization@npm:4.18.0" @@ -131,7 +148,7 @@ __metadata: languageName: node linkType: hard -"@algolia/client-search@npm:4.18.0, @algolia/client-search@npm:^4.14.1": +"@algolia/client-search@npm:4.18.0": version: 4.18.0 resolution: "@algolia/client-search@npm:4.18.0" dependencies: @@ -142,6 +159,17 @@ __metadata: languageName: node linkType: hard +"@algolia/client-search@npm:^4.20.0": + version: 4.20.0 + resolution: 
"@algolia/client-search@npm:4.20.0" + dependencies: + "@algolia/client-common": 4.20.0 + "@algolia/requester-common": 4.20.0 + "@algolia/transporter": 4.20.0 + checksum: 9fb6624dab6753f336f3207ee2af3558baeec4772ef739b6f6ed6a754c366e2e8d62cbf1cf8b28d5f763bec276a0a5fc36db2bf6f53a707890a411afcf550e92 + languageName: node + linkType: hard + "@algolia/events@npm:^4.0.1": version: 4.0.1 resolution: "@algolia/events@npm:4.0.1" @@ -156,6 +184,13 @@ __metadata: languageName: node linkType: hard +"@algolia/logger-common@npm:4.20.0": + version: 4.20.0 + resolution: "@algolia/logger-common@npm:4.20.0" + checksum: 06ed28f76b630c8e7597534b15138ab6f71c10dfc6e13f1fb1b76965b39c88fd1d9cb3fe6bb9d046de6533ebcbe5ad92e751bc36fabe98ceda39d1d5f47bb637 + languageName: node + linkType: hard + "@algolia/logger-console@npm:4.18.0": version: 4.18.0 resolution: "@algolia/logger-console@npm:4.18.0" @@ -181,6 +216,13 @@ __metadata: languageName: node linkType: hard +"@algolia/requester-common@npm:4.20.0": + version: 4.20.0 + resolution: "@algolia/requester-common@npm:4.20.0" + checksum: 8580ffd2be146bbdb5d4a57668bba4a5014f406cb2e5c65f596db6babab46c48d30c6e4732034ee1f987970aa27dcdab567959d654fa5fa74c4bcaf98312a724 + languageName: node + linkType: hard + "@algolia/requester-node-http@npm:4.18.0": version: 4.18.0 resolution: "@algolia/requester-node-http@npm:4.18.0" @@ -201,6 +243,17 @@ __metadata: languageName: node linkType: hard +"@algolia/transporter@npm:4.20.0": + version: 4.20.0 + resolution: "@algolia/transporter@npm:4.20.0" + dependencies: + "@algolia/cache-common": 4.20.0 + "@algolia/logger-common": 4.20.0 + "@algolia/requester-common": 4.20.0 + checksum: f834d5c8fcb7dfa9b7044cb81e9fab44a32f9dd0c3868a0f85fe0de4f4d27ad11fdc9c3c78541bc944c2593f4be56517a8ce593309d062b8a46ca0d6fcb5dcbc + languageName: node + linkType: hard + "@ampproject/remapping@npm:^2.2.0": version: 2.2.1 resolution: "@ampproject/remapping@npm:2.2.1" @@ -583,7 +636,7 @@ __metadata: languageName: node linkType: hard 
-"@babel/parser@npm:^7.12.7, @babel/parser@npm:^7.18.8, @babel/parser@npm:^7.21.2, @babel/parser@npm:^7.22.5, @babel/parser@npm:^7.22.7": +"@babel/parser@npm:^7.12.7, @babel/parser@npm:^7.18.8, @babel/parser@npm:^7.22.5, @babel/parser@npm:^7.22.7": version: 7.22.7 resolution: "@babel/parser@npm:7.22.7" bin: @@ -592,6 +645,15 @@ __metadata: languageName: node linkType: hard +"@babel/parser@npm:^7.23.0": + version: 7.23.0 + resolution: "@babel/parser@npm:7.23.0" + bin: + parser: ./bin/babel-parser.js + checksum: 453fdf8b9e2c2b7d7b02139e0ce003d1af21947bbc03eb350fb248ee335c9b85e4ab41697ddbdd97079698de825a265e45a0846bb2ed47a2c7c1df833f42a354 + languageName: node + linkType: hard + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@npm:^7.22.5": version: 7.22.5 resolution: "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@npm:7.22.5" @@ -2698,7 +2760,7 @@ __metadata: resolution: "@iota-wiki/cli@workspace:cli" dependencies: "@babel/generator": ^7.21.5 - "@babel/parser": ^7.21.2 + "@babel/parser": ^7.23.0 "@babel/types": ^7.21.5 "@iota-wiki/core": "workspace:^" "@types/babel__generator": ^7 @@ -2739,7 +2801,7 @@ __metadata: version: 0.0.0-use.local resolution: "@iota-wiki/core@workspace:." 
dependencies: - "@algolia/client-search": ^4.14.1 + "@algolia/client-search": ^4.20.0 "@docusaurus/core": 2.4.1 "@docusaurus/module-type-aliases": 2.4.1 "@docusaurus/plugin-client-redirects": 2.4.1 @@ -2758,7 +2820,7 @@ __metadata: "@tsconfig/docusaurus": ^1.0.6 "@types/react": 18.2.24 "@types/react-dom": 18.2.7 - "@types/webpack-env": ^1.16.3 + "@types/webpack-env": ^1.18.2 "@typescript-eslint/eslint-plugin": ^5.9.1 "@typescript-eslint/parser": ^5.9.1 clsx: ^1.2.1 @@ -2783,7 +2845,7 @@ __metadata: react-image-gallery: ^1.3.0 react-player: ^2.11.2 react-popper: ^2.3.0 - react-select: ^5.7.4 + react-select: ^5.7.7 rehype-jargon: 3.0.0 rehype-katex: 4 rehype-lodash-template: ^0.2.1 @@ -2830,7 +2892,7 @@ __metadata: "@types/react": 18.2.24 clsx: ^1.2.1 copyfiles: ^2.4.1 - html-react-parser: ^4.0.0 + html-react-parser: ^4.2.2 nodemon: ^2.0.16 prettier: ^2.8.8 react: 18.2.0 @@ -5154,10 +5216,10 @@ __metadata: languageName: node linkType: hard -"@types/webpack-env@npm:^1.16.3": - version: 1.18.1 - resolution: "@types/webpack-env@npm:1.18.1" - checksum: 3173c069763e51a96565d602af7e6dac9d772ae4aa6f26cac187cbf599a7f0b88f790b4b050b9dbdb0485daed3061b4a337863f3b8ce66f8a4e51f75ad387c6a +"@types/webpack-env@npm:^1.18.2": + version: 1.18.2 + resolution: "@types/webpack-env@npm:1.18.2" + checksum: 883908ade827d35a10efc574fb6f2728a7c520d4296cf1507633ac7457204ccd697bc6c8cadac99bc5d96074a6109c658ebfde59f42ba5ba0fdfffc538892b0f languageName: node linkType: hard @@ -10243,17 +10305,17 @@ __metadata: languageName: node linkType: hard -"html-react-parser@npm:^4.0.0": - version: 4.0.0 - resolution: "html-react-parser@npm:4.0.0" +"html-react-parser@npm:^4.2.2": + version: 4.2.2 + resolution: "html-react-parser@npm:4.2.2" dependencies: domhandler: 5.0.3 html-dom-parser: 4.0.0 react-property: 2.0.0 - style-to-js: 1.1.3 + style-to-js: 1.1.4 peerDependencies: react: 0.14 || 15 || 16 || 17 || 18 - checksum: 
7e0a65d047236ba0f09f3b0edb6c98bc652829e83c5db5ba494d449cbf2a0f822d461ed6ebe0d9e6812d7b844be177211124a92df1322bcf121f9a34937f3e9a + checksum: 5f8c0aeba02d7d3ff2bce4c32eafbbf77fd1aa664f15555ef49d75cce666211edb9c87a73a9b5648467442361f617438da038d27185fdbae983ac9731d67cbb7 languageName: node linkType: hard @@ -15317,9 +15379,9 @@ plugin-image-zoom@flexanalytics/plugin-image-zoom: languageName: node linkType: hard -"react-select@npm:^5.7.4": - version: 5.7.4 - resolution: "react-select@npm:5.7.4" +"react-select@npm:^5.7.7": + version: 5.7.7 + resolution: "react-select@npm:5.7.7" dependencies: "@babel/runtime": ^7.12.0 "@emotion/cache": ^11.4.0 @@ -15333,7 +15395,7 @@ plugin-image-zoom@flexanalytics/plugin-image-zoom: peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 - checksum: ca72941ad1d2c578ec04c09ed3deb7e373f987e589f403fadedc6fcc3e29935b5425ec4d2628f0fe58c21319bcaf153c0d0172432e09fc6423da869d848de757 + checksum: 6fd0c211d377addba6e6762a614ae674936df39a3f46ec19fd06e7acae8d6cadeb93d4723b10e25eff1ff8235077bae9459f293936334d82b28fe5071081c057 languageName: node linkType: hard @@ -17028,12 +17090,12 @@ plugin-image-zoom@flexanalytics/plugin-image-zoom: languageName: node linkType: hard -"style-to-js@npm:1.1.3": - version: 1.1.3 - resolution: "style-to-js@npm:1.1.3" +"style-to-js@npm:1.1.4": + version: 1.1.4 + resolution: "style-to-js@npm:1.1.4" dependencies: - style-to-object: 0.4.1 - checksum: 7aaeacff909d43bbfc28e9a004019224d0eda895ac3d0d9f3160b6ad044dca8d3965f7b2b92ac649f63f62889324b560147d21cf3149f29794692c5d7b207110 + style-to-object: 0.4.2 + checksum: 0ed2b3400fb602f9ab6d024fb2ff23630e2f2cbdd98150e13ba16a2d9b46d7213b5e71fde15cef30b7e24f336f87ef6e8c7a29b520ff8c6a1dca92f8762e7358 languageName: node linkType: hard @@ -17046,7 +17108,16 @@ plugin-image-zoom@flexanalytics/plugin-image-zoom: languageName: node linkType: hard -"style-to-object@npm:0.4.1, style-to-object@npm:^0.4.0": +"style-to-object@npm:0.4.2": + version: 
0.4.2 + resolution: "style-to-object@npm:0.4.2" + dependencies: + inline-style-parser: 0.1.1 + checksum: 314a80bcfadde41c2b9c8d717a4b1f2220954561040c2c7740496715da5cb95f99920a8eeefe2d4a862149875f352a12eda9bbef5816d7e0a71910da00d1521f + languageName: node + linkType: hard + +"style-to-object@npm:^0.4.0": version: 0.4.1 resolution: "style-to-object@npm:0.4.1" dependencies: