From 8beea81b336f3a55916c9c3b367ffa19c5904d0c Mon Sep 17 00:00:00 2001 From: Craig <3979063+craig8@users.noreply.github.com> Date: Wed, 18 Nov 2020 22:12:51 -0800 Subject: [PATCH 01/14] Create debug-env.yml --- .github/workflows/debug-env.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/workflows/debug-env.yml diff --git a/.github/workflows/debug-env.yml b/.github/workflows/debug-env.yml new file mode 100644 index 0000000000..2ca296c1e4 --- /dev/null +++ b/.github/workflows/debug-env.yml @@ -0,0 +1,30 @@ +on: push + +jobs: + one: + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + run: echo "$GITHUB_CONTEXT" + - name: Dump job context + env: + JOB_CONTEXT: ${{ toJson(job) }} + run: echo "$JOB_CONTEXT" + - name: Dump steps context + env: + STEPS_CONTEXT: ${{ toJson(steps) }} + run: echo "$STEPS_CONTEXT" + - name: Dump runner context + env: + RUNNER_CONTEXT: ${{ toJson(runner) }} + run: echo "$RUNNER_CONTEXT" + - name: Dump strategy context + env: + STRATEGY_CONTEXT: ${{ toJson(strategy) }} + run: echo "$STRATEGY_CONTEXT" + - name: Dump matrix context + env: + MATRIX_CONTEXT: ${{ toJson(matrix) }} + run: echo "$MATRIX_CONTEXT" From e9b1b4ebf6abea1dddfad5faa1be1bf6eff429e0 Mon Sep 17 00:00:00 2001 From: Craig <3979063+craig8@users.noreply.github.com> Date: Wed, 18 Nov 2020 22:16:49 -0800 Subject: [PATCH 02/14] Update debug-env.yml --- .github/workflows/debug-env.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/debug-env.yml b/.github/workflows/debug-env.yml index 2ca296c1e4..068cd8b4fa 100644 --- a/.github/workflows/debug-env.yml +++ b/.github/workflows/debug-env.yml @@ -4,11 +4,11 @@ jobs: one: runs-on: ubuntu-latest steps: - - name: Dump GitHub context + - id: Dump GitHub context env: GITHUB_CONTEXT: ${{ toJson(github) }} run: echo "$GITHUB_CONTEXT" - - name: Dump job context + - id: Dump job context env: JOB_CONTEXT: ${{ toJson(job) }} run: echo "$JOB_CONTEXT" From 59c1eb55310e41587f453b168f1739672dc3f061 Mon Sep 17 00:00:00 2001 From: Craig <3979063+craig8@users.noreply.github.com> Date: Wed, 18 Nov 2020 22:17:32 -0800 Subject: [PATCH 03/14] Update debug-env.yml --- .github/workflows/debug-env.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/debug-env.yml b/.github/workflows/debug-env.yml index 068cd8b4fa..e4f05a2d6b 100644 --- a/.github/workflows/debug-env.yml +++ b/.github/workflows/debug-env.yml @@ -4,11 +4,11 @@ jobs: one: runs-on: ubuntu-latest steps: - - id: Dump GitHub context + - id: DumpGitHubcontext env: GITHUB_CONTEXT: ${{ toJson(github) }} run: echo "$GITHUB_CONTEXT" - - id: Dump job context + - id: Dumpjobcontext env: JOB_CONTEXT: ${{ toJson(job) }} run: echo "$JOB_CONTEXT" From 80acdff8d8e9ea8a4907d6b9ff285d17dd9d98a4 Mon Sep 17 00:00:00 2001 From: Craig <3979063+craig8@users.noreply.github.com> Date: Wed, 18 Nov 2020 22:19:50 -0800 Subject: [PATCH 04/14] Update debug-env.yml --- .github/workflows/debug-env.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/debug-env.yml b/.github/workflows/debug-env.yml index e4f05a2d6b..41609cd4a9 100644 --- a/.github/workflows/debug-env.yml +++ b/.github/workflows/debug-env.yml @@ -16,7 +16,7 @@ jobs: env: STEPS_CONTEXT: ${{ toJson(steps) }} run: echo "$STEPS_CONTEXT" - - name: Dump runner context + - name: Dumprunnercontext env: RUNNER_CONTEXT: ${{ toJson(runner) }} run: echo "$RUNNER_CONTEXT" @@ -24,7 
+24,7 @@ jobs: env: STRATEGY_CONTEXT: ${{ toJson(strategy) }} run: echo "$STRATEGY_CONTEXT" - - name: Dump matrix context + - name: Dumpmatrixcontext env: MATRIX_CONTEXT: ${{ toJson(matrix) }} run: echo "$MATRIX_CONTEXT" From afcc2d752befcb9b78ccef42d413d8e20cfe2832 Mon Sep 17 00:00:00 2001 From: Mark Bonicillo Date: Wed, 2 Dec 2020 11:47:47 -0800 Subject: [PATCH 05/14] Remove misleading comment regarding 'database' config --- services/core/SQLHistorian/config.sqlite | 3 --- services/core/SQLHistorian/config_device_data_filter.sqlite | 3 --- 2 files changed, 6 deletions(-) diff --git a/services/core/SQLHistorian/config.sqlite b/services/core/SQLHistorian/config.sqlite index 1640fbc63f..1f542200f9 100644 --- a/services/core/SQLHistorian/config.sqlite +++ b/services/core/SQLHistorian/config.sqlite @@ -2,9 +2,6 @@ "connection": { "type": "sqlite", "params": { - # if no directory is given, location will be under install_dir/.agent-data directory - # in secure mode as this will be only directory in which agent will have write access - # In regular mode it will be under install_dir/data for backward compatibility "database": "historian_test.sqlite" } }, diff --git a/services/core/SQLHistorian/config_device_data_filter.sqlite b/services/core/SQLHistorian/config_device_data_filter.sqlite index 11f0510ffb..ac9c4260ea 100644 --- a/services/core/SQLHistorian/config_device_data_filter.sqlite +++ b/services/core/SQLHistorian/config_device_data_filter.sqlite @@ -2,9 +2,6 @@ "connection": { "type": "sqlite", "params": { - # if no directory is given, location will be under install_dir/.agent-data directory - # in secure mode as this will be only directory in which agent will have write access - # In regular mode it will be under install_dir/data for backward compatibility "database": "historian_test.sqlite" } }, From 183c2d95036a1432776f97966cbe2a8aecffc1e3 Mon Sep 17 00:00:00 2001 From: schandrika Date: Thu, 3 Dec 2020 16:25:56 -0800 Subject: [PATCH 06/14] fix for jira_306 and jira_307 --- volttron/platform/certs.py | 141 ++++++++++++++++--------------------- 1 file changed, 59 insertions(+), 82 deletions(-) diff --git a/volttron/platform/certs.py b/volttron/platform/certs.py index 520b9ddecd..beef81ac8f 100644 --- a/volttron/platform/certs.py +++ b/volttron/platform/certs.py @@ -67,7 +67,7 @@ ENC_STANDARD = 65537 SHA_HASH = 'sha256' # # days before the certificate will timeout. 
-DEFAULT_DAYS = 365 +DEFAULT_DAYS = 365 * 10 # 10 years DEFAULT_TIMOUT = 60 * 60 * 24 * 360 * 10 @@ -574,53 +574,12 @@ def deny_csr(self, common_name): fp.write(jsonapi.dumps(meta)) def sign_csr(self, csr_file): - ca_crt = self.ca_cert() - ca_pkey = _load_key(self.private_key_file(self.root_ca_name)) with open(csr_file, 'rb') as f: csr = x509.load_pem_x509_csr(data=f.read(), backend=default_backend()) subject_common_name = csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value - - if self.cert_exists(subject_common_name): - crt = self.cert(subject_common_name) - return crt.public_bytes(encoding=serialization.Encoding.PEM) - - crt = x509.CertificateBuilder().subject_name( - csr.subject - ).issuer_name( - ca_crt.subject - ).public_key( - csr.public_key() - ).serial_number( - int(time.time()) # pylint: disable=no-member - ).not_valid_before( - datetime.datetime.utcnow() - ).not_valid_after( - datetime.datetime.utcnow() + datetime.timedelta(days=365 * 10) - ).add_extension( - extension=x509.KeyUsage( - digital_signature=True, key_encipherment=True, content_commitment=True, - data_encipherment=False, key_agreement=False, encipher_only=False, decipher_only=False, - key_cert_sign=False, crl_sign=False - ), - critical=True - ).add_extension( - extension=x509.BasicConstraints(ca=False, path_length=None), - critical=True - ).add_extension( - extension=x509.AuthorityKeyIdentifier.from_issuer_public_key(ca_pkey.public_key()), - critical=False - ).sign( - private_key=ca_pkey, - algorithm=hashes.SHA256(), - backend=default_backend() - ) - - new_cert_file = self.cert_file( - csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value) - with open(new_cert_file, 'wb') as f: - f.write(crt.public_bytes(encoding=serialization.Encoding.PEM)) - return crt.public_bytes(encoding=serialization.Encoding.PEM) + cert, _ = self.create_signed_cert_files(name=subject_common_name, overwrite=False, csr=csr) + return cert.public_bytes(encoding=serialization.Encoding.PEM) def cert_exists(self, cert_name, remote=False): """ @@ -798,7 +757,7 @@ def save_key(self, file_path): os.chmod(key_file, 0o600) def create_signed_cert_files(self, name, cert_type='client', ca_name=None, - overwrite=True, valid_days=DEFAULT_DAYS, + overwrite=True, valid_days=DEFAULT_DAYS, csr=None, **kwargs): """ Create a new certificate and sign it with the volttron instance's @@ -811,6 +770,9 @@ def create_signed_cert_files(self, name, cert_type='client', ca_name=None, overwritten :param name: name used to save the newly created certificate and private key. Files are saved as .crt and .pem + :param csr: Certificate Signing Request(CSR) based on which cert should be created. + In this case no new private key is generated. CSR's public bytes and subject are used in building the + certificate :param kwargs: dictionary object containing various details about who we are. 
Possible arguments: @@ -822,22 +784,32 @@ def create_signed_cert_files(self, name, cert_type='client', ca_name=None, CN - Common Name :return: True if certificate creation was successful """ - if not overwrite: - if self.cert_exists(name): - return False + if csr: + remote = True + else: + remote = False + + if not overwrite and self.cert_exists(name, remote=remote): + if remote: + return _load_cert(self.cert_file(name, remote)), None + else: + return _load_cert(self.cert_file(name)), self.private_key_file(name) if not ca_name: ca_name = self.root_ca_name cert, key, serial = _create_signed_certificate(ca_cert=self.cert(ca_name), ca_key=_load_key(self.private_key_file(ca_name)), - name=name, valid_days=valid_days, type=cert_type, **kwargs) - - self._save_cert(name, cert, key) + name=name, valid_days=valid_days, type=cert_type, + csr=csr, **kwargs) + if csr: + self._save_cert(name, cert, key, remote=remote) + else: + self._save_cert(name, cert, key) self.update_ca_db(cert, ca_name, serial) return cert, key - def _save_cert(self, name, cert, pk): + def _save_cert(self, name, cert, pk, remote=False): """ Save the given certificate and private key using name.crt and name.pem respectively. @@ -846,26 +818,28 @@ def _save_cert(self, name, cert, pk): :param pk: :class: ` :return: """ - with open(self.cert_file(name), "wb") as f: + with open(self.cert_file(name, remote=remote), "wb") as f: f.write(cert.public_bytes(serialization.Encoding.PEM)) os.chmod(self.cert_file(name), 0o644) - encryption = serialization.NoEncryption() - if PROMPT_PASSPHRASE: - encryption = serialization.BestAvailableEncryption( - get_passphrase(prompt1='Enter passphrase for private ' - 'key ' + - name + ":") - ) - - # Write our key to disk for safe keeping - key_file = self.private_key_file(name) - with open(key_file, "wb") as f: - f.write(pk.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=encryption - )) - os.chmod(key_file, 0o600) + + if pk: + encryption = serialization.NoEncryption() + if PROMPT_PASSPHRASE: + encryption = serialization.BestAvailableEncryption( + get_passphrase(prompt1='Enter passphrase for private ' + 'key ' + + name + ":") + ) + + # Write our key to disk for safe keeping + key_file = self.private_key_file(name) + with open(key_file, "wb") as f: + f.write(pk.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=encryption + )) + os.chmod(key_file, 0o600) def update_ca_db(self, cert, ca_name, serial): """ @@ -947,7 +921,7 @@ def _create_private_key(): ) -def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='client', **kwargs): +def _create_signed_certificate(ca_cert, ca_key, name, valid_days=DEFAULT_DAYS, type='client', csr=None, **kwargs): """ Creates signed cert of type provided and signs it with ca_key provided. 
To create subject for the new certificate common name is set new value, rest of the attributes are copied from subject of provided ca certificate @@ -965,15 +939,18 @@ def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='clie # crptography 2.2.2 ski = ca_cert.extensions.get_extension_for_class( x509.SubjectKeyIdentifier) - - key = _create_private_key() - # key = rsa.generate_private_key( - # public_exponent=65537, - # key_size=2048, - # backend=default_backend() - # ) fqdn = kwargs.pop('fqdn', None) - if kwargs: + + if csr: + key = None + public_key = csr.public_key() + else: + key = _create_private_key() + public_key = key.public_key() + + if csr: + subject = csr.subject + elif kwargs: subject = _create_subject(**kwargs) else: temp_list = ca_cert.subject.rdns @@ -1004,11 +981,11 @@ def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='clie ).issuer_name( issuer ).public_key( - key.public_key() + public_key ).not_valid_before( datetime.datetime.utcnow() ).not_valid_after( - # Our certificate will be valid for 365 days + # Our certificate will be valid for 3650 days datetime.datetime.utcnow() + datetime.timedelta(days=valid_days) ).add_extension( x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski), @@ -1021,7 +998,7 @@ def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='clie critical=True ).add_extension( x509.SubjectKeyIdentifier( - _create_fingerprint(key.public_key())), + _create_fingerprint(public_key)), critical=False ) # cryptography 2.7 From f16516cef7de98a6d0081db2d9ad8c58c945266d Mon Sep 17 00:00:00 2001 From: schandrika Date: Thu, 3 Dec 2020 16:26:40 -0800 Subject: [PATCH 07/14] fixed keynotfound error --- volttron/platform/web/csr_endpoints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/volttron/platform/web/csr_endpoints.py b/volttron/platform/web/csr_endpoints.py index 9504939454..c0e5b7deb4 100644 --- a/volttron/platform/web/csr_endpoints.py +++ b/volttron/platform/web/csr_endpoints.py @@ -115,7 +115,7 @@ def _csr_request_new(self, env, data): response = None try: - if json_response['cert']: + if json_response.get('cert', None): json_response['cert'] = json_response['cert'].decode('utf-8') response = Response(jsonapi.dumps(json_response), content_type='application/json', From d6a6da3078cdd9339ab55ba25e984a3b14a10cb3 Mon Sep 17 00:00:00 2001 From: schandrika Date: Thu, 3 Dec 2020 16:27:38 -0800 Subject: [PATCH 08/14] made all_platforms=True for @PubSub.subscribe --- examples/ListenerAgent/listener/agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/ListenerAgent/listener/agent.py b/examples/ListenerAgent/listener/agent.py index 95814fba41..a1991724c1 100644 --- a/examples/ListenerAgent/listener/agent.py +++ b/examples/ListenerAgent/listener/agent.py @@ -107,7 +107,7 @@ def onstart(self, sender, **kwargs): query = Query(self.core) _log.info('query: %r', query.query('serverkey').get()) - @PubSub.subscribe('pubsub', '') + @PubSub.subscribe('pubsub', '', all_platforms=True) def on_match(self, peer, sender, bus, topic, headers, message): """Use match_all to receive all messages and print them out.""" self._logfn( From bd18cd76e05de9d70a6c698392a66f4120b16158 Mon Sep 17 00:00:00 2001 From: Mark Bonicillo Date: Fri, 4 Dec 2020 14:19:11 -0800 Subject: [PATCH 09/14] Revert "Remove misleading comment regarding 'database' config" This reverts commit afcc2d752befcb9b78ccef42d413d8e20cfe2832. 
--- services/core/SQLHistorian/config.sqlite | 3 +++ services/core/SQLHistorian/config_device_data_filter.sqlite | 3 +++ 2 files changed, 6 insertions(+) diff --git a/services/core/SQLHistorian/config.sqlite b/services/core/SQLHistorian/config.sqlite index 1f542200f9..1640fbc63f 100644 --- a/services/core/SQLHistorian/config.sqlite +++ b/services/core/SQLHistorian/config.sqlite @@ -2,6 +2,9 @@ "connection": { "type": "sqlite", "params": { + # if no directory is given, location will be under install_dir/.agent-data directory + # in secure mode as this will be only directory in which agent will have write access + # In regular mode it will be under install_dir/data for backward compatibility "database": "historian_test.sqlite" } }, diff --git a/services/core/SQLHistorian/config_device_data_filter.sqlite b/services/core/SQLHistorian/config_device_data_filter.sqlite index ac9c4260ea..11f0510ffb 100644 --- a/services/core/SQLHistorian/config_device_data_filter.sqlite +++ b/services/core/SQLHistorian/config_device_data_filter.sqlite @@ -2,6 +2,9 @@ "connection": { "type": "sqlite", "params": { + # if no directory is given, location will be under install_dir/.agent-data directory + # in secure mode as this will be only directory in which agent will have write access + # In regular mode it will be under install_dir/data for backward compatibility "database": "historian_test.sqlite" } }, From 5587f24d8a8ca8792f3fba413e2488125c2bc655 Mon Sep 17 00:00:00 2001 From: Mark Bonicillo Date: Fri, 4 Dec 2020 14:57:19 -0800 Subject: [PATCH 10/14] Add clarification on 'database' parameter for sqlite historian config --- services/core/SQLHistorian/config.sqlite | 16 +++++++++++++--- .../config_device_data_filter.sqlite | 16 +++++++++++++--- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/services/core/SQLHistorian/config.sqlite b/services/core/SQLHistorian/config.sqlite index 1640fbc63f..ad8f4f955c 100644 --- a/services/core/SQLHistorian/config.sqlite +++ b/services/core/SQLHistorian/config.sqlite @@ -2,9 +2,19 @@ "connection": { "type": "sqlite", "params": { - # if no directory is given, location will be under install_dir/.agent-data directory - # in secure mode as this will be only directory in which agent will have write access - # In regular mode it will be under install_dir/data for backward compatibility + # 'database' should be a non-empty string, which represents a location on the filesystem + # By default, the location is relative to the agent's installation directory, however it will respect a rooted or relative path to the database. + # The following are two examples: + + # Example 1 includes a path to the database: + # "my_data/historian_test.sqlite" + + # Example 2 does not include a directory: + # "historian_test.sqlite" + # NOTE: In this example, the location of the database depends on whether the volttron platform is in secure mode. + # In secure mode, the location will be under install_dir/.agent-data directory because this will be the only directory in which the agent will have write-access. + # In regular mode, the location will be under install_dir/data for backward compatibility. 
+ "database": "historian_test.sqlite" } }, diff --git a/services/core/SQLHistorian/config_device_data_filter.sqlite b/services/core/SQLHistorian/config_device_data_filter.sqlite index 11f0510ffb..bbd2aa293b 100644 --- a/services/core/SQLHistorian/config_device_data_filter.sqlite +++ b/services/core/SQLHistorian/config_device_data_filter.sqlite @@ -2,9 +2,19 @@ "connection": { "type": "sqlite", "params": { - # if no directory is given, location will be under install_dir/.agent-data directory - # in secure mode as this will be only directory in which agent will have write access - # In regular mode it will be under install_dir/data for backward compatibility + # 'database' should be a non-empty string, which represents a location on the filesystem + # By default, the location is relative to the agent's installation directory, however it will respect a rooted or relative path to the database. + # The following are two examples: + + # Example 1 includes a path to the database: + # "my_data/historian_test.sqlite" + + # Example 2 does not include a directory: + # "historian_test.sqlite" + # NOTE: In this example, the location of the database depends on whether the volttron platform is in secure mode. + # In secure mode, the location will be under install_dir/.agent-data directory because this will be the only directory in which the agent will have write-access. + # In regular mode, the location will be under install_dir/data for backward compatibility. + "database": "historian_test.sqlite" } }, From 10bc5680933bb75697324f7519a89fc428866ca5 Mon Sep 17 00:00:00 2001 From: craig8 Date: Mon, 7 Dec 2020 11:43:53 -0800 Subject: [PATCH 11/14] Added install agent-dir to volttron-ctl (vctl) command. Updated platform wrapper to utilize the new command Added check to get_address in order to verify that there is something listening on the connection port. --- bootstrap.py | 3 +- requirements.py | 8 +- volttron/platform/__init__.py | 36 ++- volttron/platform/control.py | 99 +------ volttron/platform/install_agents.py | 361 +++++++++++++++++++++++ volttron/platform/vip/agent/utils.py | 10 +- volttrontesting/utils/platformwrapper.py | 27 +- 7 files changed, 417 insertions(+), 127 deletions(-) create mode 100644 volttron/platform/install_agents.py diff --git a/bootstrap.py b/bootstrap.py index bd865d7e0a..e7e94ead13 100644 --- a/bootstrap.py +++ b/bootstrap.py @@ -293,7 +293,8 @@ def main(argv=sys.argv): # If all is specified then install all of the different packages listed in requirements.py po.add_argument('--all', action='append_const', const='all', dest='optional_args') for arg in extras_require: - po.add_argument('--'+arg, action='append_const', const=arg, dest="optional_args") + if 'dnp' not in arg: + po.add_argument('--'+arg, action='append_const', const=arg, dest="optional_args") # Add rmq download actions. rabbitmq = parser.add_argument_group('rabbitmq options') diff --git a/requirements.py b/requirements.py index 790d1ec1a2..e769e21420 100644 --- a/requirements.py +++ b/requirements.py @@ -76,9 +76,11 @@ 'influxdb', 'psycopg2-binary' ], - 'dnp3': [ # dnp3 agent requirements. - 'pydnp3' - ], + # Removing from requirements until we can get cmake installed on + # ubuntu 20.04 and pydnp3 is working with python3 + # 'dnp3': [ # dnp3 agent requirements. 
+ # 'pydnp3' + # ], 'documentation': [ # Requirements for building the documentation 'mock', 'Sphinx', diff --git a/volttron/platform/__init__.py b/volttron/platform/__init__.py index 0e23e74527..c8caa83807 100644 --- a/volttron/platform/__init__.py +++ b/volttron/platform/__init__.py @@ -41,12 +41,14 @@ import logging import os +import traceback + import psutil import sys from configparser import ConfigParser -from ..utils.frozendict import FrozenDict from urllib.parse import urlparse +from ..utils.frozendict import FrozenDict __version__ = '8.0-rc' _log = logging.getLogger(__name__) @@ -91,15 +93,43 @@ def get_config_path() -> str: return os.path.join(get_home(), "config") -def get_address(): +def get_address(verify_listening=False): """Return the VIP address of the platform - If the VOLTTRON_VIP_ADDR environment variable is set, it used. + If the VOLTTRON_VIP_ADDR environment variable is set, it is used to connect to. Otherwise, it is derived from get_home().""" address = os.environ.get('VOLTTRON_VIP_ADDR') if not address: + # Connect via virtual unix socket if linux platform (mac doesn't have @ in it) abstract = '@' if sys.platform.startswith('linux') else '' address = 'ipc://%s%s/run/vip.socket' % (abstract, get_home()) + import zmq.green as zmqgreen + import zmq + # The following block checks to make sure that we can + # connect to the zmq based upon the ipc address. + # + # The zmq.sock.bind() will raise an error because the + # address is already bound (therefore volttron is running there) + sock = None + try: + # TODO: We should not just do the connection test when verfiy_listening is True but always + # Though we leave this here because we have backward compatible unit tests that require + # the get_address to not have somethiing bound to the address. + if verify_listening: + ctx = zmqgreen.Context.instance() + sock = ctx.socket(zmq.PUB) # or SUB - does not make any difference + sock.bind(address) + raise ValueError("Unable to connect to vip address " + f"make sure VOLTTRON_HOME: {get_home()} " + "is set properly") + except zmq.error.ZMQError as e: + print(f"Zmq error was {e}\n{traceback.format_exc()}") + finally: + try: + sock.close() + except AttributeError as e: # Raised when sock is None type + pass + return address diff --git a/volttron/platform/control.py b/volttron/platform/control.py index 33bfb50027..32d141d48e 100644 --- a/volttron/platform/control.py +++ b/volttron/platform/control.py @@ -81,6 +81,7 @@ from volttron.utils.rmq_mgmt import RabbitMQMgmt from volttron.utils.rmq_setup import check_rabbit_status from volttron.platform.agent.utils import is_secure_mode, wait_for_volttron_shutdown +from . 
install_agents import add_install_agent_parser, install_agent try: import volttron.restricted @@ -612,85 +613,6 @@ def restore_agents_data(agent_uuid): callback=restore_agents_data) -def install_agent(opts, publickey=None, secretkey=None, callback=None): - aip = opts.aip - filename = opts.wheel - tag = opts.tag - vip_identity = opts.vip_identity - if opts.vip_address.startswith('ipc://'): - _log.info("Installing wheel locally without channel subsystem") - filename = config.expandall(filename) - agent_uuid = opts.connection.call('install_agent_local', - filename, - vip_identity=vip_identity, - publickey=publickey, - secretkey=secretkey) - - if tag: - opts.connection.call('tag_agent', agent_uuid, tag) - - else: - try: - _log.debug('Creating channel for sending the agent.') - channel_name = str(uuid.uuid4()) - channel = opts.connection.server.vip.channel('control', - channel_name) - _log.debug('calling control install agent.') - agent_uuid = opts.connection.call_no_get('install_agent', - filename, - channel_name, - vip_identity=vip_identity, - publickey=publickey, - secretkey=secretkey) - - _log.debug('Sending wheel to control') - sha512 = hashlib.sha512() - with open(filename, 'rb') as wheel_file_data: - while True: - # get a request - with gevent.Timeout(60): - request, file_offset, chunk_size = channel.recv_multipart() - if request == 'checksum': - channel.send(sha512.digest()) - break - - assert request == 'fetch' - - # send a chunk of the file - file_offset = int(file_offset) - chunk_size = int(chunk_size) - wheel_file_data.seek(file_offset) - data = wheel_file_data.read(chunk_size) - sha512.update(data) - channel.send(data) - - agent_uuid = agent_uuid.get(timeout=10) - - except Exception as exc: - if opts.debug: - traceback.print_exc() - _stderr.write( - '{}: error: {}: {}\n'.format(opts.command, exc, filename)) - return 10 - else: - if tag: - opts.connection.call('tag_agent', - agent_uuid, - tag) - finally: - _log.debug('closing channel') - channel.close(linger=0) - del channel - - name = opts.connection.call('agent_name', agent_uuid) - _stdout.write('Installed {} as {} {}\n'.format(filename, agent_uuid, name)) - - # Need to use a callback here rather than a return value. I am not 100% - # sure why this is the reason for allowing our tests to pass. - if callback: - callback(agent_uuid) - - def tag_agent(opts): agents = filter_agent(_list_agents(opts.aip), opts.agent, opts) if len(agents) != 1: @@ -2381,29 +2303,14 @@ def main(argv=sys.argv): top_level_subparsers = parser.add_subparsers(title='commands', metavar='', dest='command') - def add_parser(*args, **kwargs): + def add_parser(*args, **kwargs) -> argparse.ArgumentParser: parents = kwargs.get('parents', []) parents.append(global_args) kwargs['parents'] = parents subparser = kwargs.pop("subparser", top_level_subparsers) return subparser.add_parser(*args, **kwargs) - install = add_parser('install', help='install agent from wheel', - epilog='Optionally you may specify the --tag argument to tag the ' - 'agent during install without requiring a separate call to ' - 'the tag command. ') - install.add_argument('wheel', help='path to agent wheel') - install.add_argument('--tag', help='tag for the installed agent') - install.add_argument('--vip-identity', help='VIP IDENTITY for the installed agent. 
' - 'Overrides any previously configured VIP IDENTITY.') - if HAVE_RESTRICTED: - install.add_argument('--verify', action='store_true', - dest='verify_agents', - help='verify agent integrity during install') - install.add_argument('--no-verify', action='store_false', - dest='verify_agents', - help=argparse.SUPPRESS) - install.set_defaults(func=install_agent, verify_agents=True) + add_install_agent_parser(add_parser, HAVE_RESTRICTED) tag = add_parser('tag', parents=[filterable], help='set, show, or remove agent tag') diff --git a/volttron/platform/install_agents.py b/volttron/platform/install_agents.py new file mode 100644 index 0000000000..101e419489 --- /dev/null +++ b/volttron/platform/install_agents.py @@ -0,0 +1,361 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. 
+# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import argparse +import hashlib +import logging +import os +import sys +import tempfile +import traceback +import uuid + +import gevent +import yaml + +from volttron.platform import config, jsonapi, get_volttron_root, get_home +from volttron.platform.agent.utils import execute_command +from volttron.platform.packaging import add_files_to_package, create_package + +_log = logging.getLogger(__name__) + +_stdout = sys.stdout +_stderr = sys.stderr + + +def identity_exists(volttron_control, identity): + env = os.environ.copy() + cmds = [volttron_control, "status"] + + data = execute_command(cmds, env=env, logger=_log, + err_prefix="Error checking identity") + for x in data.split("\n"): + if x: + line_split = x.split() + if identity == line_split[2]: + return line_split[0] + return False + + +def install_requirements(agent_source): + req_file = os.path.join(agent_source, "requirements.txt") + + if os.path.exists(req_file): + _log.info(f"Installing requirements for agent from {req_file}.") + cmds = ["pip", "install", "-r", req_file] + try: + execute_command(cmds, logger=_log, + err_prefix="Error installing requirements") + except RuntimeError: + sys.exit(1) + + +def install_agent_directory(opts, package, agent_config): + """ + The main installation method for installing the agent on the correct local + platform instance. + :param opts: + :param package: + :param agent_config: + :return: + """ + if not os.path.isfile(os.path.join(opts.install_path, "setup.py")): + _log.error("Agent source must contain a setup.py file.") + sys.exit(-10) + + install_requirements(opts.install_path) + + wheelhouse = os.path.join(get_home(), "packaged") + opts.package = create_package(opts.install_path, wheelhouse, opts.vip_identity) + + if not os.path.isfile(opts.package): + _log.error("The wheel file for the agent was unable to be created.") + sys.exit(-10) + + agent_exists = False + volttron_control = os.path.join(get_volttron_root(), "env/bin/vctl") + if opts.vip_identity is not None: + # if the identity exists the variable will have the agent uuid in it. + agent_exists = identity_exists(volttron_control, opts.vip_identity) + if agent_exists: + if not opts.force: + _log.error( + "identity already exists, but force wasn't specified.") + sys.exit(-10) + # Note we don't remove the agent here because if we do that will + # not allow us to update without losing the keys. The + # install_agent method either installs or upgrades the agent. + + if agent_config is None: + agent_config = {} + + # if not a dict then config should be a filename + if not isinstance(agent_config, dict): + config_file = agent_config + else: + cfg = tempfile.NamedTemporaryFile() + with open(cfg.name, 'w') as fout: + fout.write(yaml.safe_dump(agent_config)) + config_file = cfg.name + + try: + with open(config_file) as fp: + data = yaml.safe_load(fp) + except: + _log.error("Invalid yaml/json config file.") + sys.exit(-10) + + # Configure the whl file before installing. 
+ add_files_to_package(opts.package, {'config_file': config_file}) + env = os.environ.copy() + + if agent_exists: + cmds = [volttron_control, "--json", "upgrade", opts.vip_identity, opts.package] + else: + cmds = [volttron_control, "--json", "install", opts.package] + + if opts.tag: + cmds.extend(["--tag", opts.tag]) + + out = execute_command(cmds, env=env, logger=_log, + err_prefix="Error installing agent") + + parsed = out.split("\n") + + # If there is not an agent with that identity: + # 'Could not find agent with VIP IDENTITY "BOO". Installing as new agent + # Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 6ccbf8dc-4929-4794-9c8e-3d8c6a121776 listeneragent-3.2' + + # The following is standard output of an agent that was previously installed + # If the agent was not previously installed then only the second line + # would have been output to standard out. + # + # Removing previous version of agent "foo" + # Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 81b811ff-02b5-482e-af01-63d2fd95195a listeneragent-3.2 + + agent_uuid = None + for l in parsed: + if l.startswith('Installed'): + agent_uuid = l.split(' ')[-2:-1][0] + # if 'Could not' in parsed[0]: + # agent_uuid = parsed[1].split()[-2] + # elif 'Removing' in parsed[0]: + # agent_uuid = parsed[1].split()[-2] + # else: + # agent_uuid = parsed[0].split()[-2] + + output_dict = dict(agent_uuid=agent_uuid) + + if opts.start: + cmds = [volttron_control, "start", agent_uuid] + outputdata = execute_command(cmds, env=env, logger=_log, + err_prefix="Error starting agent") + + # Expected output on standard out + # Starting 83856b74-76dc-4bd9-8480-f62bd508aa9c listeneragent-3.2 + if 'Starting' in outputdata: + output_dict['starting'] = True + + if opts.enable: + cmds = [volttron_control, "enable", agent_uuid] + + if opts.priority != -1: + cmds.extend(["--priority", str(opts.priority)]) + + outputdata = execute_command(cmds, env=env, logger=_log, + err_prefix="Error enabling agent") + # Expected output from standard out + # Enabling 6bcee29b-7af3-4361-a67f-7d3c9e986419 listeneragent-3.2 with priority 50 + if "Enabling" in outputdata: + output_dict['enabling'] = True + output_dict['priority'] = outputdata.split("\n")[0].split()[-1] + + if opts.start: + # Pause for agent_start_time seconds before verifying that the agent + gevent.sleep(opts.agent_start_time) + + cmds = [volttron_control, "status", agent_uuid] + outputdata = execute_command(cmds, env=env, logger=_log, + err_prefix="Error finding agent status") + + # 5 listeneragent-3.2 foo running [10737] + output_dict["started"] = "running" in outputdata + if output_dict["started"]: + pidpos = outputdata.index('[') + 1 + pidend = outputdata.index(']') + output_dict['agent_pid'] = int(outputdata[pidpos: pidend]) + + if opts.json: + sys.stdout.write("%s\n" % jsonapi.dumps(output_dict, indent=4)) + if opts.csv: + keylen = len(output_dict) + keyline = '' + valueline = '' + keys = list(output_dict.keys()) + for k in range(keylen): + if k < keylen - 1: + keyline += "%s," % keys[k] + valueline += "%s," % output_dict[keys[k]] + else: + keyline += "%s" % keys[k] + valueline += "%s" % output_dict[keys[k]] + sys.stdout.write("%s\n%s\n" % (keyline, valueline)) + + +def install_agent(opts, publickey=None, secretkey=None, callback=None): + try: + install_path = opts.install_path + except AttributeError: + install_path = opts.wheel + + if os.path.isdir(install_path): + install_agent_directory(opts, opts, opts.agent_config) + return + + aip = 
opts.aip + filename = opts.install_path + tag = opts.tag + vip_identity = opts.vip_identity + if opts.vip_address.startswith('ipc://'): + _log.info("Installing wheel locally without channel subsystem") + filename = config.expandall(filename) + agent_uuid = opts.connection.call('install_agent_local', + filename, + vip_identity=vip_identity, + publickey=publickey, + secretkey=secretkey) + + if tag: + opts.connection.call('tag_agent', agent_uuid, tag) + + else: + channel = None + try: + _log.debug('Creating channel for sending the agent.') + channel_name = str(uuid.uuid4()) + channel = opts.connection.server.vip.channel('control', + channel_name) + _log.debug('calling control install agent.') + agent_uuid = opts.connection.call_no_get('install_agent', + filename, + channel_name, + vip_identity=vip_identity, + publickey=publickey, + secretkey=secretkey) + + _log.debug('Sending wheel to control') + sha512 = hashlib.sha512() + with open(filename, 'rb') as wheel_file_data: + while True: + # get a request + with gevent.Timeout(60): + request, file_offset, chunk_size = channel.recv_multipart() + if request == b'checksum': + channel.send(sha512.digest()) + break + + assert request == b'fetch' + + # send a chunk of the file + file_offset = int(file_offset) + chunk_size = int(chunk_size) + wheel_file_data.seek(file_offset) + data = wheel_file_data.read(chunk_size) + sha512.update(data) + channel.send(data) + + agent_uuid = agent_uuid.get(timeout=10) + + except Exception as exc: + if opts.debug: + traceback.print_exc() + _stderr.write( + '{}: error: {}: {}\n'.format(opts.command, exc, filename)) + return 10 + else: + if tag: + opts.connection.call('tag_agent', + agent_uuid, + tag) + finally: + _log.debug('closing channel') + if channel: + channel.close(linger=0) + del channel + + name = opts.connection.call('agent_name', agent_uuid) + _stdout.write('Installed {} as {} {}\n'.format(filename, agent_uuid, name)) + + # Need to use a callback here rather than a return value. I am not 100% + # sure why this is the reason for allowing our tests to pass. + if callback: + callback(agent_uuid) + + +def add_install_agent_parser(add_parser_fn, has_restricted): + install = add_parser_fn('install', help='install agent from wheel', + epilog='Optionally you may specify the --tag argument to tag the ' + 'agent during install without requiring a separate call to ' + 'the tag command. ') + install.add_argument('install_path', help='path to agent wheel or directory for agent installation') + install.add_argument('--tag', help='tag for the installed agent') + install.add_argument('--vip-identity', help='VIP IDENTITY for the installed agent. 
' + 'Overrides any previously configured VIP IDENTITY.') + install.add_argument('--agent-config', help="Agent configuration!") + install.add_argument("-f", "--force", action='store_true', + help="agents are uninstalled by tag so force allows multiple agents to be removed at one go.") + install.add_argument("--priority", default=-1, type=int, + help="priority of startup during instance startup") + install.add_argument("--start", action='store_true', + help="start the agent during the script execution") + install.add_argument("--enable", action='store_true', + help="enable the agent with default 50 priority unless --priority set") + install.add_argument("--csv", action='store_true', + help="format the standard out output to csv") + install.add_argument("--json", action="store_true", + help="format the standard out output to json") + install.add_argument("-st", "--agent-start-time", default=5, type=int, + help="the amount of time to wait and verify that the agent has started up.") + if has_restricted: + install.add_argument('--verify', action='store_true', + dest='verify_agents', + help='verify agent integrity during install') + install.add_argument('--no-verify', action='store_false', + dest='verify_agents', + help=argparse.SUPPRESS) + install.set_defaults(func=install_agent, verify_agents=True) diff --git a/volttron/platform/vip/agent/utils.py b/volttron/platform/vip/agent/utils.py index b59387da72..62a156beb4 100644 --- a/volttron/platform/vip/agent/utils.py +++ b/volttron/platform/vip/agent/utils.py @@ -71,17 +71,17 @@ def get_server_keys(): return ks.public, ks.secret -def build_connection(identity, peer='', address=get_address(), +def build_connection(identity, peer='', address=None, publickey=None, secretkey=None, message_bus=None, **kwargs): + address = address if address is not None else get_address() if publickey is None or secretkey is None: publickey, secretkey = get_server_keys(publickey, secretkey) - cn = Connection(address=address, identity=identity, peer=peer, publickey=publickey, secretkey=secretkey, message_bus=message_bus, **kwargs) return cn -def build_agent(address=get_address(), identity=None, publickey=None, +def build_agent(address=None, identity=None, publickey=None, secretkey=None, timeout=10, serverkey=None, agent_class=Agent, volttron_central_address=None, volttron_central_instance_name=None, **kwargs) -> Agent: @@ -101,8 +101,8 @@ def build_agent(address=get_address(), identity=None, publickey=None, :return: an agent based upon agent_class that has been started :rtype: agent_class """ - # if not serverkey: - # serverkey = get_known_host_serverkey(address) + + address = address if address is not None else get_address() # This is a fix allows the connect to message bus to be different than # the one that is currently running. 
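Note on the build_connection/build_agent change above: moving get_address() out of the default argument list matters because Python evaluates default values once, when the function is defined, not each time it is called. The sketch below is illustrative only and is not part of the patch; its get_address() is a stand-in for volttron.platform.get_address. It shows why the deferred, None-sentinel form picks up a VOLTTRON_VIP_ADDR set after import while the eager form does not.

import os

def get_address():
    # Stand-in for volttron.platform.get_address: reads the environment on every call.
    return os.environ.get('VOLTTRON_VIP_ADDR', 'ipc://@/tmp/volttron_home/run/vip.socket')

def build_agent_eager(address=get_address()):
    # The default was computed once, at definition time, and is frozen here.
    return address

def build_agent_deferred(address=None):
    # The pattern adopted in the patch: resolve the address at call time.
    address = address if address is not None else get_address()
    return address

if __name__ == '__main__':
    os.environ['VOLTTRON_VIP_ADDR'] = 'tcp://127.0.0.1:22916'
    print(build_agent_eager())     # still the definition-time value
    print(build_agent_deferred())  # tcp://127.0.0.1:22916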
diff --git a/volttrontesting/utils/platformwrapper.py b/volttrontesting/utils/platformwrapper.py index f44c7977fd..b4e6ed698c 100644 --- a/volttrontesting/utils/platformwrapper.py +++ b/volttrontesting/utils/platformwrapper.py @@ -454,7 +454,7 @@ def build_agent(self, address=None, should_spawn=True, identity=None, self._append_allow_curve_key(publickey, agent.core.identity) if should_spawn: - self.logit('platformwrapper.build_agent spawning') + self.logit(f'platformwrapper.build_agent spawning for identity {identity}') event = gevent.event.Event() gevent.spawn(agent.core.run, event) # .join(0) event.wait(timeout=2) @@ -801,7 +801,6 @@ def subscribe_to_all(peer, sender, bus, topic, headers, messages): gevent.sleep(10) - def is_running(self): return utils.is_volttron_running(self.volttron_home) @@ -840,7 +839,7 @@ def _install_agent(self, wheel_file, start, vip_identity): self.logit("VOLTTRON_HOME SETTING: {}".format( self.env['VOLTTRON_HOME'])) env = self.env.copy() - cmd = ['volttron-ctl', '-vv', 'install', wheel_file] + cmd = ['volttron-ctl', '--json', 'install', wheel_file] if vip_identity: cmd.extend(['--vip-identity', vip_identity]) @@ -949,15 +948,7 @@ def install_agent(self, agent_wheel=None, agent_dir=None, config_file=None, else: raise ValueError("Can't determine correct config file.") - script = os.path.join(self.volttron_root, - "scripts/install-agent.py") - cmd = [self.python, script, - "--volttron-home", self.volttron_home, - "--volttron-root", self.volttron_root, - "--agent-source", agent_dir, - "--config", config_file, - "--json", - "--agent-start-time", str(startup_time)] + cmd = ["vctl", "--json", "install", agent_dir, "--agent-config", config_file] if force: cmd.extend(["--force"]) @@ -969,7 +960,6 @@ def install_agent(self, agent_wheel=None, agent_dir=None, config_file=None, stdout = execute_command(cmd, logger=_log, err_prefix="Error installing agent") - self.logit(stdout) # Because we are no longer silencing output from the install, the # the results object is now much more verbose. 
Our assumption is # that the result we are looking for is the only JSON block in @@ -1015,17 +1005,16 @@ def start_agent(self, agent_uuid): self.logit('Starting agent {}'.format(agent_uuid)) self.logit("VOLTTRON_HOME SETTING: {}".format( self.env['VOLTTRON_HOME'])) - cmd = ['volttron-ctl'] + cmd = ['volttron-ctl', '--json'] cmd.extend(['start', agent_uuid]) - p = Popen(cmd, env=self.env, - stdout=sys.stdout, stderr=sys.stderr, universal_newlines=True) - p.wait() + + result = execute_command(cmd, self.env) # Confirm agent running - cmd = ['volttron-ctl'] + cmd = ['volttron-ctl', '--json'] cmd.extend(['status', agent_uuid]) res = execute_command(cmd, env=self.env) - # 776 TODO: Timing issue where check fails + # 776 TODO: Timing issue where check fails time.sleep(.1) self.logit("Subprocess res is {}".format(res)) assert 'running' in res From 9d118e5c3342876b73ff0850d79ae3b541038604 Mon Sep 17 00:00:00 2001 From: craig8 Date: Mon, 7 Dec 2020 11:51:29 -0800 Subject: [PATCH 12/14] Removed debugging of workflows from repository --- .github/workflows/debug-env.yml | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 .github/workflows/debug-env.yml diff --git a/.github/workflows/debug-env.yml b/.github/workflows/debug-env.yml deleted file mode 100644 index 41609cd4a9..0000000000 --- a/.github/workflows/debug-env.yml +++ /dev/null @@ -1,30 +0,0 @@ -on: push - -jobs: - one: - runs-on: ubuntu-latest - steps: - - id: DumpGitHubcontext - env: - GITHUB_CONTEXT: ${{ toJson(github) }} - run: echo "$GITHUB_CONTEXT" - - id: Dumpjobcontext - env: - JOB_CONTEXT: ${{ toJson(job) }} - run: echo "$JOB_CONTEXT" - - name: Dump steps context - env: - STEPS_CONTEXT: ${{ toJson(steps) }} - run: echo "$STEPS_CONTEXT" - - name: Dumprunnercontext - env: - RUNNER_CONTEXT: ${{ toJson(runner) }} - run: echo "$RUNNER_CONTEXT" - - name: Dump strategy context - env: - STRATEGY_CONTEXT: ${{ toJson(strategy) }} - run: echo "$STRATEGY_CONTEXT" - - name: Dumpmatrixcontext - env: - MATRIX_CONTEXT: ${{ toJson(matrix) }} - run: echo "$MATRIX_CONTEXT" From d9fe08f998a103f1dca97f74cd2f93ddcf5b7cc4 Mon Sep 17 00:00:00 2001 From: Mark Bonicillo Date: Mon, 7 Dec 2020 12:30:46 -0800 Subject: [PATCH 13/14] Move comments on database to sqlite3 documentation --- .../sql-historian/sql-historian.rst | 12 ++++++++---- services/core/SQLHistorian/config.sqlite | 13 ------------- .../SQLHistorian/config_device_data_filter.sqlite | 13 ------------- 3 files changed, 8 insertions(+), 30 deletions(-) diff --git a/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst b/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst index cdd7763a84..200c67e98d 100644 --- a/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst +++ b/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst @@ -71,10 +71,14 @@ or Sqlite3 Specifics ----------------- -An Sqlite Historian provides a convenient solution for under powered -systems. The database is parameter is a location on the file system. By -default it is relative to the agents installation directory, however it -will respect a rooted or relative path to the database. +An Sqlite Historian provides a convenient solution for under powered systems. The database is a parameter to a location on the file system; 'database' should be a non-empty string. +By default, the location is relative to the agent's installation directory, however it will respect a rooted or relative path to the database. 
+
+If 'database' does not have a rooted or relative path, the location of the database depends on whether the volttron platform is in secure mode. For more information on secure mode, see :ref:`Running-Agents-as-Unix-User`.
+In secure mode, the location will be under <install_dir>/.agent-data directory because this will be the only directory in which the agent will have write-access.
+In regular mode, the location will be under <install_dir>/data for backward compatibility.
+
+The following is a minimal configuration file that uses a relative path to the database.
 
 ::
 
diff --git a/services/core/SQLHistorian/config.sqlite b/services/core/SQLHistorian/config.sqlite
index ad8f4f955c..1f542200f9 100644
--- a/services/core/SQLHistorian/config.sqlite
+++ b/services/core/SQLHistorian/config.sqlite
@@ -2,19 +2,6 @@
     "connection": {
         "type": "sqlite",
         "params": {
-            # 'database' should be a non-empty string, which represents a location on the filesystem
-            # By default, the location is relative to the agent's installation directory, however it will respect a rooted or relative path to the database.
-            # The following are two examples:
-
-            # Example 1 includes a path to the database:
-            # "my_data/historian_test.sqlite"
-
-            # Example 2 does not include a directory:
-            # "historian_test.sqlite"
-            # NOTE: In this example, the location of the database depends on whether the volttron platform is in secure mode.
-            # In secure mode, the location will be under install_dir/.agent-data directory because this will be the only directory in which the agent will have write-access.
-            # In regular mode, the location will be under install_dir/data for backward compatibility.
-
             "database": "historian_test.sqlite"
         }
     },
diff --git a/services/core/SQLHistorian/config_device_data_filter.sqlite b/services/core/SQLHistorian/config_device_data_filter.sqlite
index bbd2aa293b..ac9c4260ea 100644
--- a/services/core/SQLHistorian/config_device_data_filter.sqlite
+++ b/services/core/SQLHistorian/config_device_data_filter.sqlite
@@ -2,19 +2,6 @@
     "connection": {
         "type": "sqlite",
        "params": {
-            # 'database' should be a non-empty string, which represents a location on the filesystem
-            # By default, the location is relative to the agent's installation directory, however it will respect a rooted or relative path to the database.
-            # The following are two examples:
-
-            # Example 1 includes a path to the database:
-            # "my_data/historian_test.sqlite"
-
-            # Example 2 does not include a directory:
-            # "historian_test.sqlite"
-            # NOTE: In this example, the location of the database depends on whether the volttron platform is in secure mode.
-            # In secure mode, the location will be under install_dir/.agent-data directory because this will be the only directory in which the agent will have write-access.
-            # In regular mode, the location will be under install_dir/data for backward compatibility.
-
             "database": "historian_test.sqlite"
         }
     },

From adef64a500280a75334ed15ae13f25a1c7716371 Mon Sep 17 00:00:00 2001
From: Mark Bonicillo
Date: Mon, 7 Dec 2020 12:37:11 -0800
Subject: [PATCH 14/14] Update SQLite3 documentation in historian README

---
 services/core/SQLHistorian/README.rst | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/services/core/SQLHistorian/README.rst b/services/core/SQLHistorian/README.rst
index 464226be9b..fd249b5899 100644
--- a/services/core/SQLHistorian/README.rst
+++ b/services/core/SQLHistorian/README.rst
@@ -91,10 +91,18 @@ YML format
 SQLite3
 ~~~~~~~
 
-An Sqlite historian provides a convenient solution for under powered
-systems. The database parameter is a location on the file system. By
-default it is relative to the agents installation directory, however it
-will respect a rooted or relative path to the database.
+An Sqlite Historian provides a convenient solution for under powered
+systems. The 'database' parameter is a path on the file system and should be a non-empty string.
+By default, the location is relative to the agent's installation directory,
+however it will respect a rooted or relative path to the database.
+
+If 'database' does not have a rooted or relative path, the location of the database depends on whether the volttron
+platform is in secure mode.
+In secure mode, the location will be under <install_dir>/.agent-data directory because this will be
+the only directory in which the agent will have write-access.
+In regular mode, the location will be under <install_dir>/data for backward compatibility.
+
+The following is a minimal configuration file that uses a relative path to the database.
 
 Configuration
 -------------
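For reference, the connection portion of the minimal SQLite historian configuration described in the README and config.sqlite changes above is a single 'connection' block whose 'params' hold only the 'database' path. The sketch below assumes the same field names shown in services/core/SQLHistorian/config.sqlite and simply prints that minimal connection configuration.

import json

# Minimal connection configuration, mirroring services/core/SQLHistorian/config.sqlite.
minimal_config = {
    "connection": {
        "type": "sqlite",
        "params": {
            # A relative path such as this resolves against the agent's install directory
            # (or the .agent-data directory when the platform runs in secure mode).
            "database": "historian_test.sqlite"
        }
    }
}

print(json.dumps(minimal_config, indent=4))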