From 7e7a18bdb3eb0e44a43e7f0618e0278ce7b17052 Mon Sep 17 00:00:00 2001 From: Tony Meyer Date: Fri, 24 May 2024 11:33:48 +1200 Subject: [PATCH 1/4] fix: don't use f-strings in logging calls (#1227) Removes use of f-strings in `logging.debug` and `logging.warning` calls. Calls to `logging.*` should provide a format string and arguments (generally we use `%` style, but `{` or `$'` would be ok, if inconsistent). There are two main reasons: * Some log handlers (most well known is Sentry, but I believe there are others) will aggregate messages using the format string, and this breaks if the string is pre-interpolated. * Interpolation/formatting is done lazily - for example, a debug level message may never get formatted - so there is a (very small) efficiency gain by avoiding it when possible. Theoretically, there are also safety concerns in having user-provided content, but that's not relevant for any of these cases here, and I think you'd have to _also_ provide an argument to exploit that. --- ops/model.py | 2 +- ops/pebble.py | 4 ++-- ops/storage.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ops/model.py b/ops/model.py index 10ee8ec17..7fa8f3027 100644 --- a/ops/model.py +++ b/ops/model.py @@ -985,7 +985,7 @@ def _cast_network_address(raw: str) -> Union[ipaddress.IPv4Address, ipaddress.IP try: return ipaddress.ip_address(raw) except ValueError: - logger.debug(f"could not cast {raw} to IPv4/v6 address") + logger.debug("could not cast %s to IPv4/v6 address", raw) return raw diff --git a/ops/pebble.py b/ops/pebble.py index 604c67585..b450f0a8d 100644 --- a/ops/pebble.py +++ b/ops/pebble.py @@ -1639,7 +1639,7 @@ def _websocket_to_writer(ws: '_WebSocket', writer: '_WebsocketWriter', command = payload.get('command') if command != 'end': # A command we don't recognize, keep going - logger.warning(f'Invalid I/O command {command!r}') + logger.warning('Invalid I/O command %r', command) continue # Received "end" command (EOF signal), stop thread break @@ -1702,7 +1702,7 @@ def read(self, n: int = -1) -> Union[str, bytes]: command = payload.get('command') if command != 'end': # A command we don't recognize, keep going - logger.warning(f'Invalid I/O command {command!r}') + logger.warning('Invalid I/O command %r', command) continue # Received "end" command, return EOF designator self.eof = True diff --git a/ops/storage.py b/ops/storage.py index 80d610807..2714749ba 100644 --- a/ops/storage.py +++ b/ops/storage.py @@ -58,7 +58,7 @@ def __init__(self, filename: Union['Path', str]): if not os.path.exists(str(filename)): # sqlite3.connect creates the file silently if it does not exist - logger.debug(f"Initializing SQLite local storage: {filename}.") + logger.debug("Initializing SQLite local storage: %s.", filename) if filename != ":memory:": self._ensure_db_permissions(str(filename)) From 0dd27df3d828b2d62e0a396667466a8f0263e45e Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Thu, 23 May 2024 19:42:40 -0500 Subject: [PATCH 2/4] fix: the `other` argument to `RelatationDataContent.update(...)` should be optional (#1226) Fixes the signature for `RelationDataContent.update` to match `MutableMapping`, where `other` is optional (a regression introduced in #1883). The type for `other` has been simplified to `Any`. 
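As a rough illustration (mirroring the parametrized test added in this patch), these are the call shapes the loosened signature is meant to accept; `takes_databag` is a hypothetical stand-in for charm code holding a relation data bag:

```python
from typing import MutableMapping


def takes_databag(data: MutableMapping[str, str]) -> None:
    # All three forms are valid for MutableMapping.update, so the
    # RelationDataContent override needs to accept them too.
    data.update({'foo': 'baz'})    # a mapping as `other`
    data.update([('foo', 'baz')])  # an iterable of (key, value) pairs
    data.update(foo='baz')         # keyword arguments only, `other` omitted


takes_databag({})
```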
It should really be `Mapping|_SupportsKeysAndGetItem[str,str]` plus a minimal type that supports `.values`, but it was already messy pulling in `_SupportsKeysAndGetItem` in #1183, and we're just passing this through to `MutableMapping` so it doesn't seem like the tight typing is providing enough benefit to justify the complexity of the signature. [typeshed has three overloads](https://github.com/python/typeshed/blob/f7c03486ee01c8ea74823db75e017341bf3c2ad0/stdlib/typing.pyi#L726), so we could match that (as we did in #1883, just incompletely), if that is desirable. Fixes: #1225 --------- Co-authored-by: Tony Meyer --- ops/model.py | 12 +----------- test/test_model.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/ops/model.py b/ops/model.py index 7fa8f3027..3bafa88ab 100644 --- a/ops/model.py +++ b/ops/model.py @@ -91,16 +91,6 @@ }) -# Copied from typeshed. -_KT = typing.TypeVar("_KT") -_VT_co = typing.TypeVar("_VT_co", covariant=True) - - -class _SupportsKeysAndGetItem(typing.Protocol[_KT, _VT_co]): - def keys(self) -> typing.Iterable[_KT]: ... - def __getitem__(self, __key: _KT) -> _VT_co: ... - - logger = logging.getLogger(__name__) MAX_LOG_LINE_LEN = 131071 # Max length of strings to pass to subshell. @@ -1722,7 +1712,7 @@ def __getitem__(self, key: str) -> str: self._validate_read() return super().__getitem__(key) - def update(self, other: _SupportsKeysAndGetItem[str, str], **kwargs: str): + def update(self, other: typing.Any = (), /, **kwargs: str): """Update the data from dict/iterable other and the kwargs.""" super().update(other, **kwargs) diff --git a/test/test_model.py b/test/test_model.py index 1f7c212a2..2037585b1 100644 --- a/test/test_model.py +++ b/test/test_model.py @@ -232,6 +232,32 @@ def test_get_app_relation_data(self, harness: ops.testing.Harness[ops.CharmBase] relation_id, harness.model.app) == harness.get_relation_data( relation_id, local_app) == {'foo': 'bar'} + @pytest.mark.parametrize('args,kwargs', [ + (({'foo': 'baz'}, ), {}), + (([('foo', 'baz')], ), {}), + ((), {'foo': 'baz'}) + ]) + def test_update_app_relation_data( + self, + args: typing.Tuple[typing.Any, ...], + kwargs: typing.Dict[str, str], + harness: ops.testing.Harness[ops.CharmBase], + ): + harness.set_leader(True) + harness.begin() + relation_id = harness.add_relation('db1', 'remote') + harness.add_relation_unit(relation_id, 'remote/0') + with harness._event_context('foo_event'): + harness.update_relation_data( + relation_id, + harness.model.app.name, + {'foo': 'bar'}) + rel = harness.model.get_relation('db1', relation_id) + assert rel is not None + rel.data[harness.model.app].update(*args, **kwargs) + assert harness.get_relation_data( + relation_id, harness.model.app) == {'foo': 'baz'} + def test_unit_relation_data(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') harness.add_relation_unit(relation_id, 'remoteapp1/0') From 3c8c211c18bfcc944b639aa0259f46bcac690056 Mon Sep 17 00:00:00 2001 From: Tiexin Guo Date: Fri, 24 May 2024 10:53:00 +0800 Subject: [PATCH 3/4] test: refactor test_testing to pytest style (#1206) Refactor `test_testing.py` to pytest style. 
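A minimal sketch of the mechanical pattern applied throughout (the class and assertion below are illustrative, not taken from the diff): `unittest.TestCase` subclasses become plain classes, `self.addCleanup(...)` becomes `request.addfinalizer(...)` via the `pytest.FixtureRequest` fixture, and `self.assert*` calls become bare `assert` statements.

```python
import pytest

import ops
import ops.testing


class TestExample:  # was: class TestExample(unittest.TestCase)
    def test_something(self, request: pytest.FixtureRequest):
        harness = ops.testing.Harness(ops.CharmBase, meta='name: test-app')
        request.addfinalizer(harness.cleanup)  # was: self.addCleanup(harness.cleanup)
        harness.begin()
        assert harness.model.app.name == 'test-app'  # was: self.assertEqual(...)
```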
--- test/test_main.py | 1 - test/test_model.py | 2 - test/test_pebble.py | 4 - test/test_real_pebble.py | 30 +- test/test_testing.py | 1803 ++++++++++++++++++++------------------ 5 files changed, 988 insertions(+), 852 deletions(-) diff --git a/test/test_main.py b/test/test_main.py index a13dd0bf1..b5bee2f72 100644 --- a/test/test_main.py +++ b/test/test_main.py @@ -807,7 +807,6 @@ def test_excepthook(self, fake_script: FakeScript): assert re.search('Using local storage: not a Kubernetes podspec charm', calls.pop(0)) assert re.search('Initializing SQLite local storage: ', calls.pop(0)) - self.maxDiff = None assert re.search( '(?ms)juju-log --log-level ERROR -- Uncaught exception while in charm code:\n' 'Traceback .most recent call last.:\n' diff --git a/test/test_model.py b/test/test_model.py index 2037585b1..0373a44b2 100644 --- a/test/test_model.py +++ b/test/test_model.py @@ -3379,8 +3379,6 @@ def test_from_dict(self): class TestSecretClass: - maxDiff = 64 * 1024 - @pytest.fixture def model(self): return ops.Model(ops.CharmMeta(), _ModelBackend('myapp/0')) diff --git a/test/test_pebble.py b/test/test_pebble.py index e72c49953..7dda0c739 100644 --- a/test/test_pebble.py +++ b/test/test_pebble.py @@ -47,8 +47,6 @@ def datetime_nzdt(y: int, m: int, d: int, hour: int, min: int, sec: int, micro: class TestTypes: - maxDiff = None - def test_error(self): error = pebble.Error('error') assert isinstance(error, Exception) @@ -1599,8 +1597,6 @@ def client(monkeypatch: pytest.MonkeyPatch, time: MockTime): class TestClient: - maxDiff = None - def test_client_init(self): pebble.Client(socket_path='foo') # test that constructor runs with pytest.raises(TypeError): diff --git a/test/test_real_pebble.py b/test/test_real_pebble.py index cca1bc311..b64dbcbfb 100644 --- a/test/test_real_pebble.py +++ b/test/test_real_pebble.py @@ -34,7 +34,6 @@ import threading import time import typing -import unittest import urllib.error import urllib.request import uuid @@ -56,15 +55,16 @@ def get_socket_path() -> str: return socket_path +@pytest.fixture +def client(): + return pebble.Client(socket_path=get_socket_path()) + + @pytest.mark.skipif( os.getenv('RUN_REAL_PEBBLE_TESTS') != '1', reason='RUN_REAL_PEBBLE_TESTS not set', ) class TestRealPebble: - @pytest.fixture - def client(self): - return pebble.Client(socket_path=get_socket_path()) - def test_checks_and_health(self, client: pebble.Client): client.add_layer('layer', { 'checks': { @@ -304,19 +304,18 @@ def test_log_forwarding(self, client: pebble.Client): os.getenv('RUN_REAL_PEBBLE_TESTS') != '1', reason='RUN_REAL_PEBBLE_TESTS not set', ) -class TestPebbleStorageAPIsUsingRealPebble(unittest.TestCase, PebbleStorageAPIsTestMixin): - def setUp(self): +class TestPebbleStorageAPIsUsingRealPebble(PebbleStorageAPIsTestMixin): + @pytest.fixture + def pebble_dir(self): pebble_path = os.getenv('PEBBLE') assert pebble_path is not None - self.prefix = tempfile.mkdtemp(dir=pebble_path) - self.client = pebble.Client(socket_path=get_socket_path()) - - def tearDown(self): - shutil.rmtree(self.prefix) + pebble_dir = tempfile.mkdtemp(dir=pebble_path) + yield pebble_dir + shutil.rmtree(pebble_dir) # Remove this entirely once the associated bug is fixed; it overrides the original test in the # test mixin class. 
- @unittest.skip('pending resolution of https://github.com/canonical/pebble/issues/80') + @pytest.mark.skip(reason='pending resolution of https://github.com/canonical/pebble/issues/80') def test_make_dir_with_permission_mask(self): pass @@ -325,6 +324,5 @@ def test_make_dir_with_permission_mask(self): os.getenv('RUN_REAL_PEBBLE_TESTS') != '1', reason='RUN_REAL_PEBBLE_TESTS not set', ) -class TestNoticesUsingRealPebble(unittest.TestCase, PebbleNoticesMixin): - def setUp(self): - self.client = pebble.Client(socket_path=get_socket_path()) +class TestNoticesUsingRealPebble(PebbleNoticesMixin): + pass diff --git a/test/test_testing.py b/test/test_testing.py index 0488d60e8..98eed62bb 100644 --- a/test/test_testing.py +++ b/test/test_testing.py @@ -77,21 +77,21 @@ def on_storage_changed(self, event: ops.EventBase): self.changes.append(event) -class TestHarness(unittest.TestCase): - def test_add_relation_no_meta_fails(self): +class TestHarness: + def test_add_relation_no_meta_fails(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta="name: mycharm") - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(ops.RelationNotFoundError): harness.add_relation('db', 'postgresql') - def test_add_relation(self): + def test_add_relation(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') assert isinstance(rel_id, int) backend = harness._backend @@ -101,14 +101,14 @@ def test_add_relation(self): assert backend.relation_get(rel_id, 'test-app', is_app=True) == {} assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == {} - def test_add_relation_with_app_data(self): + def test_add_relation_with_app_data(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql', app_data={'x': '1', 'y': '2'}) assert isinstance(rel_id, int) backend = harness._backend @@ -117,14 +117,14 @@ def test_add_relation_with_app_data(self): assert harness.get_relation_data(rel_id, 'postgresql') == {'x': '1', 'y': '2'} assert harness.get_relation_data(rel_id, 'postgresql/0') == {} - def test_add_relation_with_unit_data(self): + def test_add_relation_with_unit_data(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql', unit_data={'a': '1', 'b': '2'}) assert isinstance(rel_id, int) backend = harness._backend @@ -133,14 +133,14 @@ def test_add_relation_with_unit_data(self): assert harness.get_relation_data(rel_id, 'postgresql') == {} assert harness.get_relation_data(rel_id, 'postgresql/0') == {'a': '1', 'b': '2'} - def test_can_connect_default(self): + def test_can_connect_default(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app containers: foo: resource: foo-image ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() c = harness.model.unit.get_container('foo') @@ -159,7 +159,7 @@ def test_can_connect_default(self): assert 
c.can_connect() c.get_plan() # shouldn't raise ConnectionError - def test_can_connect_begin_with_initial_hooks(self): + def test_can_connect_begin_with_initial_hooks(self, request: pytest.FixtureRequest): pebble_ready_calls: collections.defaultdict[str, int] = collections.defaultdict(int) class MyCharm(ops.CharmBase): @@ -180,7 +180,7 @@ def _on_pebble_ready(self, event: ops.PebbleReadyEvent): bar: resource: bar-image ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin_with_initial_hooks() assert dict(pebble_ready_calls) == {'foo': 1, 'bar': 1} @@ -195,14 +195,14 @@ def _on_pebble_ready(self, event: ops.PebbleReadyEvent): assert container.can_connect() container.get_plan() # shouldn't raise ConnectionError - def test_add_relation_and_unit(self): + def test_add_relation_and_unit(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') assert isinstance(rel_id, int) harness.add_relation_unit(rel_id, 'postgresql/0') @@ -213,7 +213,7 @@ def test_add_relation_and_unit(self): assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == \ {'foo': 'bar'} - def test_add_relation_with_remote_app_data(self): + def test_add_relation_with_remote_app_data(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app @@ -221,7 +221,7 @@ def test_add_relation_with_remote_app_data(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) remote_app = 'postgresql' rel_id = harness.add_relation('db', remote_app) harness.update_relation_data(rel_id, 'postgresql', {'app': 'data'}) @@ -230,7 +230,7 @@ def test_add_relation_with_remote_app_data(self): assert [rel_id] == backend.relation_ids('db') assert backend.relation_get(rel_id, remote_app, is_app=True) == {'app': 'data'} - def test_add_relation_with_our_initial_data(self): + def test_add_relation_with_our_initial_data(self, request: pytest.FixtureRequest): class InitialDataTester(ops.CharmBase): """Record the relation-changed events.""" @@ -250,7 +250,7 @@ def _on_db_relation_changed(self, event: ops.EventBase): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') harness.update_relation_data(rel_id, 'test-app', {'k': 'v1'}) harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'}) @@ -280,7 +280,7 @@ def _on_db_relation_changed(self, event: ops.EventBase): harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'}) assert harness.charm.observed_events == [] - def test_add_peer_relation_with_initial_data_leader(self): + def test_add_peer_relation_with_initial_data_leader(self, request: pytest.FixtureRequest): class InitialDataTester(ops.CharmBase): """Record the relation-changed events.""" @@ -301,7 +301,7 @@ def _on_cluster_relation_changed(self, event: ops.EventBase): cluster: interface: cluster ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # TODO: dmitriis 2020-04-07 test a minion unit and initial peer relation app data # events when the harness begins to emit events for initial data. 
harness.set_leader(is_leader=True) @@ -333,14 +333,14 @@ def _on_cluster_relation_changed(self, event: ops.EventBase): assert len(harness.charm.observed_events), 1 assert isinstance(harness.charm.observed_events[0], ops.RelationEvent) - def test_remove_relation(self): + def test_remove_relation(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # First create a relation @@ -373,14 +373,14 @@ def test_remove_relation(self): 'unit': None, 'relation_id': rel_id}} - def test_remove_specific_relation_id(self): + def test_remove_specific_relation_id(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -427,14 +427,14 @@ def test_remove_specific_relation_id(self): 'unit': None, 'relation_id': rel_id_2}} - def test_removing_invalid_relation_id_raises_exception(self): + def test_removing_invalid_relation_id_raises_exception(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # First create a relation @@ -450,14 +450,14 @@ def test_removing_invalid_relation_id_raises_exception(self): with pytest.raises(ops.RelationNotFoundError): harness.remove_relation(rel_id + 1) - def test_remove_relation_unit(self): + def test_remove_relation_unit(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # First add a relation and unit @@ -502,7 +502,7 @@ def test_remove_relation_unit(self): 'postgresql/0': {'foo': 'bar'}, 'postgresql': {}}}} - def test_removing_relation_removes_remote_app_data(self): + def test_removing_relation_removes_remote_app_data(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app @@ -510,7 +510,7 @@ def test_removing_relation_removes_remote_app_data(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # Add a relation and update app data @@ -529,7 +529,7 @@ def test_removing_relation_removes_remote_app_data(self): pytest.raises(ops.RelationNotFoundError, backend.relation_get, rel_id, remote_app, is_app=True) - def test_removing_relation_refreshes_charm_model(self): + def test_removing_relation_refreshes_charm_model(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app @@ -537,7 +537,7 @@ def test_removing_relation_refreshes_charm_model(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # Add a relation and update app data @@ -554,7 +554,7 @@ def test_removing_relation_refreshes_charm_model(self): 
harness.remove_relation(rel_id) assert self._find_relation_in_model_by_id(harness, rel_id) is None - def test_remove_relation_marks_relation_as_inactive(self): + def test_remove_relation_marks_relation_as_inactive(self, request: pytest.FixtureRequest): relations: typing.List[str] = [] is_broken = False @@ -574,7 +574,7 @@ def _db_relation_broken(self, event: ops.RelationBrokenEvent): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() rel_id = harness.add_relation('db', 'postgresql') harness.remove_relation(rel_id) @@ -591,14 +591,14 @@ def _find_relation_in_model_by_id( return relation return None - def test_removing_relation_unit_removes_data_also(self): + def test_removing_relation_unit_removes_data_also(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # Add a relation and unit with data @@ -632,14 +632,17 @@ def test_removing_relation_unit_removes_data_also(self): 'departing_unit': 'postgresql/0', 'relation_id': rel_id}} - def test_removing_relation_unit_does_not_remove_other_unit_and_data(self): + def test_removing_relation_unit_does_not_remove_other_unit_and_data( + self, + request: pytest.FixtureRequest, + ): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') # Add a relation with two units with data @@ -675,14 +678,14 @@ def test_removing_relation_unit_does_not_remove_other_unit_and_data(self): 'departing_unit': 'postgresql/1', 'relation_id': rel_id}} - def test_relation_events(self): + def test_relation_events(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RelationEventCharm, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') assert harness.charm.get_changes() == [] @@ -723,7 +726,7 @@ def test_relation_events(self): 'relation_id': rel_id, }}] - def test_get_relation_data(self): + def test_get_relation_data(self, request: pytest.FixtureRequest): charm_meta = ''' name: test-app requires: @@ -731,7 +734,7 @@ def test_get_relation_data(self): interface: pgsql ''' harness = ops.testing.Harness(ops.CharmBase, meta=charm_meta) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') harness.update_relation_data(rel_id, 'postgresql', {'remote': 'data'}) assert harness.get_relation_data(rel_id, 'test-app') == {} @@ -753,7 +756,7 @@ def test_get_relation_data(self): pg_app = ops.Application('postgresql', meta, harness._backend, t_cache) assert harness.get_relation_data(rel_id, pg_app) == {'remote': 'data'} - def test_create_harness_twice(self): + def test_create_harness_twice(self, request: pytest.FixtureRequest): metadata = ''' name: my-charm requires: @@ -761,9 +764,9 @@ def test_create_harness_twice(self): interface: pgsql ''' harness1 = ops.testing.Harness(ops.CharmBase, meta=metadata) - self.addCleanup(harness1.cleanup) + request.addfinalizer(harness1.cleanup) harness2 = ops.testing.Harness(ops.CharmBase, meta=metadata) - self.addCleanup(harness2.cleanup) + 
request.addfinalizer(harness2.cleanup) harness1.begin() harness2.begin() helper1 = DBRelationChangedHelper(harness1.charm, "helper1") @@ -774,7 +777,7 @@ def test_create_harness_twice(self): assert helper1.changes == [] assert helper2.changes == [(rel_id, 'postgresql')] - def test_begin_twice(self): + def test_begin_twice(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app @@ -782,19 +785,19 @@ def test_begin_twice(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() with pytest.raises(RuntimeError): harness.begin() - def test_update_relation_exposes_new_data(self): + def test_update_relation_exposes_new_data(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') rel_id = harness.add_relation('db', 'postgresql') @@ -805,7 +808,7 @@ def test_update_relation_exposes_new_data(self): assert viewer.changes == [{'initial': 'data'}, {'initial': 'data', 'new': 'value'}] - def test_update_relation_no_local_unit_change_event(self): + def test_update_relation_no_local_unit_change_event(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm @@ -813,7 +816,7 @@ def test_update_relation_no_local_unit_change_event(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() helper = DBRelationChangedHelper(harness.charm, "helper") rel_id = harness.add_relation('db', 'postgresql') @@ -831,7 +834,7 @@ def test_update_relation_no_local_unit_change_event(self): # But there were no changed events registered by our unit. 
assert helper.changes == [] - def test_update_peer_relation_no_local_unit_change_event(self): + def test_update_peer_relation_no_local_unit_change_event(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(ops.CharmBase, meta=''' name: postgresql @@ -839,7 +842,7 @@ def test_update_peer_relation_no_local_unit_change_event(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() helper = DBRelationChangedHelper(harness.charm, "helper") rel_id = harness.add_relation('db', 'postgresql') @@ -870,7 +873,7 @@ def test_update_peer_relation_no_local_unit_change_event(self): assert rel.data[harness.charm.model.unit]['key'] == 'v4' assert helper.changes == [] - def test_update_peer_relation_app_data(self): + def test_update_peer_relation_app_data(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(ops.CharmBase, meta=''' name: postgresql @@ -878,7 +881,7 @@ def test_update_peer_relation_app_data(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(is_leader=True) helper = DBRelationChangedHelper(harness.charm, "helper") @@ -902,7 +905,7 @@ def test_update_peer_relation_app_data(self): assert rel.data[harness.charm.model.app]['k2'] == 'v2' assert helper.changes == [(0, 'postgresql')] - def test_update_relation_no_local_app_change_event(self): + def test_update_relation_no_local_app_change_event(self, request: pytest.FixtureRequest): # language=YAML harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm @@ -910,7 +913,7 @@ def test_update_relation_no_local_app_change_event(self): db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) helper = DBRelationChangedHelper(harness.charm, "helper") @@ -929,14 +932,14 @@ def test_update_relation_no_local_app_change_event(self): # But there were no changed events registered by our unit. 
assert helper.changes == [] - def test_update_relation_remove_data(self): + def test_update_relation_remove_data(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') rel_id = harness.add_relation('db', 'postgresql') @@ -945,14 +948,14 @@ def test_update_relation_remove_data(self): harness.update_relation_data(rel_id, 'postgresql/0', {'initial': ''}) assert viewer.changes == [{'initial': 'data'}, {}] - def test_no_event_on_empty_update_relation_unit_app(self): + def test_no_event_on_empty_update_relation_unit_app(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') rel_id = harness.add_relation('db', 'postgresql') @@ -961,14 +964,14 @@ def test_no_event_on_empty_update_relation_unit_app(self): harness.update_relation_data(rel_id, 'postgresql', {}) assert viewer.changes == [{'initial': 'data'}] - def test_no_event_on_no_diff_update_relation_unit_app(self): + def test_no_event_on_no_diff_update_relation_unit_app(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') rel_id = harness.add_relation('db', 'postgresql') @@ -977,14 +980,14 @@ def test_no_event_on_no_diff_update_relation_unit_app(self): harness.update_relation_data(rel_id, 'postgresql', {'initial': 'data'}) assert viewer.changes == [{'initial': 'data'}] - def test_no_event_on_empty_update_relation_unit_bag(self): + def test_no_event_on_empty_update_relation_unit_bag(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') rel_id = harness.add_relation('db', 'postgresql') @@ -993,14 +996,14 @@ def test_no_event_on_empty_update_relation_unit_bag(self): harness.update_relation_data(rel_id, 'postgresql/0', {}) assert viewer.changes == [{'initial': 'data'}] - def test_no_event_on_no_diff_update_relation_unit_bag(self): + def test_no_event_on_no_diff_update_relation_unit_bag(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: my-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') rel_id = harness.add_relation('db', 'postgresql') @@ -1013,7 +1016,7 @@ def test_empty_config_raises(self): with pytest.raises(TypeError): ops.testing.Harness(RecordingCharm, config='') - def test_update_config(self): + def test_update_config(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, config=''' options: a: @@ -1023,7 +1026,7 @@ def test_update_config(self): description: another config option type: int ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() 
harness.update_config(key_values={'a': 'foo', 'b': 2}) assert harness.charm.changes == \ @@ -1040,14 +1043,14 @@ def test_update_config(self): {'name': 'config-changed', 'data': {'a': ''}}, ] - def test_update_config_undefined_option(self): + def test_update_config_undefined_option(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() with pytest.raises(ValueError): harness.update_config(key_values={'nonexistent': 'foo'}) - def test_update_config_bad_type(self): + def test_update_config_bad_type(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, config=''' options: a: @@ -1055,7 +1058,7 @@ def test_update_config_bad_type(self): type: boolean default: false ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() with pytest.raises(RuntimeError): # cannot cast to bool @@ -1082,7 +1085,7 @@ def test_bad_config_option_type(self): default: False ''') - def test_config_secret_option(self): + def test_config_secret_option(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, config=''' options: a: @@ -1090,7 +1093,7 @@ def test_config_secret_option(self): type: secret default: "" ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'key': 'value'}) harness.update_config(key_values={'a': secret_id}) @@ -1115,7 +1118,7 @@ def test_uncastable_config_option_type(self): default: peek-a-bool! ''') - def test_update_config_unset_boolean(self): + def test_update_config_unset_boolean(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, config=''' options: a: @@ -1123,7 +1126,7 @@ def test_update_config_unset_boolean(self): type: boolean default: False ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # Check the default was set correctly assert harness.charm.config == {'a': False} @@ -1136,9 +1139,9 @@ def test_update_config_unset_boolean(self): [{'name': 'config-changed', 'data': {'a': True}}, {'name': 'config-changed', 'data': {'a': False}}] - def test_set_leader(self): + def test_set_leader(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # No event happens here harness.set_leader(False) harness.begin() @@ -1157,14 +1160,14 @@ def test_set_leader(self): # No hook event if you have disabled them assert harness.charm.get_changes(reset=True) == [] - def test_relation_set_app_not_leader(self): + def test_relation_set_app_not_leader(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) rel_id = harness.add_relation('db', 'postgresql') @@ -1180,7 +1183,7 @@ def test_relation_set_app_not_leader(self): rel.data[harness.charm.app]['foo'] = 'bar' assert harness.get_relation_data(rel_id, 'test-charm') == {'foo': 'bar'} - def test_hooks_enabled_and_disabled(self): + def test_hooks_enabled_and_disabled(self, request: pytest.FixtureRequest): harness = ops.testing.Harness( RecordingCharm, meta=''' @@ -1193,7 +1196,7 @@ def test_hooks_enabled_and_disabled(self): third: type: string ''') - self.addCleanup(harness.cleanup) + 
request.addfinalizer(harness.cleanup) # Before begin() there are no events. harness.update_config({'value': 'first'}) # By default, after begin the charm is set up to receive events. @@ -1210,7 +1213,7 @@ def test_hooks_enabled_and_disabled(self): assert harness.charm.get_changes(reset=True) == \ [{'name': 'config-changed', 'data': {'value': 'fourth', 'third': '3'}}] - def test_hooks_disabled_contextmanager(self): + def test_hooks_disabled_contextmanager(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-charm ''', config=''' @@ -1220,7 +1223,7 @@ def test_hooks_disabled_contextmanager(self): third: type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # Before begin() there are no events. harness.update_config({'value': 'first'}) # By default, after begin the charm is set up to receive events. @@ -1236,7 +1239,7 @@ def test_hooks_disabled_contextmanager(self): assert harness.charm.get_changes(reset=True) == \ [{'name': 'config-changed', 'data': {'value': 'fourth', 'third': '3'}}] - def test_hooks_disabled_nested_contextmanager(self): + def test_hooks_disabled_nested_contextmanager(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-charm ''', config=''' @@ -1246,7 +1249,7 @@ def test_hooks_disabled_nested_contextmanager(self): sixth: type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # Context manager can be nested, so a test using it can invoke a helper using it. with harness.hooks_disabled(): @@ -1255,7 +1258,7 @@ def test_hooks_disabled_nested_contextmanager(self): harness.update_config({'sixth': '6'}) assert harness.charm.get_changes(reset=True) == [] - def test_hooks_disabled_noop(self): + def test_hooks_disabled_noop(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-charm ''', config=''' @@ -1265,7 +1268,7 @@ def test_hooks_disabled_noop(self): eighth: type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # If hooks are already disabled, it is a no op, and on exit hooks remain disabled. 
harness.disable_hooks() @@ -1274,9 +1277,9 @@ def test_hooks_disabled_noop(self): harness.update_config({'eighth': '8'}) assert harness.charm.get_changes(reset=True) == [] - def test_metadata_from_directory(self): + def test_metadata_from_directory(self, request: pytest.FixtureRequest): tmp = pathlib.Path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, str(tmp)) + request.addfinalizer(lambda: shutil.rmtree(tmp)) metadata_filename = tmp / 'metadata.yaml' with metadata_filename.open('wt') as metadata: metadata.write(textwrap.dedent(''' @@ -1285,15 +1288,15 @@ def test_metadata_from_directory(self): db: interface: pgsql ''')) - harness = self._get_dummy_charm_harness(tmp) + harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.model.relations) == ['db'] # The charm_dir also gets set assert harness.framework.charm_dir == tmp - def test_metadata_from_directory_charmcraft_yaml(self): + def test_metadata_from_directory_charmcraft_yaml(self, request: pytest.FixtureRequest): tmp = pathlib.Path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, tmp) + request.addfinalizer(lambda: shutil.rmtree(tmp)) charmcraft_filename = tmp / 'charmcraft.yaml' charmcraft_filename.write_text(textwrap.dedent(''' type: charm @@ -1310,15 +1313,15 @@ def test_metadata_from_directory_charmcraft_yaml(self): db: interface: pgsql ''')) - harness = self._get_dummy_charm_harness(tmp) + harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.model.relations) == ['db'] # The charm_dir also gets set assert harness.framework.charm_dir == tmp - def test_config_from_directory(self): + def test_config_from_directory(self, request: pytest.FixtureRequest): tmp = pathlib.Path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, str(tmp)) + request.addfinalizer(lambda: shutil.rmtree(tmp)) config_filename = tmp / 'config.yaml' with config_filename.open('wt') as config: config.write(textwrap.dedent(''' @@ -1344,7 +1347,7 @@ def test_config_from_directory(self): opt_no_default: type: string ''')) - harness = self._get_dummy_charm_harness(tmp) + harness = self._get_dummy_charm_harness(request, tmp) assert harness.model.config['opt_str'] == 'val' assert harness.model.config['opt_str_empty'] == '' assert harness.model.config['opt_bool'] is True @@ -1356,9 +1359,9 @@ def test_config_from_directory(self): assert harness._backend._config._defaults['opt_null'] is None assert harness._backend._config._defaults['opt_no_default'] is None - def test_config_from_directory_charmcraft_yaml(self): + def test_config_from_directory_charmcraft_yaml(self, request: pytest.FixtureRequest): tmp = pathlib.Path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, tmp) + request.addfinalizer(lambda: shutil.rmtree(tmp)) charmcraft_filename = tmp / 'charmcraft.yaml' charmcraft_filename.write_text(textwrap.dedent(''' type: charm @@ -1379,12 +1382,12 @@ def test_config_from_directory_charmcraft_yaml(self): type: int default: 1 ''')) - harness = self._get_dummy_charm_harness(tmp) + harness = self._get_dummy_charm_harness(request, tmp) assert harness.model.config['opt_str'] == 'val' assert harness.model.config['opt_int'] == 1 assert isinstance(harness.model.config['opt_int'], int) - def test_config_in_repl(self): + def test_config_in_repl(self, request: pytest.FixtureRequest): # In a REPL, there is no "source file", but we should still be able to # provide explicit metadata, and fall back to the default otherwise. 
with patch.object(inspect, 'getfile', side_effect=OSError()): @@ -1396,39 +1399,39 @@ def test_config_in_repl(self): type: int default: 42 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() - self.assertEqual(harness._meta.name, "repl-charm") - self.assertEqual(harness.charm.model.config['foo'], 42) + assert harness._meta.name == "repl-charm" + assert harness.charm.model.config['foo'] == 42 harness = ops.testing.Harness(ops.CharmBase) - self.addCleanup(harness.cleanup) - self.assertEqual(harness._meta.name, "test-charm") + request.addfinalizer(harness.cleanup) + assert harness._meta.name == "test-charm" - def test_set_model_name(self): + def test_set_model_name(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_model_name('foo') assert harness.model.name == 'foo' - def test_set_model_name_after_begin(self): + def test_set_model_name_after_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_model_name('bar') harness.begin() with pytest.raises(RuntimeError): harness.set_model_name('foo') assert harness.model.name == 'bar' - def test_set_model_uuid_after_begin(self): + def test_set_model_uuid_after_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_model_name('bar') harness.set_model_uuid('96957e90-e006-11eb-ba80-0242ac130004') harness.begin() @@ -1436,11 +1439,11 @@ def test_set_model_uuid_after_begin(self): harness.set_model_uuid('af0479ea-e006-11eb-ba80-0242ac130004') assert harness.model.uuid == '96957e90-e006-11eb-ba80-0242ac130004' - def test_set_model_info_after_begin(self): + def test_set_model_info_after_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_model_info('foo', '96957e90-e006-11eb-ba80-0242ac130004') harness.begin() with pytest.raises(RuntimeError): @@ -1456,7 +1459,7 @@ def test_set_model_info_after_begin(self): assert harness.model.name == 'foo' assert harness.model.uuid == '96957e90-e006-11eb-ba80-0242ac130004' - def test_add_storage_before_harness_begin(self): + def test_add_storage_before_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1468,7 +1471,7 @@ def test_add_storage_before_harness_begin(self): multiple: range: 1-3 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) stor_ids = harness.add_storage("test", count=3) for s in stor_ids: @@ -1478,7 +1481,7 @@ def test_add_storage_before_harness_begin(self): with pytest.raises(ops.ModelError): harness._backend.storage_get("test/0", "location")[-6:] - def test_add_storage_then_harness_begin(self): + def test_add_storage_then_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1490,7 +1493,7 @@ def test_add_storage_then_harness_begin(self): multiple: range: 1-3 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.add_storage("test", 
count=3) @@ -1505,7 +1508,7 @@ def test_add_storage_then_harness_begin(self): want = str(pathlib.PurePath('test', '0')) assert want == harness._backend.storage_get("test/0", "location")[-6:] - def test_add_storage_not_attached_default(self): + def test_add_storage_not_attached_default(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: @@ -1515,28 +1518,28 @@ def test_add_storage_not_attached_default(self): test: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.add_storage('test') harness.begin() assert len(harness.model.storages['test']) == 0, \ 'storage should start in detached state and be excluded from storage listing' - def test_add_storage_without_metadata_key_fails(self): + def test_add_storage_without_metadata_key_fails(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError) as excinfo: harness.add_storage("test") assert excinfo.value.args[0] == \ "the key 'test' is not specified as a storage key in metadata" - def test_add_storage_after_harness_begin(self): + def test_add_storage_after_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1548,7 +1551,7 @@ def test_add_storage_after_harness_begin(self): multiple: range: 1-3 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # Set up initial storage harness.add_storage("test")[0] @@ -1573,7 +1576,7 @@ def test_add_storage_after_harness_begin(self): for i in range(1, 4): assert isinstance(harness.charm.observed_events[i], ops.StorageAttachedEvent) - def test_detach_storage(self): + def test_detach_storage(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1583,7 +1586,7 @@ def test_detach_storage(self): test: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # Set up initial storage stor_id = harness.add_storage("test")[0] @@ -1612,7 +1615,7 @@ def test_detach_storage(self): assert len(harness.charm.observed_events) == 2 assert isinstance(harness.charm.observed_events[1], ops.StorageDetachingEvent) - def test_detach_storage_before_harness_begin(self): + def test_detach_storage_before_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1622,7 +1625,7 @@ def test_detach_storage_before_harness_begin(self): test: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) stor_id = harness.add_storage("test")[0] with pytest.raises(RuntimeError) as excinfo: @@ -1630,7 +1633,7 @@ def test_detach_storage_before_harness_begin(self): assert excinfo.value.args[0] == \ "cannot detach storage before Harness is initialised" - def test_storage_with_hyphens_works(self): + def test_storage_with_hyphens_works(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1642,7 +1645,7 @@ def test_storage_with_hyphens_works(self): test-with-hyphens: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # Set up initial storage harness.begin() @@ -1651,7 +1654,7 @@ def 
test_storage_with_hyphens_works(self): assert len(helper.changes) == 1 - def test_attach_storage(self): + def test_attach_storage(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1661,7 +1664,7 @@ def test_attach_storage(self): test: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # Set up initial storage stor_id = harness.add_storage("test")[0] @@ -1691,7 +1694,7 @@ def test_attach_storage(self): assert len(harness.charm.observed_events) == 3 assert isinstance(harness.charm.observed_events[2], ops.StorageAttachedEvent) - def test_attach_storage_before_harness_begin(self): + def test_attach_storage_before_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1701,14 +1704,14 @@ def test_attach_storage_before_harness_begin(self): test: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # We deliberately don't guard against attaching storage before the harness begins, # as there are legitimate reasons to do so. stor_id = harness.add_storage("test")[0] assert stor_id - def test_remove_storage_before_harness_begin(self): + def test_remove_storage_before_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1720,7 +1723,7 @@ def test_remove_storage_before_harness_begin(self): multiple: range: 1-3 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) stor_ids = harness.add_storage("test", count=2) harness.remove_storage(stor_ids[0]) @@ -1736,14 +1739,14 @@ def test_remove_storage_before_harness_begin(self): assert len(harness.charm.observed_events) == 1 assert isinstance(harness.charm.observed_events[0], ops.StorageAttachedEvent) - def test_remove_storage_without_metadata_key_fails(self): + def test_remove_storage_without_metadata_key_fails(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # Doesn't really make sense since we already can't add storage which isn't in the metadata, # but included for completeness. 
@@ -1752,7 +1755,7 @@ def test_remove_storage_without_metadata_key_fails(self): assert excinfo.value.args[0] == \ "the key 'test' is not specified as a storage key in metadata" - def test_remove_storage_after_harness_begin(self): + def test_remove_storage_after_harness_begin(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1764,7 +1767,7 @@ def test_remove_storage_after_harness_begin(self): multiple: range: 1-3 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) stor_ids = harness.add_storage("test", count=2) harness.begin_with_initial_hooks() @@ -1783,7 +1786,7 @@ def test_remove_storage_after_harness_begin(self): def _extract_storage_index(self, stor_id: str): return int(stor_id.split('/')[-1]) - def test_remove_detached_storage(self): + def test_remove_detached_storage(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(StorageTester, meta=''' name: test-app requires: @@ -1795,7 +1798,7 @@ def test_remove_detached_storage(self): multiple: range: 1-3 ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) stor_ids = harness.add_storage("test", count=2) harness.begin_with_initial_hooks() @@ -1806,24 +1809,24 @@ def test_remove_detached_storage(self): assert isinstance(harness.charm.observed_events[1], ops.StorageAttachedEvent) assert isinstance(harness.charm.observed_events[2], ops.StorageDetachingEvent) - def test_actions_from_directory(self): + def test_actions_from_directory(self, request: pytest.FixtureRequest): tmp = pathlib.Path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, str(tmp)) + request.addfinalizer(lambda: shutil.rmtree(tmp)) actions_filename = tmp / 'actions.yaml' with actions_filename.open('wt') as actions: actions.write(textwrap.dedent(''' test: description: a dummy action ''')) - harness = self._get_dummy_charm_harness(tmp) + harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.framework.meta.actions) == ['test'] # The charm_dir also gets set assert harness.framework.charm_dir == tmp - def test_actions_from_directory_charmcraft_yaml(self): + def test_actions_from_directory_charmcraft_yaml(self, request: pytest.FixtureRequest): tmp = pathlib.Path(tempfile.mkdtemp()) - self.addCleanup(shutil.rmtree, tmp) + request.addfinalizer(lambda: shutil.rmtree(tmp)) charmcraft_filename = tmp / 'charmcraft.yaml' charmcraft_filename.write_text(textwrap.dedent(''' type: charm @@ -1839,20 +1842,20 @@ def test_actions_from_directory_charmcraft_yaml(self): test: description: a dummy action ''')) - harness = self._get_dummy_charm_harness(tmp) + harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.framework.meta.actions) == ['test'] # The charm_dir also gets set assert harness.framework.charm_dir == tmp - def _get_dummy_charm_harness(self, tmp: pathlib.Path): - self._write_dummy_charm(tmp) + def _get_dummy_charm_harness(self, request: pytest.FixtureRequest, tmp: pathlib.Path): + self._write_dummy_charm(request, tmp) charm_mod = importlib.import_module('testcharm') harness = ops.testing.Harness(charm_mod.MyTestingCharm) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) return harness - def _write_dummy_charm(self, tmp: pathlib.Path): + def _write_dummy_charm(self, request: pytest.FixtureRequest, tmp: pathlib.Path): srcdir = tmp / 'src' srcdir.mkdir(0o755) charm_filename = srcdir / 'testcharm.py' @@ -1870,9 +1873,9 @@ def cleanup(): sys.path = orig 
sys.modules.pop('testcharm') - self.addCleanup(cleanup) + request.addfinalizer(cleanup) - def test_actions_passed_in(self): + def test_actions_passed_in(self, request: pytest.FixtureRequest): harness = ops.testing.Harness( ops.CharmBase, meta=''' @@ -1882,7 +1885,7 @@ def test_actions_passed_in(self): test-action: description: a dummy test action ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) assert list(harness.framework.meta.actions) == ['test-action'] def test_event_context(self): @@ -1949,14 +1952,14 @@ def mock_join_db(event: ops.EventBase): assert not harness._backend._hook_is_running assert rel.data[harness.charm.app]['foo'] == 'bar' - def test_relation_set_deletes(self): + def test_relation_set_deletes(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) rel_id = harness.add_relation('db', 'postgresql') @@ -1967,14 +1970,14 @@ def test_relation_set_deletes(self): del rel.data[harness.charm.model.unit]['foo'] assert harness.get_relation_data(rel_id, 'test-charm/0') == {} - def test_relation_set_nonstring(self): + def test_relation_set_nonstring(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) rel_id = harness.add_relation('db', 'postgresql') @@ -1983,24 +1986,24 @@ def test_relation_set_nonstring(self): harness.update_relation_data(rel_id, 'test-charm/0', {'foo': invalid_value}) # type: ignore - def test_set_workload_version(self): + def test_set_workload_version(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() assert harness.get_workload_version() is None harness.charm.model.unit.set_workload_version('1.2.3') assert harness.get_workload_version() == '1.2.3' - def test_get_backend_calls(self): + def test_get_backend_calls(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # No calls to the backend yet assert harness._get_backend_calls() == [] @@ -2043,14 +2046,14 @@ def test_get_backend_calls(self): # And the calls are gone assert harness._get_backend_calls() == [] - def test_get_backend_calls_with_kwargs(self): + def test_get_backend_calls_with_kwargs(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm requires: db: interface: pgsql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() unit = harness.charm.model.unit # Reset the list, because we don't care what it took to get here @@ -2064,9 +2067,9 @@ def test_get_backend_calls_with_kwargs(self): assert harness._get_backend_calls() == [ ('is_leader',), ('status_set', 'active', 'message', {'is_app': True})] - def test_unit_status(self): + def test_unit_status(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: test-app') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) 
harness.set_leader(True) harness.begin() # default status @@ -2075,9 +2078,9 @@ def test_unit_status(self): harness.model.unit.status = status assert harness.model.unit.status == status - def test_app_status(self): + def test_app_status(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: test-app') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader(True) harness.begin() # default status @@ -2086,7 +2089,7 @@ def test_app_status(self): harness.model.app.status = status assert harness.model.app.status == status - def test_populate_oci_resources(self): + def test_populate_oci_resources(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2097,7 +2100,7 @@ def test_populate_oci_resources(self): type: oci-image description: "Another image." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.populate_oci_resources() path = harness.model.resources.fetch('image') assert path.name == 'contents.yaml' @@ -2111,7 +2114,7 @@ def test_populate_oci_resources(self): assert path.name == 'contents.yaml' assert path.parent.name == 'image2' - def test_resource_folder_cleanup(self): + def test_resource_folder_cleanup(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2119,7 +2122,7 @@ def test_resource_folder_cleanup(self): type: oci-image description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.populate_oci_resources() path = harness.model.resources.fetch('image') assert path.exists() @@ -2128,14 +2131,14 @@ def test_resource_folder_cleanup(self): assert not path.parent.exists() assert not path.parent.parent.exists() - def test_container_isdir_and_exists(self): + def test_container_isdir_and_exists(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app containers: foo: resource: foo-image ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_can_connect('foo', True) c = harness.model.unit.containers['foo'] @@ -2156,7 +2159,7 @@ def test_container_isdir_and_exists(self): assert not c.isdir(file_path) assert c.exists(file_path) - def test_add_oci_resource_custom(self): + def test_add_oci_resource_custom(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2164,7 +2167,7 @@ def test_add_oci_resource_custom(self): type: oci-image description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) custom = { "registrypath": "custompath", "username": "custom_username", @@ -2178,7 +2181,7 @@ def test_add_oci_resource_custom(self): assert contents['username'] == 'custom_username' assert contents['password'] == 'custom_password' - def test_add_oci_resource_no_image(self): + def test_add_oci_resource_no_image(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2186,14 +2189,14 @@ def test_add_oci_resource_no_image(self): type: file description: "Image to deploy." 
''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): harness.add_oci_resource("image") with pytest.raises(RuntimeError): harness.add_oci_resource("missing-resource") assert len(harness._backend._resources_map) == 0 - def test_add_resource_unknown(self): + def test_add_resource_unknown(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2201,11 +2204,11 @@ def test_add_resource_unknown(self): type: file description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): harness.add_resource('unknown', 'content') - def test_add_resource_but_oci(self): + def test_add_resource_but_oci(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2213,11 +2216,11 @@ def test_add_resource_but_oci(self): type: oci-image description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): harness.add_resource('image', 'content') - def test_add_resource_string(self): + def test_add_resource_string(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2226,7 +2229,7 @@ def test_add_resource_string(self): filename: foo.txt description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.add_resource('image', 'foo contents\n') path = harness.model.resources.fetch('image') assert path.name == 'foo.txt' @@ -2234,7 +2237,7 @@ def test_add_resource_string(self): with path.open('rt') as f: assert f.read() == 'foo contents\n' - def test_add_resource_bytes(self): + def test_add_resource_bytes(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2243,7 +2246,7 @@ def test_add_resource_bytes(self): filename: foo.zip description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) raw_contents = b'\xff\xff\x00blah\n' harness.add_resource('image', raw_contents) path = harness.model.resources.fetch('image') @@ -2252,7 +2255,7 @@ def test_add_resource_bytes(self): with path.open('rb') as f: assert raw_contents == f.read() - def test_add_resource_unknown_filename(self): + def test_add_resource_unknown_filename(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -2260,24 +2263,24 @@ def test_add_resource_unknown_filename(self): type: file description: "Image to deploy." 
''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.add_resource('image', 'foo contents\n') path = harness.model.resources.fetch('image') assert path.name == 'image' assert path.parent.name == 'image' - def test_get_pod_spec(self): + def test_get_pod_spec(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader(True) container_spec = {'container': 'spec'} k8s_resources = {'k8s': 'spec'} harness.model.pod.set_spec(container_spec, k8s_resources) assert harness.get_pod_spec() == (container_spec, k8s_resources) - def test_begin_with_initial_hooks_no_relations(self): + def test_begin_with_initial_hooks_no_relations(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-app ''', config=''' @@ -2286,7 +2289,7 @@ def test_begin_with_initial_hooks_no_relations(self): description: a config option type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.update_config({'foo': 'bar'}) harness.set_leader(True) with pytest.raises(RuntimeError): @@ -2301,7 +2304,10 @@ def test_begin_with_initial_hooks_no_relations(self): {'name': 'start'}, ] - def test_begin_with_initial_hooks_no_relations_not_leader(self): + def test_begin_with_initial_hooks_no_relations_not_leader( + self, + request: pytest.FixtureRequest, + ): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-app ''', config=''' @@ -2310,7 +2316,7 @@ def test_begin_with_initial_hooks_no_relations_not_leader(self): description: a config option type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.update_config({'foo': 'bar'}) with pytest.raises(RuntimeError): _ = harness.charm @@ -2324,7 +2330,7 @@ def test_begin_with_initial_hooks_no_relations_not_leader(self): {'name': 'start'}, ] - def test_begin_with_initial_hooks_with_peer_relation(self): + def test_begin_with_initial_hooks_with_peer_relation(self, request: pytest.FixtureRequest): class PeerCharm(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2340,7 +2346,7 @@ def __init__(self, framework: ops.Framework): description: a config option type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.update_config({'foo': 'bar'}) with pytest.raises(RuntimeError): _ = harness.charm @@ -2365,7 +2371,10 @@ def __init__(self, framework: ops.Framework): ] # With a single unit, no peer-relation-joined is fired - def test_begin_with_initial_hooks_peer_relation_pre_defined(self): + def test_begin_with_initial_hooks_peer_relation_pre_defined( + self, + request: pytest.FixtureRequest, + ): class PeerCharm(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2376,7 +2385,7 @@ def __init__(self, framework: ops.Framework): peer: interface: app-peer ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) peer_rel_id = harness.add_relation('peer', 'test-app') harness.begin_with_initial_hooks() # If the peer relation is already defined by the user, we don't create the relation a @@ -2396,7 +2405,10 @@ def __init__(self, framework: ops.Framework): {'name': 'start'}, ] - def test_begin_with_initial_hooks_relation_charm_with_no_relation(self): + def test_begin_with_initial_hooks_relation_charm_with_no_relation( + self, + 
request: pytest.FixtureRequest, + ): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2407,7 +2419,7 @@ def __init__(self, framework: ops.Framework): db: interface: sql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader() harness.begin_with_initial_hooks() assert harness.charm.changes == \ @@ -2418,7 +2430,7 @@ def __init__(self, framework: ops.Framework): {'name': 'start'}, ] - def test_begin_with_initial_hooks_with_one_relation(self): + def test_begin_with_initial_hooks_with_one_relation(self, request: pytest.FixtureRequest): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2429,7 +2441,7 @@ def __init__(self, framework: ops.Framework): db: interface: sql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader() rel_id = harness.add_relation('db', 'postgresql') harness.add_relation_unit(rel_id, 'postgresql/0') @@ -2464,7 +2476,7 @@ def __init__(self, framework: ops.Framework): }}, ] - def test_begin_with_initial_hooks_with_application_data(self): + def test_begin_with_initial_hooks_with_application_data(self, request: pytest.FixtureRequest): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2475,7 +2487,7 @@ def __init__(self, framework: ops.Framework): db: interface: sql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader() rel_id = harness.add_relation('db', 'postgresql') harness.add_relation_unit(rel_id, 'postgresql/0') @@ -2518,7 +2530,7 @@ def __init__(self, framework: ops.Framework): }}, ] - def test_begin_with_initial_hooks_with_multiple_units(self): + def test_begin_with_initial_hooks_with_multiple_units(self, request: pytest.FixtureRequest): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2529,7 +2541,7 @@ def __init__(self, framework: ops.Framework): db: interface: sql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader() rel_id = harness.add_relation('db', 'postgresql') harness.add_relation_unit(rel_id, 'postgresql/1') @@ -2580,7 +2592,10 @@ def __init__(self, framework: ops.Framework): }}, ] - def test_begin_with_initial_hooks_multiple_relation_same_endpoint(self): + def test_begin_with_initial_hooks_multiple_relation_same_endpoint( + self, + request: pytest.FixtureRequest, + ): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -2591,7 +2606,7 @@ def __init__(self, framework: ops.Framework): db: interface: sql ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.set_leader() rel_id_a = harness.add_relation('db', 'pg-a') harness.add_relation_unit(rel_id_a, 'pg-a/0') @@ -2669,7 +2684,7 @@ def __init__(self, framework: ops.Framework): b_first = [a_first[2], a_first[3], a_first[0], a_first[1]] assert changes == b_first - def test_begin_with_initial_hooks_unknown_status(self): + def test_begin_with_initial_hooks_unknown_status(self, request: pytest.FixtureRequest): # Verify that a charm that does not set a status in the install hook will have an # unknown status in the harness. 
harness = ops.testing.Harness(RecordingCharm, meta=''' @@ -2680,7 +2695,7 @@ def test_begin_with_initial_hooks_unknown_status(self): description: a config option type: string ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend harness.begin_with_initial_hooks() @@ -2690,7 +2705,7 @@ def test_begin_with_initial_hooks_unknown_status(self): assert backend.status_get(is_app=True) == \ {'status': 'unknown', 'message': ''} - def test_begin_with_initial_hooks_install_sets_status(self): + def test_begin_with_initial_hooks_install_sets_status(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm, meta=''' name: test-app ''', config=''' @@ -2700,7 +2715,7 @@ def test_begin_with_initial_hooks_install_sets_status(self): type: boolean ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend harness.update_config(key_values={"set_status": True}) harness.begin_with_initial_hooks() @@ -2708,14 +2723,14 @@ def test_begin_with_initial_hooks_install_sets_status(self): assert backend.status_get(is_app=False) == \ {'status': 'maintenance', 'message': 'Status set on install'} - def test_get_pebble_container_plan(self): + def test_get_pebble_container_plan(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app containers: foo: resource: foo-image ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_can_connect('foo', True) initial_plan = harness.get_container_pebble_plan('foo') @@ -2782,14 +2797,14 @@ def test_add_layer_with_log_targets_to_plan(self): assert plan.checks.get('bar') is not None assert plan.log_targets.get('baz') is not None - def test_get_pebble_container_plan_unknown(self): + def test_get_pebble_container_plan_unknown(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app containers: foo: resource: foo-image ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_can_connect('foo', True) with pytest.raises(KeyError): @@ -2797,14 +2812,14 @@ def test_get_pebble_container_plan_unknown(self): plan = harness.get_container_pebble_plan('foo') assert plan.to_yaml() == "{}\n" - def test_container_pebble_ready(self): + def test_container_pebble_ready(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ContainerEventCharm, meta=''' name: test-app containers: foo: resource: foo-image ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) # This is a no-op if it is called before begin(), but it isn't an error harness.container_pebble_ready('foo') harness.begin() @@ -2878,9 +2893,10 @@ def test_invalid_status_set(self): harness.model.unit.status = ops.ActiveStatus() -class TestNetwork(unittest.TestCase): - def setUp(self): - self.harness = ops.testing.Harness(ops.CharmBase, meta=''' +class TestNetwork: + @pytest.fixture + def harness(self): + harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm requires: db: @@ -2888,12 +2904,13 @@ def setUp(self): foo: interface: xyz ''') - self.addCleanup(self.harness.cleanup) + yield harness + harness.cleanup() - def test_add_network_defaults(self): - self.harness.add_network('10.0.0.10') + def test_add_network_defaults(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.add_network('10.0.0.10') - binding = self.harness.model.get_binding('db') + binding = 
harness.model.get_binding('db') assert binding is not None assert binding.name == 'db' network = binding.network @@ -2907,19 +2924,21 @@ def test_add_network_defaults(self): assert interface.address == ipaddress.IPv4Address('10.0.0.10') assert interface.subnet == ipaddress.IPv4Network('10.0.0.0/24') - def test_add_network_all_args(self): - relation_id = self.harness.add_relation('db', 'postgresql') - self.harness.add_network('10.0.0.10', - endpoint='db', - relation_id=relation_id, - cidr='10.0.0.0/8', - interface='eth1', - ingress_addresses=['10.0.0.1', '10.0.0.2'], - egress_subnets=['10.0.0.0/8', '10.10.0.0/16']) - - relation = self.harness.model.get_relation('db', relation_id) + def test_add_network_all_args(self, harness: ops.testing.Harness[ops.CharmBase]): + relation_id = harness.add_relation('db', 'postgresql') + harness.add_network( + '10.0.0.10', + endpoint='db', + relation_id=relation_id, + cidr='10.0.0.0/8', + interface='eth1', + ingress_addresses=['10.0.0.1', '10.0.0.2'], + egress_subnets=['10.0.0.0/8', '10.10.0.0/16'], + ) + + relation = harness.model.get_relation('db', relation_id) assert relation is not None - binding = self.harness.model.get_binding(relation) + binding = harness.model.get_binding(relation) assert binding is not None assert binding.name == 'db' network = binding.network @@ -2936,67 +2955,67 @@ def test_add_network_all_args(self): assert interface.address == ipaddress.IPv4Address('10.0.0.10') assert interface.subnet == ipaddress.IPv4Network('10.0.0.0/8') - def test_add_network_specific_endpoint(self): - self.harness.add_network('10.0.0.1') - self.harness.add_network('10.0.2.1', endpoint='db') + def test_add_network_specific_endpoint(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.add_network('10.0.0.1') + harness.add_network('10.0.2.1', endpoint='db') - binding = self.harness.model.get_binding('db') + binding = harness.model.get_binding('db') assert binding is not None assert binding.name == 'db' network = binding.network assert network.bind_address == ipaddress.IPv4Address('10.0.2.1') # Ensure binding for the other interface is still on the default value - foo_binding = self.harness.model.get_binding('foo') + foo_binding = harness.model.get_binding('foo') assert foo_binding is not None assert foo_binding.network.bind_address == \ ipaddress.IPv4Address('10.0.0.1') - def test_add_network_specific_relation(self): - self.harness.add_network('10.0.0.1') - self.harness.add_network('10.0.2.1', endpoint='db') - relation_id = self.harness.add_relation('db', 'postgresql') - self.harness.add_network('35.0.0.1', endpoint='db', relation_id=relation_id) + def test_add_network_specific_relation(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.add_network('10.0.0.1') + harness.add_network('10.0.2.1', endpoint='db') + relation_id = harness.add_relation('db', 'postgresql') + harness.add_network('35.0.0.1', endpoint='db', relation_id=relation_id) - relation = self.harness.model.get_relation('db', relation_id) + relation = harness.model.get_relation('db', relation_id) assert relation is not None - binding = self.harness.model.get_binding(relation) + binding = harness.model.get_binding(relation) assert binding is not None assert binding.name == 'db' network = binding.network assert network.bind_address == ipaddress.IPv4Address('35.0.0.1') # Ensure binding for the other interface is still on the default value - foo_binding = self.harness.model.get_binding('foo') + foo_binding = harness.model.get_binding('foo') assert foo_binding is not None assert 
foo_binding.network.bind_address == \ ipaddress.IPv4Address('10.0.0.1') - def test_add_network_endpoint_fallback(self): - relation_id = self.harness.add_relation('db', 'postgresql') - self.harness.add_network('10.0.0.10', endpoint='db') + def test_add_network_endpoint_fallback(self, harness: ops.testing.Harness[ops.CharmBase]): + relation_id = harness.add_relation('db', 'postgresql') + harness.add_network('10.0.0.10', endpoint='db') - relation = self.harness.model.get_relation('db', relation_id) + relation = harness.model.get_relation('db', relation_id) assert relation is not None - binding = self.harness.model.get_binding(relation) + binding = harness.model.get_binding(relation) assert binding is not None assert binding.name == 'db' network = binding.network assert network.bind_address == ipaddress.IPv4Address('10.0.0.10') - def test_add_network_default_fallback(self): - self.harness.add_network('10.0.0.10') + def test_add_network_default_fallback(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.add_network('10.0.0.10') - binding = self.harness.model.get_binding('db') + binding = harness.model.get_binding('db') assert binding is not None assert binding.name == 'db' network = binding.network assert network.bind_address == ipaddress.IPv4Address('10.0.0.10') - def test_add_network_ipv6(self): - self.harness.add_network('2001:0db8::a:0:0:1') + def test_add_network_ipv6(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.add_network('2001:0db8::a:0:0:1') - binding = self.harness.model.get_binding('db') + binding = harness.model.get_binding('db') assert binding is not None assert binding.name == 'db' network = binding.network @@ -3010,36 +3029,38 @@ def test_add_network_ipv6(self): assert interface.address == ipaddress.IPv6Address('2001:0db8::a:0:0:1') assert interface.subnet == ipaddress.IPv6Network('2001:0db8::0:0:0:0/64') - def test_network_get_relation_not_found(self): + def test_network_get_relation_not_found(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(ops.RelationNotFoundError): - binding = self.harness.model.get_binding('db') + binding = harness.model.get_binding('db') assert binding is not None binding.network - def test_add_relation_network_get(self): - self.harness.add_relation('db', 'remote') - binding = self.harness.model.get_binding('db') + def test_add_relation_network_get(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.add_relation('db', 'remote') + binding = harness.model.get_binding('db') assert binding is not None assert binding.network - def test_add_network_endpoint_not_in_meta(self): + def test_add_network_endpoint_not_in_meta(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(ops.ModelError): - self.harness.add_network('35.0.0.1', endpoint='xyz') + harness.add_network('35.0.0.1', endpoint='xyz') - def test_add_network_relation_id_set_endpoint_not_set(self): - relation_id = self.harness.add_relation('db', 'postgresql') + def test_add_network_relation_id_set_endpoint_not_set( + self, harness: ops.testing.Harness[ops.CharmBase]): + relation_id = harness.add_relation('db', 'postgresql') with pytest.raises(TypeError): - self.harness.add_network('35.0.0.1', relation_id=relation_id) + harness.add_network('35.0.0.1', relation_id=relation_id) - def test_add_network_relation_id_incorrect(self): - relation_id = self.harness.add_relation('db', 'postgresql') + def test_add_network_relation_id_incorrect(self, harness: ops.testing.Harness[ops.CharmBase]): + relation_id = harness.add_relation('db', 
'postgresql') with pytest.raises(ops.ModelError): - self.harness.add_network('35.0.0.1', endpoint='db', relation_id=relation_id + 1) + harness.add_network('35.0.0.1', endpoint='db', relation_id=relation_id + 1) - def test_add_network_endpoint_and_relation_id_do_not_correspond(self): - relation_id = self.harness.add_relation('db', 'postgresql') + def test_add_network_endpoint_and_relation_id_do_not_correspond( + self, harness: ops.testing.Harness[ops.CharmBase]): + relation_id = harness.add_relation('db', 'postgresql') with pytest.raises(ops.ModelError): - self.harness.add_network('35.0.0.1', endpoint='foo', relation_id=relation_id) + harness.add_network('35.0.0.1', endpoint='foo', relation_id=relation_id) class DBRelationChangedHelper(ops.Object): @@ -3231,31 +3252,31 @@ def get_public_methods(obj: object): return public -class TestTestingModelBackend(unittest.TestCase): +class TestTestingModelBackend: - def test_conforms_to_model_backend(self): + def test_conforms_to_model_backend(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend mb_methods = get_public_methods(_ModelBackend) backend_methods = get_public_methods(backend) assert mb_methods == backend_methods - def test_model_uuid_is_uuid_v4(self): + def test_model_uuid_is_uuid_v4(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend assert uuid.UUID(backend.model_uuid).version == 4 - def test_status_set_get_unit(self): + def test_status_set_get_unit(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend backend.status_set('blocked', 'message', is_app=False) assert backend.status_get(is_app=False) == \ @@ -3263,11 +3284,11 @@ def test_status_set_get_unit(self): assert backend.status_get(is_app=True) == \ {'status': 'unknown', 'message': ''} - def test_status_set_get_app(self): + def test_status_set_get_app(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend backend.status_set('blocked', 'message', is_app=True) assert backend.status_get(is_app=True) == \ @@ -3275,14 +3296,14 @@ def test_status_set_get_app(self): assert backend.status_get(is_app=False) == \ {'status': 'maintenance', 'message': ''} - def test_relation_ids_unknown_relation(self): + def test_relation_ids_unknown_relation(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm provides: db: interface: mydb ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend # With no relations added, we just get an empty list for the interface assert backend.relation_ids('db') == [] @@ -3290,25 +3311,25 @@ def test_relation_ids_unknown_relation(self): with pytest.raises(ops.ModelError): backend.relation_ids('unknown') - def test_relation_get_unknown_relation_id(self): + def test_relation_get_unknown_relation_id(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + 
request.addfinalizer(harness.cleanup) backend = harness._backend with pytest.raises(ops.RelationNotFoundError): backend.relation_get(1234, 'unit/0', False) - def test_relation_list_unknown_relation_id(self): + def test_relation_list_unknown_relation_id(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend with pytest.raises(ops.RelationNotFoundError): backend.relation_list(1234) - def test_lazy_resource_directory(self): + def test_lazy_resource_directory(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -3316,7 +3337,7 @@ def test_lazy_resource_directory(self): type: oci-image description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.populate_oci_resources() backend = harness._backend assert backend._resource_dir is None @@ -3325,7 +3346,7 @@ def test_lazy_resource_directory(self): assert str(path).startswith(str(backend._resource_dir.name)), \ f'expected {path} to be a subdirectory of {backend._resource_dir.name}' - def test_resource_get_no_resource(self): + def test_resource_get_no_resource(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app resources: @@ -3333,21 +3354,21 @@ def test_resource_get_no_resource(self): type: file description: "Image to deploy." ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend with pytest.raises(ops.ModelError) as excinfo: backend.resource_get('foo') assert "units/unit-test-app-0/resources/foo: resource#test-app/foo not found" in \ str(excinfo.value) - def test_relation_remote_app_name(self): + def test_relation_remote_app_name(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-charm requires: db: interface: foo ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend assert backend.relation_remote_app_name(1) is None @@ -3360,17 +3381,17 @@ def test_relation_remote_app_name(self): assert backend.relation_remote_app_name(7) is None - def test_get_pebble_methods(self): + def test_get_pebble_methods(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) backend = harness._backend client = backend.get_pebble('/custom/socket/path') assert isinstance(client, _TestingPebbleClient) - def test_reboot(self): + def test_reboot(self, request: pytest.FixtureRequest): class RebootingCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -3386,7 +3407,7 @@ def _reboot(self, event: ops.RemoveEvent): harness = ops.testing.Harness(RebootingCharm, meta=''' name: test-app ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) assert harness.reboot_count == 0 backend = harness._backend backend.reboot() @@ -3402,33 +3423,28 @@ def _reboot(self, event: ops.RemoveEvent): assert harness.reboot_count == 4 -class _TestingPebbleClientMixin: - def get_testing_client(self): +# For testing non file ops of the pebble testing client. 
+class TestTestingPebbleClient: + @pytest.fixture + def client(self): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app containers: mycontainer: {} ''') - self.addCleanup(harness.cleanup) # type: ignore backend = harness._backend - client = backend.get_pebble('/charm/containers/mycontainer/pebble.socket') harness.set_can_connect('mycontainer', True) - return client - - -# For testing non file ops of the pebble testing client. -class TestTestingPebbleClient(unittest.TestCase, _TestingPebbleClientMixin): + yield client + harness.cleanup() - def test_methods_match_pebble_client(self): - client = self.get_testing_client() + def test_methods_match_pebble_client(self, client: _TestingPebbleClient): assert client is not None pebble_client_methods = get_public_methods(pebble.Client) testing_client_methods = get_public_methods(client) assert pebble_client_methods == testing_client_methods - def test_add_layer(self): - client = self.get_testing_client() + def test_add_layer(self, client: _TestingPebbleClient): plan = client.get_plan() assert isinstance(plan, pebble.Plan) assert plan.to_yaml() == '{}\n' @@ -3463,8 +3479,7 @@ def test_add_layer(self): summary: Serv ''') == plan.to_yaml() - def test_add_layer_merge(self): - client = self.get_testing_client() + def test_add_layer_merge(self, client: _TestingPebbleClient): plan = client.get_plan() assert isinstance(plan, pebble.Plan) assert plan.to_yaml() == '{}\n' @@ -3502,7 +3517,6 @@ def test_add_layer_merge(self): ''')) plan = client.get_plan() # The YAML should be normalized - self.maxDiff = None assert textwrap.dedent('''\ services: serv: @@ -3610,8 +3624,7 @@ def test_add_layer_merge(self): user-id: userID2 ''') == plan.to_yaml() - def test_add_layer_not_combined(self): - client = self.get_testing_client() + def test_add_layer_not_combined(self, client: _TestingPebbleClient): plan = client.get_plan() assert isinstance(plan, pebble.Plan) assert plan.to_yaml() == '{}\n' @@ -3638,8 +3651,7 @@ def test_add_layer_not_combined(self): with pytest.raises(RuntimeError): client.add_layer('foo', pebble.Layer(service)) - def test_add_layer_three_services(self): - client = self.get_testing_client() + def test_add_layer_three_services(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3668,7 +3680,6 @@ def test_add_layer_three_services(self): command: '/bin/echo baz' ''') plan = client.get_plan() - self.maxDiff = 1000 # Alphabetical services, and the YAML should be normalized assert textwrap.dedent('''\ services: @@ -3689,8 +3700,7 @@ def test_add_layer_three_services(self): summary: Foo ''') == plan.to_yaml() - def test_add_layer_combine_no_override(self): - client = self.get_testing_client() + def test_add_layer_combine_no_override(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3710,8 +3720,7 @@ def test_add_layer_combine_no_override(self): command: '/bin/echo foo' ''', combine=True) - def test_add_layer_combine_override_replace(self): - client = self.get_testing_client() + def test_add_layer_combine_override_replace(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3739,8 +3748,7 @@ def test_add_layer_combine_override_replace(self): override: replace ''') == client.get_plan().to_yaml() - def test_add_layer_combine_override_merge(self): - client = self.get_testing_client() + def test_add_layer_combine_override_merge(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ 
-3770,8 +3778,7 @@ def test_add_layer_combine_override_merge(self): summary: Foo ''') == client.get_plan().to_yaml() - def test_add_layer_combine_override_unknown(self): - client = self.get_testing_client() + def test_add_layer_combine_override_unknown(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3792,13 +3799,11 @@ def test_add_layer_combine_override_unknown(self): override: blah ''', combine=True) - def test_get_services_none(self): - client = self.get_testing_client() + def test_get_services_none(self, client: _TestingPebbleClient): service_info = client.get_services() assert service_info == [] - def test_get_services_not_started(self): - client = self.get_testing_client() + def test_get_services_not_started(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3824,8 +3829,7 @@ def test_get_services_not_started(self): assert foo_info.current == pebble.ServiceStatus.INACTIVE assert not foo_info.is_running() - def test_get_services_autostart(self): - client = self.get_testing_client() + def test_get_services_autostart(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3852,8 +3856,7 @@ def test_get_services_autostart(self): assert foo_info.current == pebble.ServiceStatus.ACTIVE assert foo_info.is_running() - def test_get_services_start_stop(self): - client = self.get_testing_client() + def test_get_services_start_stop(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3885,8 +3888,7 @@ def test_get_services_start_stop(self): assert bar_info.startup == pebble.ServiceStartup.DISABLED assert bar_info.current == pebble.ServiceStatus.INACTIVE - def test_get_services_bad_request(self): - client = self.get_testing_client() + def test_get_services_bad_request(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3903,8 +3905,7 @@ def test_get_services_bad_request(self): with pytest.raises(TypeError): client.get_services('foo') - def test_get_services_subset(self): - client = self.get_testing_client() + def test_get_services_subset(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3923,8 +3924,7 @@ def test_get_services_subset(self): assert foo_info.startup == pebble.ServiceStartup.ENABLED assert foo_info.current == pebble.ServiceStatus.INACTIVE - def test_get_services_unknown(self): - client = self.get_testing_client() + def test_get_services_unknown(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3942,28 +3942,24 @@ def test_get_services_unknown(self): infos = client.get_services(['unknown']) assert infos == [] - def test_invalid_start_service(self): - client = self.get_testing_client() + def test_invalid_start_service(self, client: _TestingPebbleClient): # TODO: jam 2021-04-20 This should become a better error with pytest.raises(RuntimeError): client.start_services(['unknown']) - def test_start_service_str(self): + def test_start_service_str(self, client: _TestingPebbleClient): # Start service takes a list of names, but it is really easy to accidentally pass just a # name - client = self.get_testing_client() with pytest.raises(TypeError): client.start_services('unknown') - def test_stop_service_str(self): + def test_stop_service_str(self, client: _TestingPebbleClient): # Start service takes a list of names, but it is really easy to accidentally pass just a # name - client = self.get_testing_client() 
with pytest.raises(TypeError): client.stop_services('unknown') - def test_mixed_start_service(self): - client = self.get_testing_client() + def test_mixed_start_service(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -3983,8 +3979,7 @@ def test_mixed_start_service(self): assert foo_info.startup == pebble.ServiceStartup.ENABLED assert foo_info.current == pebble.ServiceStatus.INACTIVE - def test_stop_services_unknown(self): - client = self.get_testing_client() + def test_stop_services_unknown(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -4005,10 +4000,9 @@ def test_stop_services_unknown(self): assert foo_info.startup == pebble.ServiceStartup.ENABLED assert foo_info.current == pebble.ServiceStatus.ACTIVE - def test_start_started_service(self): + def test_start_started_service(self, client: _TestingPebbleClient): # Pebble maintains idempotency even if you start a service # which is already started. - client = self.get_testing_client() client.add_layer('foo', '''\ summary: foo services: @@ -4036,10 +4030,9 @@ def test_start_started_service(self): assert foo_info.startup == pebble.ServiceStartup.ENABLED assert foo_info.current == pebble.ServiceStatus.ACTIVE - def test_stop_stopped_service(self): + def test_stop_stopped_service(self, client: _TestingPebbleClient): # Pebble maintains idempotency even if you stop a service # which is already stopped. - client = self.get_testing_client() client.add_layer('foo', '''\ summary: foo services: @@ -4068,8 +4061,7 @@ def test_stop_stopped_service(self): assert foo_info.current == pebble.ServiceStatus.INACTIVE @ unittest.skipUnless(is_linux, 'Pebble runs on Linux') - def test_send_signal(self): - client = self.get_testing_client() + def test_send_signal(self, client: _TestingPebbleClient): client.add_layer('foo', '''\ summary: foo services: @@ -4108,47 +4100,51 @@ def test_send_signal(self): client.send_signal("SIGINT", ("foo", "bar",)) +PebbleClientType = typing.Union[_TestingPebbleClient, pebble.Client] + + # For testing file-ops of the pebble client. This is refactored into a # separate mixin so we can run these tests against both the mock client as # well as a real pebble server instance. class PebbleStorageAPIsTestMixin: - # Override this in classes using this mixin. - # This should be set to any non-empty path, but without a trailing /. - prefix: str - - # Override this in classes using this mixin. 
- client: ops.pebble.Client - - assertEqual = unittest.TestCase.assertEqual # noqa - assertIn = unittest.TestCase.assertIn # noqa - assertIs = unittest.TestCase.assertIs # noqa - assertIsInstance = unittest.TestCase.assertIsInstance # noqa - assertRaises = unittest.TestCase.assertRaises # noqa - - def test_push_and_pull_bytes(self): + def test_push_and_pull_bytes( + self, + pebble_dir: str, + client: PebbleClientType, + ): self._test_push_and_pull_data( + pebble_dir, + client, original_data=b"\x00\x01\x02\x03\x04", encoding=None, stream_class=io.BytesIO) - def test_push_and_pull_non_utf8_data(self): + def test_push_and_pull_non_utf8_data( + self, + pebble_dir: str, + client: PebbleClientType, + ): self._test_push_and_pull_data( + pebble_dir, + client, original_data='日本語', # "Japanese" in Japanese encoding='sjis', stream_class=io.StringIO) - def _test_push_and_pull_data(self, - original_data: typing.Union[str, bytes], - encoding: typing.Optional[str], - stream_class: typing.Union[typing.Type[io.BytesIO], - typing.Type[io.StringIO]]): - client = self.client + def _test_push_and_pull_data( + self, + pebble_dir: str, + client: PebbleClientType, + original_data: typing.Union[str, bytes], + encoding: typing.Optional[str], + stream_class: typing.Union[typing.Type[io.BytesIO], typing.Type[io.StringIO]], + ): # We separate out the calls to make it clearer to type checkers what's happening. if encoding is None: - client.push(f"{self.prefix}/test", original_data) + client.push(f"{pebble_dir}/test", original_data) else: - client.push(f"{self.prefix}/test", original_data, encoding=encoding) - with client.pull(f"{self.prefix}/test", encoding=encoding) as infile: + client.push(f"{pebble_dir}/test", original_data, encoding=encoding) + with client.pull(f"{pebble_dir}/test", encoding=encoding) as infile: received_data = infile.read() assert original_data == received_data @@ -4156,67 +4152,85 @@ def _test_push_and_pull_data(self, if encoding is None: stream_class = typing.cast(typing.Type[io.BytesIO], stream_class) small_file = stream_class(typing.cast(bytes, original_data)) - client.push(f"{self.prefix}/test", small_file) + client.push(f"{pebble_dir}/test", small_file) else: stream_class = typing.cast(typing.Type[io.StringIO], stream_class) small_file = stream_class(typing.cast(str, original_data)) - client.push(f"{self.prefix}/test", small_file, encoding=encoding) - with client.pull(f"{self.prefix}/test", encoding=encoding) as infile: + client.push(f"{pebble_dir}/test", small_file, encoding=encoding) + with client.pull(f"{pebble_dir}/test", encoding=encoding) as infile: received_data = infile.read() assert original_data == received_data - def test_push_bytes_ignore_encoding(self): + def test_push_bytes_ignore_encoding( + self, + pebble_dir: str, + client: PebbleClientType, + ): # push() encoding param should be ignored if source is bytes - client = self.client - client.push(f"{self.prefix}/test", b'\x00\x01', encoding='utf-8') - with client.pull(f"{self.prefix}/test", encoding=None) as infile: + client.push(f"{pebble_dir}/test", b'\x00\x01', encoding='utf-8') + with client.pull(f"{pebble_dir}/test", encoding=None) as infile: received_data = infile.read() assert received_data == b'\x00\x01' - def test_push_bytesio_ignore_encoding(self): + def test_push_bytesio_ignore_encoding( + self, + pebble_dir: str, + client: PebbleClientType, + ): # push() encoding param should be ignored if source is binary stream - client = self.client - client.push(f"{self.prefix}/test", io.BytesIO(b'\x00\x01'), encoding='utf-8') - 
with client.pull(f"{self.prefix}/test", encoding=None) as infile: + client.push(f"{pebble_dir}/test", io.BytesIO(b'\x00\x01'), encoding='utf-8') + with client.pull(f"{pebble_dir}/test", encoding=None) as infile: received_data = infile.read() assert received_data == b'\x00\x01' - def test_push_and_pull_larger_file(self): + def test_push_and_pull_larger_file( + self, + pebble_dir: str, + client: PebbleClientType, + ): # Intent: to ensure things work appropriately with larger files. # Larger files may be sent/received in multiple chunks; this should help for # checking that such logic is correct. data_size = 1024 * 1024 original_data = os.urandom(data_size) - client = self.client - client.push(f"{self.prefix}/test", original_data) - with client.pull(f"{self.prefix}/test", encoding=None) as infile: + client.push(f"{pebble_dir}/test", original_data) + with client.pull(f"{pebble_dir}/test", encoding=None) as infile: received_data = infile.read() assert original_data == received_data - def test_push_to_non_existent_subdir(self): + def test_push_to_non_existent_subdir( + self, + pebble_dir: str, + client: PebbleClientType, + ): data = 'data' - client = self.client with pytest.raises(pebble.PathError) as excinfo: - client.push(f"{self.prefix}/nonexistent_dir/test", data, make_dirs=False) + client.push(f"{pebble_dir}/nonexistent_dir/test", data, make_dirs=False) assert excinfo.value.kind == 'not-found' - client.push(f"{self.prefix}/nonexistent_dir/test", data, make_dirs=True) + client.push(f"{pebble_dir}/nonexistent_dir/test", data, make_dirs=True) - def test_push_as_child_of_file_raises_error(self): + def test_push_as_child_of_file_raises_error( + self, + pebble_dir: str, + client: PebbleClientType, + ): data = 'data' - client = self.client - client.push(f"{self.prefix}/file", data) + client.push(f"{pebble_dir}/file", data) with pytest.raises(pebble.PathError) as excinfo: - client.push(f"{self.prefix}/file/file", data) + client.push(f"{pebble_dir}/file/file", data) assert excinfo.value.kind == 'generic-file-error' - def test_push_with_permission_mask(self): + def test_push_with_permission_mask( + self, + pebble_dir: str, + client: PebbleClientType, + ): data = 'data' - client = self.client - client.push(f"{self.prefix}/file", data, permissions=0o600) - client.push(f"{self.prefix}/file", data, permissions=0o777) + client.push(f"{pebble_dir}/file", data, permissions=0o600) + client.push(f"{pebble_dir}/file", data, permissions=0o777) # If permissions are outside of the range 0o000 through 0o777, an exception should be # raised. for bad_permission in ( @@ -4224,28 +4238,31 @@ def test_push_with_permission_mask(self): -1, # Less than 0o000 ): with pytest.raises(pebble.PathError) as excinfo: - client.push(f"{self.prefix}/file", data, permissions=bad_permission) + client.push(f"{pebble_dir}/file", data, permissions=bad_permission) assert excinfo.value.kind == 'generic-file-error' - def test_push_files_and_list(self): + def test_push_files_and_list( + self, + pebble_dir: str, + client: PebbleClientType, + ): data = 'data' - client = self.client # Let's push the first file with a bunch of details. We'll check on this later. client.push( - f"{self.prefix}/file1", data, + f"{pebble_dir}/file1", data, permissions=0o620) # Do a quick push with defaults for the other files. 
- client.push(f"{self.prefix}/file2", data) - client.push(f"{self.prefix}/file3", data) + client.push(f"{pebble_dir}/file2", data) + client.push(f"{pebble_dir}/file3", data) - files = client.list_files(f"{self.prefix}/") + files = client.list_files(f"{pebble_dir}/") assert {file.path for file in files} == \ - {self.prefix + file for file in ('/file1', '/file2', '/file3')} + {pebble_dir + file for file in ('/file1', '/file2', '/file3')} # Let's pull the first file again and check its details - file = [f for f in files if f.path == f"{self.prefix}/file1"][0] + file = [f for f in files if f.path == f"{pebble_dir}/file1"][0] assert file.name == 'file1' assert file.type == pebble.FileType.FILE assert file.size == 4 @@ -4253,34 +4270,48 @@ def test_push_files_and_list(self): assert file.permissions == 0o620 # Skipping ownership checks here; ownership will be checked in purely-mocked tests - def test_push_and_list_file(self): + def test_push_and_list_file( + self, + pebble_dir: str, + client: PebbleClientType, + ): data = 'data' - client = self.client - client.push(f"{self.prefix}/file", data) - files = client.list_files(f"{self.prefix}/") - assert {file.path for file in files} == {f"{self.prefix}/file"} - - def test_push_file_with_relative_path_fails(self): - client = self.client + client.push(f"{pebble_dir}/file", data) + files = client.list_files(f"{pebble_dir}/") + assert {file.path for file in files} == {f"{pebble_dir}/file"} + + def test_push_file_with_relative_path_fails( + self, + client: PebbleClientType, + ): with pytest.raises(pebble.PathError) as excinfo: client.push('file', '') assert excinfo.value.kind == 'generic-file-error' - def test_pull_not_found(self): + def test_pull_not_found( + self, + client: PebbleClientType, + ): with pytest.raises(pebble.PathError) as excinfo: - self.client.pull("/not/found") + client.pull("/not/found") assert excinfo.value.kind == "not-found" assert "/not/found" in excinfo.value.message - def test_pull_directory(self): - self.client.make_dir(f"{self.prefix}/subdir") + def test_pull_directory( + self, + pebble_dir: str, + client: PebbleClientType, + ): + client.make_dir(f"{pebble_dir}/subdir") with pytest.raises(pebble.PathError) as excinfo: - self.client.pull(f"{self.prefix}/subdir") + client.pull(f"{pebble_dir}/subdir") assert excinfo.value.kind == "generic-file-error" - assert f"{self.prefix}/subdir" in excinfo.value.message + assert f"{pebble_dir}/subdir" in excinfo.value.message - def test_list_files_not_found_raises(self): - client = self.client + def test_list_files_not_found_raises( + self, + client: PebbleClientType, + ): with pytest.raises(pebble.APIError) as excinfo: client.list_files("/not/existing/file/") assert excinfo.value.code == 404 @@ -4288,9 +4319,11 @@ def test_list_files_not_found_raises(self): assert excinfo.value.message == 'stat /not/existing/file/: no ' \ 'such file or directory' - def test_list_directory_object_itself(self): - client = self.client - + def test_list_directory_object_itself( + self, + pebble_dir: str, + client: PebbleClientType, + ): # Test with root dir # (Special case; we won't prefix this, even when using the real Pebble server.) 
files = client.list_files('/', itself=True) @@ -4301,77 +4334,93 @@ def test_list_directory_object_itself(self): assert dir_.type == pebble.FileType.DIRECTORY # Test with subdirs - client.make_dir(f"{self.prefix}/subdir") - files = client.list_files(f"{self.prefix}/subdir", itself=True) + client.make_dir(f"{pebble_dir}/subdir") + files = client.list_files(f"{pebble_dir}/subdir", itself=True) assert len(files) == 1 dir_ = files[0] assert dir_.name == 'subdir' assert dir_.type == pebble.FileType.DIRECTORY - def test_push_files_and_list_by_pattern(self): + def test_push_files_and_list_by_pattern( + self, + pebble_dir: str, + client: PebbleClientType, + ): # Note: glob pattern deltas do exist between golang and Python, but here, # we'll just use a simple * pattern. data = 'data' - client = self.client for filename in ( '/file1.gz', '/file2.tar.gz', '/file3.tar.bz2', '/backup_file.gz', ): - client.push(self.prefix + filename, data) - files = client.list_files(f"{self.prefix}/", pattern='file*.gz') + client.push(pebble_dir + filename, data) + files = client.list_files(f"{pebble_dir}/", pattern='file*.gz') assert {file.path for file in files} == \ - {self.prefix + file for file in ('/file1.gz', '/file2.tar.gz')} - - def test_make_directory(self): - client = self.client - client.make_dir(f"{self.prefix}/subdir") - assert client.list_files(f"{self.prefix}/", pattern='subdir')[0].path == \ - f"{self.prefix}/subdir" - client.make_dir(f"{self.prefix}/subdir/subdir") - assert client.list_files(f"{self.prefix}/subdir", pattern='subdir')[0].path == \ - f"{self.prefix}/subdir/subdir" - - def test_make_directory_recursively(self): - client = self.client - + {pebble_dir + file for file in ('/file1.gz', '/file2.tar.gz')} + + def test_make_directory( + self, + pebble_dir: str, + client: PebbleClientType, + ): + client.make_dir(f"{pebble_dir}/subdir") + assert client.list_files(f"{pebble_dir}/", pattern='subdir')[0].path == \ + f"{pebble_dir}/subdir" + client.make_dir(f"{pebble_dir}/subdir/subdir") + assert client.list_files(f"{pebble_dir}/subdir", pattern='subdir')[0].path == \ + f"{pebble_dir}/subdir/subdir" + + def test_make_directory_recursively( + self, + pebble_dir: str, + client: PebbleClientType, + ): with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{self.prefix}/subdir/subdir", make_parents=False) + client.make_dir(f"{pebble_dir}/subdir/subdir", make_parents=False) assert excinfo.value.kind == 'not-found' - client.make_dir(f"{self.prefix}/subdir/subdir", make_parents=True) - assert client.list_files(f"{self.prefix}/subdir", pattern='subdir')[0].path == \ - f"{self.prefix}/subdir/subdir" + client.make_dir(f"{pebble_dir}/subdir/subdir", make_parents=True) + assert client.list_files(f"{pebble_dir}/subdir", pattern='subdir')[0].path == \ + f"{pebble_dir}/subdir/subdir" - def test_make_directory_with_relative_path_fails(self): - client = self.client + def test_make_directory_with_relative_path_fails( + self, + client: PebbleClientType, + ): with pytest.raises(pebble.PathError) as excinfo: client.make_dir('dir') assert excinfo.value.kind == 'generic-file-error' - def test_make_subdir_of_file_fails(self): - client = self.client - client.push(f"{self.prefix}/file", 'data') + def test_make_subdir_of_file_fails( + self, + pebble_dir: str, + client: PebbleClientType, + ): + client.push(f"{pebble_dir}/file", 'data') # Direct child case with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{self.prefix}/file/subdir") + client.make_dir(f"{pebble_dir}/file/subdir") assert 
excinfo.value.kind == 'generic-file-error' # Recursive creation case, in case its flow is different with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{self.prefix}/file/subdir/subdir", make_parents=True) + client.make_dir(f"{pebble_dir}/file/subdir/subdir", make_parents=True) assert excinfo.value.kind == 'generic-file-error' - def test_make_dir_with_permission_mask(self): - client = self.client - client.make_dir(f"{self.prefix}/dir1", permissions=0o700) - client.make_dir(f"{self.prefix}/dir2", permissions=0o777) + def test_make_dir_with_permission_mask( + self, + pebble_dir: str, + client: PebbleClientType, + ): + client.make_dir(f"{pebble_dir}/dir1", permissions=0o700) + client.make_dir(f"{pebble_dir}/dir2", permissions=0o777) - files = client.list_files(f"{self.prefix}/", pattern='dir*') - assert [f for f in files if f.path == f"{self.prefix}/dir1"][0].permissions == 0o700 - assert [f for f in files if f.path == f"{self.prefix}/dir2"][0].permissions == 0o777 + files = client.list_files(f"{pebble_dir}/", pattern='dir*') + assert [f for f in files if f.path == f"{pebble_dir}/dir1"][0].permissions == 0o700 + assert [f for f in files if f.path == f"{pebble_dir}/dir2"][0].permissions == 0o777 # If permissions are outside of the range 0o000 through 0o777, an exception should be # raised. @@ -4380,37 +4429,40 @@ def test_make_dir_with_permission_mask(self): -1, # Less than 0o000 )): with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{self.prefix}/dir3_{i}", permissions=bad_permission) + client.make_dir(f"{pebble_dir}/dir3_{i}", permissions=bad_permission) assert excinfo.value.kind == 'generic-file-error' - def test_remove_path(self): - client = self.client - client.push(f"{self.prefix}/file", '') - client.make_dir(f"{self.prefix}/dir/subdir", make_parents=True) - client.push(f"{self.prefix}/dir/subdir/file1", '') - client.push(f"{self.prefix}/dir/subdir/file2", '') - client.push(f"{self.prefix}/dir/subdir/file3", '') - client.make_dir(f"{self.prefix}/empty_dir") + def test_remove_path( + self, + pebble_dir: str, + client: PebbleClientType, + ): + client.push(f"{pebble_dir}/file", '') + client.make_dir(f"{pebble_dir}/dir/subdir", make_parents=True) + client.push(f"{pebble_dir}/dir/subdir/file1", '') + client.push(f"{pebble_dir}/dir/subdir/file2", '') + client.push(f"{pebble_dir}/dir/subdir/file3", '') + client.make_dir(f"{pebble_dir}/empty_dir") - client.remove_path(f"{self.prefix}/file") + client.remove_path(f"{pebble_dir}/file") - client.remove_path(f"{self.prefix}/empty_dir") + client.remove_path(f"{pebble_dir}/empty_dir") # Remove non-empty directory, recursive=False: error with pytest.raises(pebble.PathError) as excinfo: - client.remove_path(f"{self.prefix}/dir", recursive=False) + client.remove_path(f"{pebble_dir}/dir", recursive=False) assert excinfo.value.kind == 'generic-file-error' # Remove non-empty directory, recursive=True: succeeds (and removes child objects) - client.remove_path(f"{self.prefix}/dir", recursive=True) + client.remove_path(f"{pebble_dir}/dir", recursive=True) # Remove non-existent path, recursive=False: error with pytest.raises(pebble.PathError) as excinfo: - client.remove_path(f"{self.prefix}/dir/does/not/exist/asdf", recursive=False) + client.remove_path(f"{pebble_dir}/dir/does/not/exist/asdf", recursive=False) assert excinfo.value.kind == 'not-found' # Remove non-existent path, recursive=True: succeeds - client.remove_path(f"{self.prefix}/dir/does/not/exist/asdf", recursive=True) + 
client.remove_path(f"{pebble_dir}/dir/does/not/exist/asdf", recursive=True) # Other notes: # * Parent directories created via push(make_dirs=True) default to root:root ownership @@ -4428,17 +4480,27 @@ class _MakedirArgs(typing.TypedDict): group: typing.Optional[str] -class TestPebbleStorageAPIsUsingMocks( - unittest.TestCase, - _TestingPebbleClientMixin, - PebbleStorageAPIsTestMixin): - def setUp(self): - self.prefix = '/prefix' - self.client = self.get_testing_client() - if self.prefix: - self.client.make_dir(self.prefix, make_parents=True) +class TestPebbleStorageAPIsUsingMocks(PebbleStorageAPIsTestMixin): + @pytest.fixture + def client(self): + harness = ops.testing.Harness(ops.CharmBase, meta=''' + name: test-app + containers: + mycontainer: {} + ''') + backend = harness._backend + client = backend.get_pebble('/charm/containers/mycontainer/pebble.socket') + harness.set_can_connect('mycontainer', True) + yield client + harness.cleanup() + + @pytest.fixture + def pebble_dir(self, client: PebbleClientType): + pebble_dir = '/prefix' + client.make_dir(pebble_dir, make_parents=True) + return pebble_dir - def test_container_storage_mounts(self): + def test_container_storage_mounts(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test-app containers: @@ -4460,7 +4522,7 @@ def test_container_storage_mounts(self): store2: type: filesystem ''') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) store_id = harness.add_storage('store1')[0] harness.attach_storage(store_id) @@ -4512,9 +4574,12 @@ def _select_testing_user_group(self): group = [g for g in grp.getgrall() if g.gr_gid != os.getgid()][0] return user, group - def test_push_with_ownership(self): + def test_push_with_ownership( + self, + pebble_dir: str, + client: PebbleClientType, + ): data = 'data' - client = self.client user, group = self._select_testing_user_group() cases: typing.List[_MakedirArgs] = [ { @@ -4549,12 +4614,15 @@ def test_push_with_ownership(self): } ] for idx, case in enumerate(cases): - client.push(f"{self.prefix}/file{idx}", data, **case) - file_ = client.list_files(f"{self.prefix}/file{idx}")[0] - assert file_.path == f"{self.prefix}/file{idx}" - - def test_make_dir_with_ownership(self): - client = self.client + client.push(f"{pebble_dir}/file{idx}", data, **case) + file_ = client.list_files(f"{pebble_dir}/file{idx}")[0] + assert file_.path == f"{pebble_dir}/file{idx}" + + def test_make_dir_with_ownership( + self, + pebble_dir: str, + client: PebbleClientType, + ): user, group = self._select_testing_user_group() cases: typing.List[_MakedirArgs] = [ { @@ -4589,26 +4657,33 @@ def test_make_dir_with_ownership(self): } ] for idx, case in enumerate(cases): - client.make_dir(f"{self.prefix}/dir{idx}", **case) - dir_ = client.list_files(f"{self.prefix}/dir{idx}", itself=True)[0] - assert dir_.path == f"{self.prefix}/dir{idx}" + client.make_dir(f"{pebble_dir}/dir{idx}", **case) + dir_ = client.list_files(f"{pebble_dir}/dir{idx}", itself=True)[0] + assert dir_.path == f"{pebble_dir}/dir{idx}" @patch("grp.getgrgid") @patch("pwd.getpwuid") - def test_list_files_unnamed(self, getpwuid: MagicMock, getgrgid: MagicMock): + def test_list_files_unnamed( + self, + getpwuid: MagicMock, + getgrgid: MagicMock, + pebble_dir: str, + client: PebbleClientType, + ): getpwuid.side_effect = KeyError getgrgid.side_effect = KeyError data = 'data' - self.client.push(f"{self.prefix}/file", data) - files = self.client.list_files(f"{self.prefix}/") + 
client.push(f"{pebble_dir}/file", data) + files = client.list_files(f"{pebble_dir}/") assert len(files) == 1 assert files[0].user is None assert files[0].group is None -class TestFilesystem(unittest.TestCase, _TestingPebbleClientMixin): - def setUp(self) -> None: - self.harness = ops.testing.Harness(ops.CharmBase, meta=''' +class TestFilesystem: + @pytest.fixture + def harness(self): + harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test containers: test-container: @@ -4619,93 +4694,107 @@ def setUp(self) -> None: test-storage: type: filesystem ''') - self.harness.begin() - self.harness.set_can_connect("test-container", True) - self.root = self.harness.get_filesystem_root("test-container") - self.container = self.harness.charm.unit.get_container("test-container") + harness.begin() + harness.set_can_connect("test-container", True) + yield harness + harness.cleanup() - def tearDown(self) -> None: - self.harness.cleanup() + @pytest.fixture + def container_fs_root(self, harness: ops.testing.Harness[ops.CharmBase]): + return harness.get_filesystem_root("test-container") - def test_push(self): - self.container.push("/foo", source="foo") - assert (self.root / "foo").is_file() - assert (self.root / "foo").read_text() == "foo" + @pytest.fixture + def container(self, harness: ops.testing.Harness[ops.CharmBase]): + return harness.charm.unit.get_container("test-container") - def test_push_create_parent(self): - self.container.push("/foo/bar", source="bar", make_dirs=True) - assert (self.root / "foo").is_dir() - assert (self.root / "foo" / "bar").read_text() == "bar" + def test_push(self, container: ops.Container, container_fs_root: pathlib.Path): + container.push("/foo", source="foo") + assert (container_fs_root / "foo").is_file() + assert (container_fs_root / "foo").read_text() == "foo" - def test_push_path(self): + def test_push_create_parent(self, container: ops.Container, container_fs_root: pathlib.Path): + container.push("/foo/bar", source="bar", make_dirs=True) + assert (container_fs_root / "foo").is_dir() + assert (container_fs_root / "foo" / "bar").read_text() == "bar" + + def test_push_path(self, container: ops.Container, container_fs_root: pathlib.Path): with tempfile.TemporaryDirectory() as temp: tempdir = pathlib.Path(temp) (tempdir / "foo/bar").mkdir(parents=True) (tempdir / "foo/test").write_text("test") (tempdir / "foo/bar/foobar").write_text("foobar") (tempdir / "foo/baz").mkdir(parents=True) - self.container.push_path(tempdir / "foo", "/tmp") # noqa: S108 - - assert (self.root / "tmp").is_dir() - assert (self.root / "tmp/foo").is_dir() - assert (self.root / "tmp/foo/bar").is_dir() - assert (self.root / "tmp/foo/baz").is_dir() - assert (self.root / "tmp/foo/test").read_text() == "test" - assert (self.root / "tmp/foo/bar/foobar").read_text() == "foobar" - - def test_make_dir(self): - self.container.make_dir("/tmp") # noqa: S108 - assert (self.root / "tmp").is_dir() - self.container.make_dir("/foo/bar/foobar", make_parents=True) - assert (self.root / "foo/bar/foobar").is_dir() - - def test_pull(self): - (self.root / "foo").write_text("foo") - assert self.container.pull("/foo").read() == "foo" - - def test_pull_path(self): - (self.root / "foo").mkdir() - (self.root / "foo/bar").write_text("bar") - (self.root / "foobar").mkdir() - (self.root / "test").write_text("test") + container.push_path(tempdir / "foo", "/tmp") # noqa: S108 + + assert (container_fs_root / "tmp").is_dir() + assert (container_fs_root / "tmp/foo").is_dir() + assert (container_fs_root / "tmp/foo/bar").is_dir() 
+ assert (container_fs_root / "tmp/foo/baz").is_dir() + assert (container_fs_root / "tmp/foo/test").read_text() == "test" + assert (container_fs_root / "tmp/foo/bar/foobar").read_text() == "foobar" + + def test_make_dir(self, container: ops.Container, container_fs_root: pathlib.Path): + container.make_dir("/tmp") # noqa: S108 + assert (container_fs_root / "tmp").is_dir() + container.make_dir("/foo/bar/foobar", make_parents=True) + assert (container_fs_root / "foo/bar/foobar").is_dir() + + def test_pull(self, container: ops.Container, container_fs_root: pathlib.Path): + (container_fs_root / "foo").write_text("foo") + assert container.pull("/foo").read() == "foo" + + def test_pull_path(self, container: ops.Container, container_fs_root: pathlib.Path): + (container_fs_root / "foo").mkdir() + (container_fs_root / "foo/bar").write_text("bar") + (container_fs_root / "foobar").mkdir() + (container_fs_root / "test").write_text("test") with tempfile.TemporaryDirectory() as temp: tempdir = pathlib.Path(temp) - self.container.pull_path("/", tempdir) + container.pull_path("/", tempdir) assert (tempdir / "foo").is_dir() assert (tempdir / "foo/bar").read_text() == "bar" assert (tempdir / "foobar").is_dir() assert (tempdir / "test").read_text() == "test" - def test_list_files(self): - (self.root / "foo").mkdir() - self.assertSequenceEqual(self.container.list_files("/foo"), []) - assert len(self.container.list_files("/")) == 1 - file_info = self.container.list_files("/")[0] + def test_list_files(self, container: ops.Container, container_fs_root: pathlib.Path): + (container_fs_root / "foo").mkdir() + assert container.list_files("/foo") == [] + assert len(container.list_files("/")) == 1 + file_info = container.list_files("/")[0] assert file_info.path == "/foo" assert file_info.type == FileType.DIRECTORY - assert self.container.list_files("/foo", itself=True)[0].path == "/foo" - (self.root / "foo/bar").write_text("foobar") - assert len(self.container.list_files("/foo")) == 1 - assert len(self.container.list_files("/foo", pattern="*ar")) == 1 - assert len(self.container.list_files("/foo", pattern="*oo")) == 0 - file_info = self.container.list_files("/foo")[0] + assert container.list_files("/foo", itself=True)[0].path == "/foo" + (container_fs_root / "foo/bar").write_text("foobar") + assert len(container.list_files("/foo")) == 1 + assert len(container.list_files("/foo", pattern="*ar")) == 1 + assert len(container.list_files("/foo", pattern="*oo")) == 0 + file_info = container.list_files("/foo")[0] assert file_info.path == "/foo/bar" assert file_info.type == FileType.FILE - root_info = self.container.list_files("/", itself=True)[0] + root_info = container.list_files("/", itself=True)[0] assert root_info.path == "/" assert root_info.name == "/" - def test_storage_mount(self): - storage_id = self.harness.add_storage("test-storage", 1, attach=True)[0] - assert (self.root / "mounts/foo").exists() - (self.root / "mounts/foo/bar").write_text("foobar") - assert self.container.pull("/mounts/foo/bar").read() == "foobar" - self.harness.detach_storage(storage_id) - assert not (self.root / "mounts/foo/bar").is_file() - self.harness.attach_storage(storage_id) - assert (self.root / "mounts/foo/bar").read_text(), "foobar" - - def _make_storage_attach_harness(self, meta: typing.Optional[str] = None): + def test_storage_mount( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + container_fs_root: pathlib.Path, + ): + storage_id = harness.add_storage("test-storage", 1, attach=True)[0] + assert 
(container_fs_root / "mounts/foo").exists() + (container_fs_root / "mounts/foo/bar").write_text("foobar") + assert container.pull("/mounts/foo/bar").read() == "foobar" + harness.detach_storage(storage_id) + assert not (container_fs_root / "mounts/foo/bar").is_file() + harness.attach_storage(storage_id) + assert (container_fs_root / "mounts/foo/bar").read_text(), "foobar" + + def _make_storage_attach_harness( + self, + request: pytest.FixtureRequest, + meta: typing.Optional[str] = None, + ): class MyCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -4730,26 +4819,26 @@ def _on_attach(self, event: ops.StorageAttachedEvent): type: filesystem ''' harness = ops.testing.Harness(MyCharm, meta=meta) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) return harness - def test_storage_attach_begin_no_emit(self): + def test_storage_attach_begin_no_emit(self, request: pytest.FixtureRequest): """If `begin()` hasn't been called, `attach` does not emit storage-attached.""" - harness = self._make_storage_attach_harness() + harness = self._make_storage_attach_harness(request) harness.add_storage('test-storage', attach=True) harness.begin() assert 'test-storage/0' not in harness.charm.attached - def test_storage_attach_begin_with_hooks_emits(self): + def test_storage_attach_begin_with_hooks_emits(self, request: pytest.FixtureRequest): """`attach` doesn't emit storage-attached before `begin_with_initial_hooks`.""" - harness = self._make_storage_attach_harness() + harness = self._make_storage_attach_harness(request) harness.add_storage('test-storage', attach=True) harness.begin_with_initial_hooks() assert 'test-storage/0' in harness.charm.attached assert harness.charm.locations[0] - def test_storage_add_with_later_attach(self): - harness = self._make_storage_attach_harness() + def test_storage_add_with_later_attach(self, request: pytest.FixtureRequest): + harness = self._make_storage_attach_harness(request) harness.begin() storage_ids = harness.add_storage('test-storage', attach=False) assert 'test-storage/0' not in harness.charm.attached @@ -4761,7 +4850,7 @@ def test_storage_add_with_later_attach(self): harness.attach_storage(s_id) assert harness.charm.attached.count('test-storage/0') == 1 - def test_storage_machine_charm_metadata(self): + def test_storage_machine_charm_metadata(self, request: pytest.FixtureRequest): meta = ''' name: test storage: @@ -4769,12 +4858,12 @@ def test_storage_machine_charm_metadata(self): type: filesystem mount: /mounts/foo ''' - harness = self._make_storage_attach_harness(meta) + harness = self._make_storage_attach_harness(request, meta) harness.begin() harness.add_storage('test-storage', attach=True) assert 'test-storage/0' in harness.charm.attached - def test_storage_multiple_storage_instances(self): + def test_storage_multiple_storage_instances(self, request: pytest.FixtureRequest): meta = ''' name: test storage: @@ -4784,7 +4873,7 @@ def test_storage_multiple_storage_instances(self): multiple: range: 2-4 ''' - harness = self._make_storage_attach_harness(meta) + harness = self._make_storage_attach_harness(request, meta) harness.begin() harness.add_storage('test-storage', 2, attach=True) assert harness.charm.attached == ['test-storage/0', 'test-storage/1'] @@ -4795,12 +4884,12 @@ def test_storage_multiple_storage_instances(self): assert len(set(harness.charm.locations)) == 4 -class TestSecrets(unittest.TestCase): - def test_add_model_secret_by_app_name_str(self): +class TestSecrets: + def 
test_add_model_secret_by_app_name_str(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4810,11 +4899,11 @@ def test_add_model_secret_by_app_name_str(self): assert secret.id == secret_id assert secret.get_content() == {'password': 'hunter2'} - def test_add_model_secret_by_app_instance(self): + def test_add_model_secret_by_app_instance(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4825,11 +4914,11 @@ def test_add_model_secret_by_app_instance(self): assert secret.id == secret_id assert secret.get_content() == {'password': 'hunter3'} - def test_add_model_secret_by_unit_instance(self): + def test_add_model_secret_by_unit_instance(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4840,11 +4929,11 @@ def test_add_model_secret_by_unit_instance(self): assert secret.id == secret_id assert secret.get_content() == {'password': 'hunter4'} - def test_get_secret_as_owner(self): + def test_get_secret_as_owner(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # App secret. 
secret_id = harness.charm.app.add_secret({'password': 'hunter5'}).id @@ -4857,9 +4946,9 @@ def test_get_secret_as_owner(self): assert secret.id == secret_id assert secret.get_content() == {'password': 'hunter6'} - def test_get_secret_and_refresh(self): + def test_get_secret_and_refresh(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(True) secret = harness.charm.app.add_secret({'password': 'hunter6'}) @@ -4871,9 +4960,9 @@ def test_get_secret_and_refresh(self): assert retrieved_secret.get_content(refresh=True) == {'password': 'hunter7'} assert retrieved_secret.get_content() == {'password': 'hunter7'} - def test_get_secret_removed(self): + def test_get_secret_removed(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(True) secret = harness.charm.app.add_secret({'password': 'hunter8'}) @@ -4882,9 +4971,9 @@ def test_get_secret_removed(self): with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret.id) - def test_get_secret_by_label(self): + def test_get_secret_by_label(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.charm.app.add_secret({'password': 'hunter9'}, label="my-pass").id secret = harness.model.get_secret(label="my-pass") @@ -4895,18 +4984,18 @@ def test_get_secret_by_label(self): secret = harness.model.get_secret(label="other-name") assert secret.get_content() == {'password': 'hunter9'} - def test_add_model_secret_invalid_content(self): + def test_add_model_secret_invalid_content(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(ValueError): harness.add_model_secret('database', {'x': 'y'}) # key too short - def test_set_secret_content(self): + def test_set_secret_content(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4926,35 +5015,35 @@ def test_set_secret_content(self): assert harness.get_secret_revisions(secret_id) == [1, 2] - def test_set_secret_content_wrong_owner(self): + def test_set_secret_content_wrong_owner(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret = harness.model.app.add_secret({'foo': 'bar'}) with pytest.raises(RuntimeError): assert secret.id is not None harness.set_secret_content(secret.id, {'bar': 'foo'}) - def test_set_secret_content_invalid_secret_id(self): + def test_set_secret_content_invalid_secret_id(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): harness.set_secret_content('asdf', {'foo': 'bar'}) 
- def test_set_secret_content_invalid_content(self): + def test_set_secret_content_invalid_content(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret_id = harness.add_model_secret('database', {'foo': 'bar'}) with pytest.raises(ValueError): harness.set_secret_content(secret_id, {'x': 'y'}) - def test_grant_secret_and_revoke_secret(self): + def test_grant_secret_and_revoke_secret(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4968,11 +5057,11 @@ def test_grant_secret_and_revoke_secret(self): with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret_id) - def test_grant_secret_wrong_app(self): + def test_grant_secret_wrong_app(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4981,11 +5070,11 @@ def test_grant_secret_wrong_app(self): with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret_id) - def test_grant_secret_wrong_unit(self): + def test_grant_secret_wrong_unit(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4994,19 +5083,19 @@ def test_grant_secret_wrong_unit(self): with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret_id) - def test_grant_secret_no_relation(self): + def test_grant_secret_no_relation(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret_id = harness.add_model_secret('database', {'password': 'hunter2'}) with pytest.raises(RuntimeError): harness.grant_secret(secret_id, 'webapp') - def test_get_secret_grants(self): + def test_get_secret_grants(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'database', 'provides': {'db': {'interface': 'pgsql'}}} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'webapp') harness.add_relation_unit(relation_id, 'webapp/0') @@ -5026,9 +5115,9 @@ def test_get_secret_grants(self): secret.grant(rel, unit=harness.model.get_unit('webapp/0')) assert harness.get_secret_grants(secret.id, relation_id) == {'webapp/0'} - def test_trigger_secret_rotation(self): + def test_trigger_secret_rotation(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret = harness.model.app.add_secret({'foo': 'x'}, label='lbl') assert secret.id is not None @@ -5052,20 
+5141,20 @@ def test_trigger_secret_rotation(self): with pytest.raises(RuntimeError): harness.trigger_secret_rotation('nosecret') - def test_trigger_secret_rotation_on_user_secret(self): + def test_trigger_secret_rotation_on_user_secret(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret_id = harness.add_user_secret({'foo': 'bar'}) assert secret_id is not None harness.begin() - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): harness.trigger_secret_rotation(secret_id) - def test_trigger_secret_removal(self): + def test_trigger_secret_removal(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret = harness.model.app.add_secret({'foo': 'x'}, label='lbl') assert secret.id is not None @@ -5091,9 +5180,9 @@ def test_trigger_secret_removal(self): with pytest.raises(RuntimeError): harness.trigger_secret_removal('nosecret', 1) - def test_trigger_secret_expiration(self): + def test_trigger_secret_expiration(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret = harness.model.app.add_secret({'foo': 'x'}, label='lbl') assert secret.id is not None @@ -5119,20 +5208,20 @@ def test_trigger_secret_expiration(self): with pytest.raises(RuntimeError): harness.trigger_secret_removal('nosecret', 1) - def test_trigger_secret_expiration_on_user_secret(self): + def test_trigger_secret_expiration_on_user_secret(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) secret_id = harness.add_user_secret({'foo': 'bar'}) assert secret_id is not None harness.begin() - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): harness.trigger_secret_expiration(secret_id, 1) - def test_secret_permissions_unit(self): + def test_secret_permissions_unit(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # The charm can always manage a local unit secret. @@ -5144,9 +5233,9 @@ def test_secret_permissions_unit(self): secret.set_content({"password": "5678"}) secret.remove_all_revisions() - def test_secret_permissions_leader(self): + def test_secret_permissions_leader(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # The leader can manage an application secret. @@ -5159,9 +5248,9 @@ def test_secret_permissions_leader(self): secret.set_content({"password": "5678"}) secret.remove_all_revisions() - def test_secret_permissions_nonleader(self): + def test_secret_permissions_nonleader(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: database') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() # Non-leaders can only view an application secret. 
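
For reference, the unittest-to-pytest conversion applied throughout these hunks reduces to one small pattern. The sketch below is a minimal, self-contained illustration of that pattern only, assuming a hypothetical placeholder charm (`_MinimalCharm`) and the public `ops.testing.Harness` API; it is not code taken from this repository's test suite. A yielding fixture takes over the setUp/tearDown pair, and `request.addfinalizer` stands in for unittest's `self.addCleanup` when a test builds its own Harness inline.

import ops
import ops.testing
import pytest


class _MinimalCharm(ops.CharmBase):
    """Placeholder charm used only for this illustration."""


@pytest.fixture
def harness():
    # Replaces the setUp/tearDown pair: build the Harness, hand it to the
    # test, and always clean it up afterwards.
    harness = ops.testing.Harness(_MinimalCharm, meta='name: webapp')
    yield harness
    harness.cleanup()


def test_leader_manages_app_secret(harness: ops.testing.Harness[_MinimalCharm]):
    harness.set_leader(True)
    harness.begin()
    secret = harness.charm.app.add_secret({'password': '1234'})
    assert secret.get_content() == {'password': '1234'}


def test_with_inline_harness(request: pytest.FixtureRequest):
    # When a test constructs its own Harness, request.addfinalizer plays the
    # role of unittest's self.addCleanup.
    harness = ops.testing.Harness(_MinimalCharm, meta='name: webapp')
    request.addfinalizer(harness.cleanup)
    harness.begin()
    assert harness.model.unit.name == 'webapp/0'

The same shape repeats in the hunks that follow: shared objects (harness, container, client, action_results) become fixtures, and assertions move from `self.assert*` methods to plain `assert` and `pytest.raises`.
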
@@ -5176,11 +5265,11 @@ def test_secret_permissions_nonleader(self): with pytest.raises(ops.model.SecretNotFoundError): secret.remove_all_revisions() - def test_add_user_secret(self): + def test_add_user_secret(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp'} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_content = {'password': 'foo'} @@ -5188,57 +5277,57 @@ def test_add_user_secret(self): harness.grant_secret(secret_id, 'webapp') secret = harness.model.get_secret(id=secret_id) - self.assertEqual(secret.id, secret_id) - self.assertEqual(secret.get_content(), secret_content) + assert secret.id == secret_id + assert secret.get_content() == secret_content - def test_get_user_secret_without_grant(self): + def test_get_user_secret_without_grant(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp'} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'password': 'foo'}) - with self.assertRaises(ops.SecretNotFoundError): + with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret_id) - def test_revoke_user_secret(self): + def test_revoke_user_secret(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( {'name': 'webapp'} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_content = {'password': 'foo'} secret_id = harness.add_user_secret(secret_content) harness.grant_secret(secret_id, 'webapp') harness.revoke_secret(secret_id, 'webapp') - with self.assertRaises(ops.SecretNotFoundError): + with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret_id) - def test_set_user_secret_content(self): + def test_set_user_secret_content(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump( {'name': 'webapp'} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'password': 'foo'}) harness.grant_secret(secret_id, 'webapp') secret = harness.model.get_secret(id=secret_id) - self.assertEqual(secret.get_content(), {'password': 'foo'}) + assert secret.get_content() == {'password': 'foo'} harness.set_secret_content(secret_id, {'password': 'bar'}) secret = harness.model.get_secret(id=secret_id) - self.assertEqual(secret.get_content(refresh=True), {'password': 'bar'}) + assert secret.get_content(refresh=True) == {'password': 'bar'} - def test_get_user_secret_info(self): + def test_get_user_secret_info(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump( {'name': 'webapp'} )) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'password': 'foo'}) harness.grant_secret(secret_id, 'webapp') secret = harness.model.get_secret(id=secret_id) - with self.assertRaises(ops.SecretNotFoundError): + with pytest.raises(ops.SecretNotFoundError): secret.get_info() @@ -5251,10 +5340,10 @@ def record_event(self, event: ops.EventBase): self.events.append(event) -class TestPorts(unittest.TestCase): - def test_ports(self): +class TestPorts: + def test_ports(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - 
self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) unit = harness.model.unit unit.open_port('tcp', 8080) @@ -5291,9 +5380,9 @@ def test_ports(self): ports_set = unit.opened_ports() assert ports_set == set() - def test_errors(self): + def test_errors(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) unit = harness.model.unit with pytest.raises(ops.ModelError): @@ -5310,88 +5399,110 @@ def test_errors(self): unit.open_port('tcp', 65536) # port out of range -class TestHandleExec(unittest.TestCase): - def setUp(self) -> None: - self.harness = ops.testing.Harness(ops.CharmBase, meta=''' +class TestHandleExec: + @pytest.fixture + def harness(self): + harness = ops.testing.Harness(ops.CharmBase, meta=''' name: test containers: test-container: ''') - self.harness.begin() - self.harness.set_can_connect("test-container", True) - self.root = self.harness.get_filesystem_root("test-container") - self.container = self.harness.charm.unit.get_container("test-container") + harness.begin() + harness.set_can_connect("test-container", True) + yield harness + harness.cleanup() - def tearDown(self) -> None: - self.harness.cleanup() + @pytest.fixture + def container(self, harness: ops.testing.Harness[ops.CharmBase]): + return harness.charm.unit.get_container("test-container") - def test_register_handler(self): - self.harness.handle_exec(self.container, ["foo"], result="foo") - self.harness.handle_exec(self.container, ["foo", "bar", "foobar"], result="foobar2") - self.harness.handle_exec(self.container, ["foo", "bar"], result="foobar") + def test_register_handler( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): + harness.handle_exec(container, ["foo"], result="foo") + harness.handle_exec(container, ["foo", "bar", "foobar"], result="foobar2") + harness.handle_exec(container, ["foo", "bar"], result="foobar") - stdout, _ = self.container.exec(["foo", "bar", "foobar", "--help"]).wait_output() + stdout, _ = container.exec(["foo", "bar", "foobar", "--help"]).wait_output() assert stdout == "foobar2" - stdout, _ = self.container.exec(["foo", "bar", "--help"]).wait_output() + stdout, _ = container.exec(["foo", "bar", "--help"]).wait_output() assert stdout == "foobar" - stdout, _ = self.container.exec(["foo", "bar"]).wait_output() + stdout, _ = container.exec(["foo", "bar"]).wait_output() assert stdout == "foobar" - stdout, _ = self.container.exec(["foo", "--help"]).wait_output() + stdout, _ = container.exec(["foo", "--help"]).wait_output() assert stdout == "foo" - def test_re_register_handler(self): - self.harness.handle_exec(self.container, ["foo", "bar"], result="foobar") - self.harness.handle_exec(self.container, ["foo"], result="foo") + def test_re_register_handler( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): + harness.handle_exec(container, ["foo", "bar"], result="foobar") + harness.handle_exec(container, ["foo"], result="foo") - stdout, _ = self.container.exec(["foo", "bar"]).wait_output() + stdout, _ = container.exec(["foo", "bar"]).wait_output() assert stdout == "foobar" - self.harness.handle_exec(self.container, ["foo", "bar"], result="hello") - stdout, _ = self.container.exec(["foo", "bar"]).wait_output() + harness.handle_exec(container, ["foo", "bar"], result="hello") + stdout, _ = container.exec(["foo", "bar"]).wait_output() assert stdout == "hello" - 
self.harness.handle_exec(self.container.name, ["foo"], result="hello2") - stdout, _ = self.container.exec(["foo"]).wait_output() + harness.handle_exec(container.name, ["foo"], result="hello2") + stdout, _ = container.exec(["foo"]).wait_output() assert stdout == "hello2" with pytest.raises(pebble.APIError): - self.container.exec(["abc"]).wait() + container.exec(["abc"]).wait() - def test_register_match_all_prefix(self): - self.harness.handle_exec(self.container, [], result="hello") + def test_register_match_all_prefix( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): + harness.handle_exec(container, [], result="hello") - stdout, _ = self.container.exec(["foo", "bar"]).wait_output() + stdout, _ = container.exec(["foo", "bar"]).wait_output() assert stdout == "hello" - stdout, _ = self.container.exec(["ls"]).wait_output() + stdout, _ = container.exec(["ls"]).wait_output() assert stdout == "hello" - def test_register_with_result(self): - self.harness.handle_exec(self.container, ["foo"], result=10) + def test_register_with_result( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): + harness.handle_exec(container, ["foo"], result=10) with pytest.raises(pebble.ExecError) as excinfo: - self.container.exec(["foo"]).wait() + container.exec(["foo"]).wait() assert excinfo.value.exit_code == 10 - self.harness.handle_exec(self.container, ["foo"], result="hello") - stdout, stderr = self.container.exec(["foo"]).wait_output() + harness.handle_exec(container, ["foo"], result="hello") + stdout, stderr = container.exec(["foo"]).wait_output() assert stdout == "hello" assert stderr == "" with pytest.raises(ValueError): - self.container.exec(["foo"], encoding=None).wait_output() + container.exec(["foo"], encoding=None).wait_output() - self.harness.handle_exec(self.container, ["foo"], result=b"hello2") - stdout, stderr = self.container.exec(["foo"], encoding=None).wait_output() + harness.handle_exec(container, ["foo"], result=b"hello2") + stdout, stderr = container.exec(["foo"], encoding=None).wait_output() assert stdout == b"hello2" assert stderr == b"" - stdout, stderr = self.container.exec(["foo"]).wait_output() + stdout, stderr = container.exec(["foo"]).wait_output() assert stdout == "hello2" assert stderr == "" - def test_register_with_handler(self): + def test_register_with_handler( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): args_history: typing.List[ops.testing.ExecArgs] = [] return_value = None @@ -5399,104 +5510,120 @@ def handler(args: ops.testing.ExecArgs): args_history.append(args) return return_value - self.harness.handle_exec(self.container, ["foo"], handler=handler) + harness.handle_exec(container, ["foo"], handler=handler) - self.container.exec(["foo", "bar"]).wait() + container.exec(["foo", "bar"]).wait() assert len(args_history) == 1 assert args_history[-1].command == ["foo", "bar"] return_value = ExecResult(exit_code=1) with pytest.raises(pebble.ExecError): - self.container.exec(["foo", "bar"]).wait() + container.exec(["foo", "bar"]).wait() return_value = ExecResult(stdout="hello", stderr="error") - stdout, stderr = self.container.exec(["foo"]).wait_output() + stdout, stderr = container.exec(["foo"]).wait_output() assert stdout == "hello" assert stderr == "error" assert len(args_history) == 3 - self.container.exec(["foo"], environment={"bar": "foobar"}).wait_output() + container.exec(["foo"], environment={"bar": "foobar"}).wait_output() assert args_history[-1].environment == 
{"bar": "foobar"} return_value = ExecResult(stdout=b"hello") - stdout, _ = self.container.exec(["foo"], encoding=None).wait_output() + stdout, _ = container.exec(["foo"], encoding=None).wait_output() assert args_history[-1].encoding is None assert stdout == b"hello" - self.container.exec(["foo"], working_dir="/test").wait_output() + container.exec(["foo"], working_dir="/test").wait_output() assert args_history[-1].working_dir == "/test" - self.container.exec(["foo"], user="foo", user_id=1, group="bar", group_id=2).wait() + container.exec(["foo"], user="foo", user_id=1, group="bar", group_id=2).wait() assert args_history[-1].user == "foo" assert args_history[-1].user_id == 1 assert args_history[-1].group == "bar" assert args_history[-1].group_id == 2 - def test_exec_timeout(self): + def test_exec_timeout( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): def handler(_: ops.testing.ExecArgs): raise TimeoutError - self.harness.handle_exec(self.container, [], handler=handler) + harness.handle_exec(container, [], handler=handler) with pytest.raises(TimeoutError): - self.container.exec(["ls"], timeout=1).wait() + container.exec(["ls"], timeout=1).wait() with pytest.raises(RuntimeError): - self.container.exec(["ls"]).wait() + container.exec(["ls"]).wait() - def test_combined_error(self): + def test_combined_error( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): return_value = ExecResult(stdout="foobar") - self.harness.handle_exec(self.container, [], handler=lambda _: return_value) - stdout, stderr = self.container.exec(["ls"], combine_stderr=True).wait_output() + harness.handle_exec(container, [], handler=lambda _: return_value) + stdout, stderr = container.exec(["ls"], combine_stderr=True).wait_output() assert stdout == "foobar" assert stderr == "" return_value = ExecResult(stdout="foobar", stderr="error") with pytest.raises(ValueError): - self.container.exec(["ls"], combine_stderr=True).wait_output() + container.exec(["ls"], combine_stderr=True).wait_output() - def test_exec_stdin(self): + def test_exec_stdin( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): args_history: typing.List[ops.testing.ExecArgs] = [] def handler(args: ops.testing.ExecArgs): args_history.append(args) - self.harness.handle_exec(self.container, [], handler=handler) - proc = self.container.exec(["ls"], stdin="test") + harness.handle_exec(container, [], handler=handler) + proc = container.exec(["ls"], stdin="test") assert proc.stdin is None assert args_history[-1].stdin == "test" - proc = self.container.exec(["ls"]) + proc = container.exec(["ls"]) assert proc.stdin is not None assert args_history[-1].stdin is None - def test_exec_stdout_stderr(self): - self.harness.handle_exec( - self.container, [], result=ExecResult( + def test_exec_stdout_stderr( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): + harness.handle_exec( + container, [], result=ExecResult( stdout="output", stderr="error")) stdout = io.StringIO() stderr = io.StringIO() - proc = self.container.exec(["ls"], stderr=stderr, stdout=stdout) + proc = container.exec(["ls"], stderr=stderr, stdout=stdout) assert proc.stdout is None assert proc.stderr is None proc.wait() assert stdout.getvalue() == "output" assert stderr.getvalue() == "error" - proc = self.container.exec(["ls"]) + proc = container.exec(["ls"]) assert proc.stdout is not None # Not assertIsNotNone to help type checkers. 
assert proc.stderr is not None # Not assertIsNotNone to help type checkers. proc.wait() assert proc.stdout.read() == "output" assert proc.stderr.read() == "error" - self.harness.handle_exec( - self.container, [], result=ExecResult( + harness.handle_exec( + container, [], result=ExecResult( stdout=b"output", stderr=b"error")) stdout = io.StringIO() stderr = io.StringIO() - proc = self.container.exec(["ls"], stderr=stderr, stdout=stdout) + proc = container.exec(["ls"], stderr=stderr, stdout=stdout) assert stdout.getvalue() == "output" assert stderr.getvalue() == "error" - proc = self.container.exec(["ls"]) + proc = container.exec(["ls"]) assert proc.stdout is not None # Not assertIsNotNone to help type checkers. assert proc.stderr is not None # Not assertIsNotNone to help type checkers. assert proc.stdout.read() == "output" @@ -5504,16 +5631,20 @@ def test_exec_stdout_stderr(self): stdout = io.BytesIO() stderr = io.BytesIO() - proc = self.container.exec(["ls"], stderr=stderr, stdout=stdout, encoding=None) + proc = container.exec(["ls"], stderr=stderr, stdout=stdout, encoding=None) assert stdout.getvalue() == b"output" assert stderr.getvalue() == b"error" - proc = self.container.exec(["ls"], encoding=None) + proc = container.exec(["ls"], encoding=None) assert proc.stdout is not None # Not assertIsNotNone to help type checkers. assert proc.stderr is not None # Not assertIsNotNone to help type checkers. assert proc.stdout.read() == b"output" assert proc.stderr.read() == b"error" - def test_exec_service_context(self): + def test_exec_service_context( + self, + harness: ops.testing.Harness[ops.CharmBase], + container: ops.Container, + ): service: ops.pebble.ServiceDict = { "command": "test", "working-dir": "/tmp", # noqa: S108 @@ -5528,16 +5659,16 @@ def test_exec_service_context(self): 'description': "", 'services': { "test": service}} - self.container.add_layer(label="test", layer=ops.pebble.Layer(layer)) + container.add_layer(label="test", layer=ops.pebble.Layer(layer)) args_history: typing.List[ops.testing.ExecArgs] = [] def handler(args: ops.testing.ExecArgs): args_history.append(args) os.environ["JUJU_VERSION"] = "3.2.1" - self.harness.handle_exec(self.container, ["ls"], handler=handler) + harness.handle_exec(container, ["ls"], handler=handler) - self.container.exec(["ls"], service_context="test").wait() + container.exec(["ls"], service_context="test").wait() assert args_history[-1].working_dir == "/tmp" # noqa: S108 assert args_history[-1].user == "foo" assert args_history[-1].user_id == 1 @@ -5545,14 +5676,14 @@ def handler(args: ops.testing.ExecArgs): assert args_history[-1].group_id == 2 assert args_history[-1].environment == {"foo": "bar", "foobar": "barfoo"} - self.container.exec(["ls"], - service_context="test", - working_dir="/test", - user="test", - user_id=3, - group="test_group", - group_id=4, - environment={"foo": "hello"}).wait() + container.exec(["ls"], + service_context="test", + working_dir="/test", + user="test", + user_id=3, + group="test_group", + group_id=4, + environment={"foo": "hello"}).wait() assert args_history[-1].working_dir == "/test" assert args_history[-1].user == "test" assert args_history[-1].user_id == 3 @@ -5561,10 +5692,14 @@ def handler(args: ops.testing.ExecArgs): assert args_history[-1].environment == {"foo": "hello", "foobar": "barfoo"} -class TestActions(unittest.TestCase): - def setUp(self): +class TestActions: + @pytest.fixture + def action_results(self): action_results: typing.Dict[str, typing.Any] = {} - self._action_results = action_results + 
return action_results + + @pytest.fixture + def harness(self, action_results: typing.Dict[str, typing.Any]): class ActionCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): @@ -5600,7 +5735,7 @@ def _on_log_and_results_action(self, event: ops.ActionEvent): def _on_results_action(self, event: ops.ActionEvent): event.set_results(action_results) - self.harness = ops.testing.Harness(ActionCharm, meta=''' + harness = ops.testing.Harness(ActionCharm, meta=''' name: test ''', actions=''' simple: @@ -5625,7 +5760,9 @@ def _on_results_action(self, event: ops.ActionEvent): results: description: incididunt ut labore ''') - self.harness.begin() + harness.begin() + yield harness + harness.cleanup() def test_before_begin(self): harness = ops.testing.Harness(ops.CharmBase, meta=''' @@ -5634,49 +5771,52 @@ def test_before_begin(self): with pytest.raises(RuntimeError): harness.run_action("fail") - def test_invalid_action(self): + def test_invalid_action(self, harness: ops.testing.Harness[ops.CharmBase]): # This action isn't in the metadata at all. with pytest.raises(RuntimeError): - self.harness.run_action("another-action") + harness.run_action("another-action") # Also check that we're not exposing the action with the dash to underscore replacement. with pytest.raises(RuntimeError): - self.harness.run_action("log_and_results") + harness.run_action("log_and_results") - def test_run_action(self): - out = self.harness.run_action("simple") + def test_run_action(self, harness: ops.testing.Harness[ops.CharmBase]): + out = harness.run_action("simple") assert out.logs == [] assert out.results == {} - assert self.harness.charm.simple_was_called + assert harness.charm.simple_was_called # type: ignore - def test_fail_action_no_message(self): + def test_fail_action_no_message(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(ops.testing.ActionFailed) as excinfo: - self.harness.run_action('fail', {'empty-failure-message': True}) + harness.run_action('fail', {'empty-failure-message': True}) assert 'called `fail()`' in str(excinfo.value) assert excinfo.value.message == '' - def test_fail_action(self): - self._action_results.clear() - self._action_results["partial"] = "foo" + def test_fail_action( + self, + action_results: typing.Dict[str, typing.Any], + harness: ops.testing.Harness[ops.CharmBase], + ): + action_results["partial"] = "foo" with pytest.raises(ops.testing.ActionFailed) as excinfo: - self.harness.run_action("fail") - assert "something went wrong" in str(excinfo.value) + harness.run_action("fail") + assert excinfo.value.message == "something went wrong" assert excinfo.value.output.logs == ["some progress", "more progress"] assert excinfo.value.output.results == {"partial": "foo"} - def test_required_param(self): + def test_required_param(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(RuntimeError): - self.harness.run_action("unobserved-param-tester") + harness.run_action("unobserved-param-tester") with pytest.raises(RuntimeError): - self.harness.run_action("unobserved-param-tester", {"bar": "baz"}) - self.harness.run_action("unobserved-param-tester", {"foo": "baz"}) - self.harness.run_action("unobserved-param-tester", {"foo": "baz", "bar": "qux"}) + harness.run_action("unobserved-param-tester", {"bar": "baz"}) + harness.run_action("unobserved-param-tester", {"foo": "baz"}) + harness.run_action("unobserved-param-tester", {"foo": "baz", "bar": "qux"}) - def test_additional_params(self): - self.harness.run_action("simple", {"foo": "bar"}) + def 
test_additional_params(self, harness: ops.testing.Harness[ops.CharmBase]): + harness.run_action("simple", {"foo": "bar"}) with pytest.raises(ops.ModelError): - self.harness.run_action("unobserved-param-tester", {"foo": "bar", "qux": "baz"}) - self.harness.run_action("simple", { + harness.run_action("unobserved-param-tester", {"foo": "bar", "qux": "baz"}) + harness.run_action("simple", { "string": "hello", "number": 28.8, "object": {"a": {"b": "c"}}, @@ -5684,36 +5824,42 @@ def test_additional_params(self): "boolean": True, "null": None}) - def test_logs_and_results(self): - out = self.harness.run_action("log-and-results") + def test_logs_and_results(self, harness: ops.testing.Harness[ops.CharmBase]): + out = harness.run_action("log-and-results") assert out.logs == ["Step 1", "Step 2"] assert out.results == {"result1": "foo-default", "result2": None} - out = self.harness.run_action("log-and-results", {"foo": "baz", "bar": 28}) + out = harness.run_action("log-and-results", {"foo": "baz", "bar": 28}) assert out.results == {"result1": "baz", "result2": 28} - def test_bad_results(self): - # We can't have results that collide when flattened. - self._action_results.clear() - self._action_results["a"] = {"b": 1} - self._action_results["a.b"] = 2 + @pytest.mark.parametrize('prohibited_key', [ + "stdout", "stdout-encoding", "stderr", "stderr-encoding" + ]) + def test_bad_results( + self, + action_results: typing.Dict[str, typing.Any], + harness: ops.testing.Harness[ops.CharmBase], + prohibited_key: str, + ): + action_results["a"] = {"b": 1} + action_results["a.b"] = 2 with pytest.raises(ValueError): - self.harness.run_action("results") + harness.run_action("results") + # There are some result key names we cannot use. - prohibited_keys = "stdout", "stdout-encoding", "stderr", "stderr-encoding" - for key in prohibited_keys: - self._action_results.clear() - self._action_results[key] = "foo" - with pytest.raises(ops.ModelError): - self.harness.run_action("results") + action_results.clear() + action_results[prohibited_key] = "foo" + with pytest.raises(ops.ModelError): + harness.run_action("results") + # There are some additional rules around what result keys are valid. 
- self._action_results.clear() - self._action_results["A"] = "foo" + action_results.clear() + action_results["A"] = "foo" with pytest.raises(ValueError): - self.harness.run_action("results") + harness.run_action("results") -class TestNotify(unittest.TestCase): - def test_notify_basics(self): +class TestNotify: + def test_notify_basics(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ContainerEventCharm, meta=""" name: notifier containers: @@ -5722,7 +5868,7 @@ def test_notify_basics(self): bar: resource: foo-image """) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_container_events('foo') harness.charm.observe_container_events('bar') @@ -5771,7 +5917,7 @@ def test_notify_basics(self): }] assert harness.charm.changes == expected_changes - def test_notify_no_repeat(self): + def test_notify_no_repeat(self, request: pytest.FixtureRequest): """Ensure event doesn't get triggered when notice occurs but doesn't repeat.""" harness = ops.testing.Harness(ContainerEventCharm, meta=""" name: notifier @@ -5779,7 +5925,7 @@ def test_notify_no_repeat(self): foo: resource: foo-image """) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_container_events('foo') @@ -5799,7 +5945,7 @@ def test_notify_no_repeat(self): }] assert harness.charm.changes == expected_changes - def test_notify_no_begin(self): + def test_notify_no_begin(self, request: pytest.FixtureRequest): num_notices = 0 class TestCharm(ops.CharmBase): @@ -5818,7 +5964,7 @@ def _on_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): c1: resource: c1-image """) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) id = harness.pebble_notify('c1', 'example.com/n1') @@ -5828,16 +5974,7 @@ def _on_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): class PebbleNoticesMixin: - client: ops.pebble.Client - - assertEqual = unittest.TestCase.assertEqual # noqa - assertIsNone = unittest.TestCase.assertIsNone # noqa - assertLess = unittest.TestCase.assertLess # noqa - assertRaises = unittest.TestCase.assertRaises # noqa - assertGreaterEqual = unittest.TestCase.assertGreaterEqual # noqa - - def test_get_notice_by_id(self): - client = self.client + def test_get_notice_by_id(self, client: PebbleClientType): key1 = 'example.com/' + os.urandom(16).hex() key2 = 'example.com/' + os.urandom(16).hex() id1 = client.notify(pebble.NoticeType.CUSTOM, key1) @@ -5868,9 +6005,7 @@ def test_get_notice_by_id(self): assert notice.repeat_after is None assert notice.expire_after == datetime.timedelta(days=7) - def test_get_notices(self): - client = self.client - + def test_get_notices(self, client: PebbleClientType): key1 = 'example.com/' + os.urandom(16).hex() key2 = 'example.com/' + os.urandom(16).hex() key3 = 'example.com/' + os.urandom(16).hex() @@ -5903,13 +6038,23 @@ def test_get_notices(self): assert notices[0].last_repeated < notices[1].last_repeated -class TestNotices(unittest.TestCase, _TestingPebbleClientMixin, PebbleNoticesMixin): - def setUp(self): - self.client = self.get_testing_client() +class TestNotices(PebbleNoticesMixin): + @pytest.fixture + def client(self): + harness = ops.testing.Harness(ops.CharmBase, meta=''' + name: test-app + containers: + mycontainer: {} + ''') + backend = harness._backend + client = backend.get_pebble('/charm/containers/mycontainer/pebble.socket') + harness.set_can_connect('mycontainer', True) + yield client + harness.cleanup() -class 
TestCloudSpec(unittest.TestCase): - def test_set_cloud_spec(self): +class TestCloudSpec: + def test_set_cloud_spec(self, request: pytest.FixtureRequest): class TestCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -5919,7 +6064,7 @@ def _on_start(self, event: ops.StartEvent): self.cloud_spec = self.model.get_cloud_spec() harness = ops.testing.Harness(TestCharm) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) cloud_spec_dict = { 'name': 'localhost', 'type': 'lxd', @@ -5938,9 +6083,9 @@ def _on_start(self, event: ops.StartEvent): harness.charm.on.start.emit() assert harness.charm.cloud_spec == ops.CloudSpec.from_dict(cloud_spec_dict) - def test_get_cloud_spec_without_set_error(self): + def test_get_cloud_spec_without_set_error(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase) - self.addCleanup(harness.cleanup) + request.addfinalizer(harness.cleanup) harness.begin() with pytest.raises(ops.ModelError): harness.model.get_cloud_spec() From d8148ab41869f6119683cf42b7e6e574f1a99bf1 Mon Sep 17 00:00:00 2001 From: Tiexin Guo Date: Fri, 24 May 2024 14:19:48 +0800 Subject: [PATCH 4/4] refactor: use ruff formatter (#1224) Use ruff formatter in tox, and format the code base. --- ops/__init__.py | 4 - ops/_private/timeconv.py | 8 +- ops/charm.py | 243 ++- ops/framework.py | 171 +- ops/jujuversion.py | 10 +- ops/lib/__init__.py | 79 +- ops/log.py | 20 +- ops/main.py | 129 +- ops/model.py | 742 ++++--- ops/pebble.py | 860 ++++---- ops/storage.py | 75 +- ops/testing.py | 930 ++++---- pyproject.toml | 5 + test/charms/test_main/src/charm.py | 68 +- test/charms/test_smoke/src/charm.py | 2 +- test/fake_pebble.py | 109 +- test/pebble_cli.py | 84 +- test/smoke/test_smoke.py | 7 +- test/test_charm.py | 222 +- test/test_framework.py | 957 ++++---- test/test_helpers.py | 38 +- test/test_infra.py | 21 +- test/test_jujuversion.py | 164 +- test/test_lib.py | 300 ++- test/test_log.py | 44 +- test/test_main.py | 713 +++--- test/test_model.py | 1480 +++++++------ test/test_pebble.py | 1222 ++++++----- test/test_real_pebble.py | 92 +- test/test_storage.py | 77 +- test/test_testing.py | 3156 ++++++++++++++++----------- test/test_timeconv.py | 200 +- tox.ini | 8 +- 33 files changed, 6906 insertions(+), 5334 deletions(-) diff --git a/ops/__init__.py b/ops/__init__.py index 60de7ad4a..c74ba25b8 100644 --- a/ops/__init__.py +++ b/ops/__init__.py @@ -45,7 +45,6 @@ '__version__', 'main', 'pebble', - # From charm.py 'ActionEvent', 'ActionMeta', @@ -95,7 +94,6 @@ 'UpdateStatusEvent', 'UpgradeCharmEvent', 'WorkloadEvent', - # From framework.py 'BoundEvent', 'BoundStoredState', @@ -118,10 +116,8 @@ 'StoredSet', 'StoredState', 'StoredStateData', - # From jujuversion.py 'JujuVersion', - # From model.py 'ActiveStatus', 'Application', diff --git a/ops/_private/timeconv.py b/ops/_private/timeconv.py index ceb67f32a..7a6c86421 100644 --- a/ops/_private/timeconv.py +++ b/ops/_private/timeconv.py @@ -19,8 +19,7 @@ from typing import Union # Matches yyyy-mm-ddTHH:MM:SS(.sss)ZZZ -_TIMESTAMP_RE = re.compile( - r'(\d{4})-(\d{2})-(\d{2})[Tt](\d{2}):(\d{2}):(\d{2})(\.\d+)?(.*)') +_TIMESTAMP_RE = re.compile(r'(\d{4})-(\d{2})-(\d{2})[Tt](\d{2}):(\d{2}):(\d{2})(\.\d+)?(.*)') # Matches [-+]HH:MM _TIMEOFFSET_RE = re.compile(r'([-+])(\d{2}):(\d{2})') @@ -59,8 +58,9 @@ def parse_rfc3339(s: str) -> datetime.datetime: # standard library behaviour. 
microsecond = min(microsecond, 999999) - return datetime.datetime(int(y), int(m), int(d), int(hh), int(mm), int(ss), - microsecond=microsecond, tzinfo=tz) + return datetime.datetime( + int(y), int(m), int(d), int(hh), int(mm), int(ss), microsecond=microsecond, tzinfo=tz + ) def parse_duration(s: str) -> datetime.timedelta: diff --git a/ops/charm.py b/ops/charm.py index f4369e5d2..e083bd43b 100644 --- a/ops/charm.py +++ b/ops/charm.py @@ -51,36 +51,34 @@ _Scopes = Literal['global', 'container'] _RelationMetaDict = TypedDict( - '_RelationMetaDict', { - 'interface': Required[str], - 'limit': int, - 'optional': bool, - 'scope': _Scopes}, - total=False) + '_RelationMetaDict', + {'interface': Required[str], 'limit': int, 'optional': bool, 'scope': _Scopes}, + total=False, + ) _MultipleRange = TypedDict('_MultipleRange', {'range': str}) - _StorageMetaDict = TypedDict('_StorageMetaDict', { - 'type': Required[str], - 'description': str, - 'shared': bool, - 'read-only': bool, - 'minimum-size': str, - 'location': str, - 'multiple-range': str, - 'multiple': _MultipleRange - }, total=False) + _StorageMetaDict = TypedDict( + '_StorageMetaDict', + { + 'type': Required[str], + 'description': str, + 'shared': bool, + 'read-only': bool, + 'minimum-size': str, + 'location': str, + 'multiple-range': str, + 'multiple': _MultipleRange, + }, + total=False, + ) _ResourceMetaDict = TypedDict( - '_ResourceMetaDict', { - 'type': Required[str], - 'filename': str, - 'description': str}, - total=False) + '_ResourceMetaDict', + {'type': Required[str], 'filename': str, 'description': str}, + total=False, + ) - _MountDict = TypedDict( - '_MountDict', {'storage': Required[str], - 'location': str}, - total=False) + _MountDict = TypedDict('_MountDict', {'storage': Required[str], 'location': str}, total=False) class _ContainerBaseDict(TypedDict): @@ -125,7 +123,7 @@ class ActionEvent(EventBase): :meth:`log`. """ - id: str = "" + id: str = '' """The Juju ID of the action invocation.""" params: Dict[str, Any] @@ -407,8 +405,9 @@ class CollectMetricsEvent(HookEvent): how they can interact with Juju. """ - def add_metrics(self, metrics: Mapping[str, Union[int, float]], - labels: Optional[Mapping[str, str]] = None): + def add_metrics( + self, metrics: Mapping[str, Union[int, float]], labels: Optional[Mapping[str, str]] = None + ): """Record metrics that have been gathered by the charm for this unit. Args: @@ -445,14 +444,19 @@ class RelationEvent(HookEvent): :class:`Application `-level event. """ - def __init__(self, handle: 'Handle', relation: 'model.Relation', - app: Optional[model.Application] = None, - unit: Optional[model.Unit] = None): + def __init__( + self, + handle: 'Handle', + relation: 'model.Relation', + app: Optional[model.Application] = None, + unit: Optional[model.Unit] = None, + ): super().__init__(handle) if unit is not None and unit.app != app: raise RuntimeError( - f'cannot create RelationEvent with application {app} and unit {unit}') + f'cannot create RelationEvent with application {app} and unit {unit}' + ) self.relation = relation if app is None: @@ -484,11 +488,14 @@ def restore(self, snapshot: Dict[str, Any]): Not meant to be called by charm code. 
""" relation = self.framework.model.get_relation( - snapshot['relation_name'], snapshot['relation_id']) + snapshot['relation_name'], snapshot['relation_id'] + ) if relation is None: raise ValueError( 'Unable to restore {}: relation {} (id={}) not found.'.format( - self, snapshot['relation_name'], snapshot['relation_id'])) + self, snapshot['relation_name'], snapshot['relation_id'] + ) + ) self.relation = relation app_name = snapshot.get('app_name') @@ -513,6 +520,7 @@ class RelationCreatedEvent(RelationEvent): relations will trigger `RelationCreatedEvent` before :class:`StartEvent` is emitted. """ + unit: None # pyright: ignore[reportIncompatibleVariableOverride] """Always ``None``.""" @@ -528,6 +536,7 @@ class RelationJoinedEvent(RelationEvent): remote ``private-address`` setting, which is always available when the relation is created and is by convention not deleted. """ + unit: model.Unit # pyright: ignore[reportIncompatibleVariableOverride] """The remote unit that has triggered this event.""" @@ -570,13 +579,18 @@ class RelationDepartedEvent(RelationEvent): Once all callback methods bound to this event have been run for such a relation, the unit agent will fire the :class:`RelationBrokenEvent`. """ + unit: model.Unit # pyright: ignore[reportIncompatibleVariableOverride] """The remote unit that has triggered this event.""" - def __init__(self, handle: 'Handle', relation: 'model.Relation', - app: Optional[model.Application] = None, - unit: Optional[model.Unit] = None, - departing_unit_name: Optional[str] = None): + def __init__( + self, + handle: 'Handle', + relation: 'model.Relation', + app: Optional[model.Application] = None, + unit: Optional[model.Unit] = None, + departing_unit_name: Optional[str] = None, + ): super().__init__(handle, relation, app=app, unit=unit) self._departing_unit_name = departing_unit_name @@ -627,6 +641,7 @@ class RelationBrokenEvent(RelationEvent): bound to this event is being executed, it is guaranteed that no remote units are currently known locally. """ + unit: None # pyright: ignore[reportIncompatibleVariableOverride] """Always ``None``.""" @@ -654,9 +669,9 @@ def snapshot(self) -> Dict[str, Any]: """ snapshot: Dict[str, Any] = {} if isinstance(self.storage, model.Storage): - snapshot["storage_name"] = self.storage.name - snapshot["storage_index"] = self.storage.index - snapshot["storage_location"] = str(self.storage.location) + snapshot['storage_name'] = self.storage.name + snapshot['storage_index'] = self.storage.index + snapshot['storage_location'] = str(self.storage.location) return snapshot def restore(self, snapshot: Dict[str, Any]): @@ -664,15 +679,13 @@ def restore(self, snapshot: Dict[str, Any]): Not meant to be called by charm code. 
""" - storage_name = snapshot.get("storage_name") - storage_index = snapshot.get("storage_index") - storage_location = snapshot.get("storage_location") + storage_name = snapshot.get('storage_name') + storage_index = snapshot.get('storage_index') + storage_location = snapshot.get('storage_location') if storage_name and storage_index is not None: storages = self.framework.model.storages[storage_name] - self.storage = next( - (s for s in storages if s.index == storage_index), - None) # type: ignore + self.storage = next((s for s in storages if s.index == storage_index), None) # type: ignore if self.storage is None: raise RuntimeError( f'failed loading storage (name={storage_name!r}, ' @@ -681,7 +694,8 @@ def restore(self, snapshot: Dict[str, Any]): if storage_location is None: raise RuntimeError( 'failed loading storage location from snapshot.' - f'(name={storage_name!r}, index={storage_index!r}, storage_location=None)') + f'(name={storage_name!r}, index={storage_index!r}, storage_location=None)' + ) self.storage.location = storage_location @@ -777,8 +791,14 @@ class PebbleNoticeEvent(WorkloadEvent): notice: model.LazyNotice """Provide access to the event notice's details.""" - def __init__(self, handle: 'Handle', workload: 'model.Container', - notice_id: str, notice_type: str, notice_key: str): + def __init__( + self, + handle: 'Handle', + workload: 'model.Container', + notice_id: str, + notice_type: str, + notice_key: str, + ): super().__init__(handle, workload) self.notice = model.LazyNotice(workload, notice_id, notice_type, notice_key) @@ -789,8 +809,9 @@ def snapshot(self) -> Dict[str, Any]: """ d = super().snapshot() d['notice_id'] = self.notice.id - d['notice_type'] = (self.notice.type if isinstance(self.notice.type, str) - else self.notice.type.value) + d['notice_type'] = ( + self.notice.type if isinstance(self.notice.type, str) else self.notice.type.value + ) d['notice_key'] = self.notice.key return d @@ -869,7 +890,8 @@ def defer(self) -> NoReturn: """ raise RuntimeError( 'Cannot defer secret rotation events. Juju will keep firing this ' - 'event until you create a new revision.') + 'event until you create a new revision.' + ) class SecretRemoveEvent(SecretEvent): @@ -953,7 +975,8 @@ def defer(self) -> NoReturn: """ raise RuntimeError( 'Cannot defer secret expiration events. Juju will keep firing ' - 'this event until you create a new revision.') + 'this event until you create a new revision.' 
+ ) class CollectStatusEvent(LifecycleEvent): @@ -1172,25 +1195,25 @@ def __init__(self, framework: Framework): for relation_name in self.framework.meta.relations: relation_name = relation_name.replace('-', '_') - self.on.define_event(f"{relation_name}_relation_created", RelationCreatedEvent) - self.on.define_event(f"{relation_name}_relation_joined", RelationJoinedEvent) - self.on.define_event(f"{relation_name}_relation_changed", RelationChangedEvent) - self.on.define_event(f"{relation_name}_relation_departed", RelationDepartedEvent) - self.on.define_event(f"{relation_name}_relation_broken", RelationBrokenEvent) + self.on.define_event(f'{relation_name}_relation_created', RelationCreatedEvent) + self.on.define_event(f'{relation_name}_relation_joined', RelationJoinedEvent) + self.on.define_event(f'{relation_name}_relation_changed', RelationChangedEvent) + self.on.define_event(f'{relation_name}_relation_departed', RelationDepartedEvent) + self.on.define_event(f'{relation_name}_relation_broken', RelationBrokenEvent) for storage_name in self.framework.meta.storages: storage_name = storage_name.replace('-', '_') - self.on.define_event(f"{storage_name}_storage_attached", StorageAttachedEvent) - self.on.define_event(f"{storage_name}_storage_detaching", StorageDetachingEvent) + self.on.define_event(f'{storage_name}_storage_attached', StorageAttachedEvent) + self.on.define_event(f'{storage_name}_storage_detaching', StorageDetachingEvent) for action_name in self.framework.meta.actions: action_name = action_name.replace('-', '_') - self.on.define_event(f"{action_name}_action", ActionEvent) + self.on.define_event(f'{action_name}_action', ActionEvent) for container_name in self.framework.meta.containers: container_name = container_name.replace('-', '_') - self.on.define_event(f"{container_name}_pebble_ready", PebbleReadyEvent) - self.on.define_event(f"{container_name}_pebble_custom_notice", PebbleCustomNoticeEvent) + self.on.define_event(f'{container_name}_pebble_ready', PebbleReadyEvent) + self.on.define_event(f'{container_name}_pebble_custom_notice', PebbleCustomNoticeEvent) @property def app(self) -> model.Application: @@ -1319,8 +1342,9 @@ class is mostly for the framework to understand what the charm has defined. actions: Dict[str, 'ActionMeta'] """Actions the charm has defined.""" - def __init__(self, raw: Optional[Dict[str, Any]] = None, - actions_raw: Optional[Dict[str, Any]] = None): + def __init__( + self, raw: Optional[Dict[str, Any]] = None, actions_raw: Optional[Dict[str, Any]] = None + ): raw_: Dict[str, Any] = raw or {} actions_raw_: Dict[str, Any] = actions_raw or {} @@ -1356,39 +1380,50 @@ def __init__(self, raw: Optional[Dict[str, Any]] = None, # Note that metadata v2 does not define min-juju-version ('assumes' # should be used instead). 
self.min_juju_version = raw_.get('min-juju-version') - self.requires = {name: RelationMeta(RelationRole.requires, name, rel) - for name, rel in raw_.get('requires', {}).items()} - self.provides = {name: RelationMeta(RelationRole.provides, name, rel) - for name, rel in raw_.get('provides', {}).items()} - self.peers = {name: RelationMeta(RelationRole.peer, name, rel) - for name, rel in raw_.get('peers', {}).items()} + self.requires = { + name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw_.get('requires', {}).items() + } + self.provides = { + name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw_.get('provides', {}).items() + } + self.peers = { + name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw_.get('peers', {}).items() + } self.relations: Dict[str, RelationMeta] = {} self.relations.update(self.requires) self.relations.update(self.provides) self.relations.update(self.peers) - self.storages = {name: StorageMeta(name, storage) - for name, storage in raw_.get('storage', {}).items()} - self.resources = {name: ResourceMeta(name, res) - for name, res in raw_.get('resources', {}).items()} - self.payloads = {name: PayloadMeta(name, payload) - for name, payload in raw_.get('payloads', {}).items()} + self.storages = { + name: StorageMeta(name, storage) for name, storage in raw_.get('storage', {}).items() + } + self.resources = { + name: ResourceMeta(name, res) for name, res in raw_.get('resources', {}).items() + } + self.payloads = { + name: PayloadMeta(name, payload) for name, payload in raw_.get('payloads', {}).items() + } self.extra_bindings = raw_.get('extra-bindings', {}) self.actions = {name: ActionMeta(name, action) for name, action in actions_raw_.items()} - self.containers = {name: ContainerMeta(name, container) - for name, container in raw_.get('containers', {}).items()} + self.containers = { + name: ContainerMeta(name, container) + for name, container in raw_.get('containers', {}).items() + } @staticmethod def from_charm_root(charm_root: Union[pathlib.Path, str]): """Initialise CharmMeta from the path to a charm repository root folder.""" _charm_root = pathlib.Path(charm_root) - metadata_path = _charm_root / "metadata.yaml" + metadata_path = _charm_root / 'metadata.yaml' with metadata_path.open() as f: meta = yaml.safe_load(f.read()) actions = None - actions_path = _charm_root / "actions.yaml" + actions_path = _charm_root / 'actions.yaml' if actions_path.exists(): with actions_path.open() as f: actions = yaml.safe_load(f.read()) @@ -1426,8 +1461,8 @@ def _load_links(self, raw: Dict[str, Any]): @classmethod def from_yaml( - cls, metadata: Union[str, TextIO], - actions: Optional[Union[str, TextIO]] = None) -> 'CharmMeta': + cls, metadata: Union[str, TextIO], actions: Optional[Union[str, TextIO]] = None + ) -> 'CharmMeta': """Instantiate a :class:`CharmMeta` from a YAML description of ``metadata.yaml``. 
Args: @@ -1453,6 +1488,7 @@ class RelationRole(enum.Enum): - A service consumer in the relation ('requires') - A service provider in the relation ('provides') """ + peer = 'peer' requires = 'requires' provides = 'provides' @@ -1502,8 +1538,9 @@ class RelationMeta: VALID_SCOPES = ['global', 'container'] def __init__(self, role: RelationRole, relation_name: str, raw: '_RelationMetaDict'): - assert isinstance(role, RelationRole), \ - f"role should be one of {list(RelationRole)!r}, not {role!r}" + assert isinstance( + role, RelationRole + ), f'role should be one of {list(RelationRole)!r}, not {role!r}' self._default_scope = self.VALID_SCOPES[0] self.role = role self.relation_name = relation_name @@ -1511,12 +1548,15 @@ def __init__(self, role: RelationRole, relation_name: str, raw: '_RelationMetaDi self.limit = limit = raw.get('limit', None) if limit is not None and not isinstance(limit, int): # type: ignore - raise TypeError(f"limit should be an int, not {type(limit)}") + raise TypeError(f'limit should be an int, not {type(limit)}') self.scope = raw.get('scope') or self._default_scope if self.scope not in self.VALID_SCOPES: - raise TypeError("scope should be one of {}; not '{}'".format( - ', '.join(f"'{s}'" for s in self.VALID_SCOPES), self.scope)) + raise TypeError( + "scope should be one of {}; not '{}'".format( + ', '.join(f"'{s}'" for s in self.VALID_SCOPES), self.scope + ) + ) self.optional = raw.get('optional', False) @@ -1650,18 +1690,23 @@ class JujuAssumes: condition: JujuAssumesCondition = JujuAssumesCondition.ALL @classmethod - def from_list(cls, raw: List[Any], - condition: JujuAssumesCondition = JujuAssumesCondition.ALL, - ) -> 'JujuAssumes': + def from_list( + cls, + raw: List[Any], + condition: JujuAssumesCondition = JujuAssumesCondition.ALL, + ) -> 'JujuAssumes': """Create new JujuAssumes object from list parsed from YAML.""" - features: List[Union[str, 'JujuAssumes']] = [] + features: List[Union[str, JujuAssumes]] = [] for feature in raw: if isinstance(feature, str): features.append(feature) else: for nested_condition, nested_features in feature.items(): - features.append(JujuAssumes.from_list( - nested_features, JujuAssumesCondition(nested_condition))) + features.append( + JujuAssumes.from_list( + nested_features, JujuAssumesCondition(nested_condition) + ) + ) return cls(features=features, condition=condition) @@ -1770,8 +1815,8 @@ def _populate_mounts(self, mounts: List['_MountDict']): under each key. """ for mount in mounts: - storage = mount.get("storage", "") - mount = mount.get("location", "") + storage = mount.get('storage', '') + mount = mount.get('location', '') if not mount: continue @@ -1819,6 +1864,6 @@ def location(self) -> str: if len(self._locations) == 1: return self._locations[0] raise RuntimeError( - "container has more than one mount point with the same backing storage. " - "Request .locations to see a list" + 'container has more than one mount point with the same backing storage. 
' + 'Request .locations to see a list' ) diff --git a/ops/framework.py b/ops/framework.py index 5ca8674d4..1b71d68f5 100644 --- a/ops/framework.py +++ b/ops/framework.py @@ -79,9 +79,9 @@ class _StoredObject(Protocol): _PathToObjectMapping = Dict[_Path, 'Object'] _PathToSerializableMapping = Dict[_Path, Serializable] -_T = TypeVar("_T") +_T = TypeVar('_T') _EventType = TypeVar('_EventType', bound='EventBase') -_ObjectType = TypeVar("_ObjectType", bound="Object") +_ObjectType = TypeVar('_ObjectType', bound='Object') logger = logging.getLogger(__name__) @@ -109,14 +109,14 @@ def __init__(self, parent: Optional[Union['Handle', 'Object']], kind: str, key: self._key = key if parent: if key: - self._path = f"{parent}/{kind}[{key}]" + self._path = f'{parent}/{kind}[{key}]' else: - self._path = f"{parent}/{kind}" + self._path = f'{parent}/{kind}' else: if key: - self._path = f"{kind}[{key}]" + self._path = f'{kind}[{key}]' else: - self._path = f"{kind}" # don't need f-string, but consistent with above + self._path = f'{kind}' # don't need f-string, but consistent with above def nest(self, kind: str, key: Optional[str]) -> 'Handle': """Create a new handle as child of the current one.""" @@ -155,8 +155,8 @@ def path(self) -> str: def from_path(cls, path: str) -> 'Handle': """Build a handle from the indicated path.""" handle = None - for pair in path.split("/"): - pair = pair.split("[") + for pair in path.split('/'): + pair = pair.split('[') good = False if len(pair) == 1: kind, key = pair[0], None @@ -167,7 +167,7 @@ def from_path(cls, path: str) -> 'Handle': key = key[:-1] good = True if not good: - raise RuntimeError(f"attempted to restore invalid handle path {path}") + raise RuntimeError(f'attempted to restore invalid handle path {path}') handle = Handle(handle, kind, key) # type: ignore return typing.cast(Handle, handle) @@ -191,7 +191,7 @@ def __init__(self, handle: Handle): self.deferred: bool = False def __repr__(self): - return f"<{self.__class__.__name__} via {self.handle}>" + return f'<{self.__class__.__name__} via {self.handle}>' def defer(self) -> None: """Defer the event to the future. @@ -241,7 +241,7 @@ def defer(self) -> None: 3. At some future time, event C happens, which also checks if A can proceed. 
""" - logger.debug("Deferring %s.", self) + logger.debug('Deferring %s.', self) self.deferred = True def snapshot(self) -> Dict[str, Any]: @@ -278,7 +278,8 @@ class SomeObject(Object): def __init__(self, event_type: 'Type[EventBase]'): if not isinstance(event_type, type) or not issubclass(event_type, EventBase): raise RuntimeError( - f'Event requires a subclass of EventBase as an argument, got {event_type}') + f'Event requires a subclass of EventBase as an argument, got {event_type}' + ) self.event_type: Type[EventBase] = event_type self.event_kind: Optional[str] = None self.emitter_type: Optional[Type[Object]] = None @@ -293,13 +294,12 @@ def __set_name__(self, emitter_type: 'Type[Object]', event_kind: str): self.event_kind, emitter_type.__name__, event_kind, - )) + ) + ) self.event_kind = event_kind self.emitter_type = emitter_type - def __get__(self, emitter: Optional['Object'], - emitter_type: 'Type[Object]' - ) -> 'BoundEvent': + def __get__(self, emitter: Optional['Object'], emitter_type: 'Type[Object]') -> 'BoundEvent': if emitter is None: return self # type: ignore # Framework might not be available if accessed as CharmClass.on.event @@ -320,9 +320,7 @@ def __repr__(self): f'{type(self.emitter).__name__}.{self.event_kind} at {hex(id(self))}>' ) - def __init__(self, emitter: 'Object', - event_type: 'Type[EventBase]', - event_kind: str): + def __init__(self, emitter: 'Object', event_type: 'Type[EventBase]', event_kind: str): self.emitter = emitter self.event_type = event_type self.event_kind = event_kind @@ -358,7 +356,7 @@ class HandleKind: """ def __get__(self, obj: 'Object', obj_type: 'Type[Object]') -> str: - kind = typing.cast(str, obj_type.__dict__.get("handle_kind")) + kind = typing.cast(str, obj_type.__dict__.get('handle_kind')) if kind: return kind return obj_type.__name__ @@ -383,6 +381,7 @@ class Object: been created. 
""" + handle_kind: str = HandleKind() # type: ignore if TYPE_CHECKING: @@ -418,13 +417,12 @@ def model(self) -> 'Model': class ObjectEvents(Object): """Convenience type to allow defining ``.on`` attributes at class level.""" - handle_kind = "on" + handle_kind = 'on' def __init__(self, parent: Optional[Object] = None, key: Optional[str] = None): if parent is not None: super().__init__(parent, key) - self._cache: weakref.WeakKeyDictionary[Object, 'ObjectEvents'] = \ - weakref.WeakKeyDictionary() + self._cache: weakref.WeakKeyDictionary[Object, ObjectEvents] = weakref.WeakKeyDictionary() def __get__(self, emitter: Object, emitter_type: 'Type[Object]'): if emitter is None: @@ -462,13 +460,14 @@ def define_event(cls, event_kind: str, event_type: 'Type[EventBase]'): """ prefix = 'unable to define an event with event_kind that ' if not event_kind.isidentifier(): - raise RuntimeError(f"{prefix}is not a valid python identifier: {event_kind}") + raise RuntimeError(f'{prefix}is not a valid python identifier: {event_kind}') elif keyword.iskeyword(event_kind): - raise RuntimeError(f"{prefix}is a python keyword: {event_kind}") + raise RuntimeError(f'{prefix}is a python keyword: {event_kind}') try: getattr(cls, event_kind) raise RuntimeError( - f"{prefix}overlaps with an existing type {cls} attribute: {event_kind}") + f'{prefix}overlaps with an existing type {cls} attribute: {event_kind}' + ) except AttributeError: pass @@ -559,7 +558,7 @@ def __init__(self, handle_path: str): self.handle_path = handle_path def __str__(self): - return f"cannot restore {self.handle_path} since no class was registered for it" + return f'cannot restore {self.handle_path} since no class was registered for it' # the message to show to the user when a pdb breakpoint goes active @@ -594,13 +593,18 @@ class Framework(Object): # to help the type checker and IDEs: if TYPE_CHECKING: + @property def on(self) -> 'FrameworkEvents': ... # noqa - def __init__(self, storage: Union[SQLiteStorage, JujuStorage], - charm_dir: Union[str, pathlib.Path], - meta: 'charm.CharmMeta', model: 'Model', - event_name: Optional[str] = None): + def __init__( + self, + storage: Union[SQLiteStorage, JujuStorage], + charm_dir: Union[str, pathlib.Path], + meta: 'charm.CharmMeta', + model: 'Model', + event_name: Optional[str] = None, + ): super().__init__(self, None) # an old, deprecated __init__ interface accepted an Optional charm_dir, @@ -630,11 +634,10 @@ def __init__(self, storage: Union[SQLiteStorage, JujuStorage], self._type_known: Set[Type[Serializable]] = set() if isinstance(storage, (str, pathlib.Path)): - logger.warning( - "deprecated: Framework now takes a Storage not a path") + logger.warning('deprecated: Framework now takes a Storage not a path') storage = SQLiteStorage(storage) # TODO(benhoyt): should probably have a Storage protocol - self._storage: 'SQLiteStorage' = storage # type: ignore + self._storage: SQLiteStorage = storage # type: ignore # We can't use the higher-level StoredState because it relies on events. 
self.register_type(StoredStateData, None, StoredStateData.handle_kind) @@ -684,8 +687,7 @@ def _track(self, obj: 'Serializable'): # Framework objects don't track themselves return if obj.handle.path in self.framework._objects: - raise RuntimeError( - f'two objects claiming to be {obj.handle.path} have been created') + raise RuntimeError(f'two objects claiming to be {obj.handle.path} have been created') self._objects[obj.handle.path] = obj def _forget(self, obj: 'Serializable'): @@ -703,8 +705,12 @@ def commit(self) -> None: self.save_snapshot(self._stored) self._storage.commit() - def register_type(self, cls: Type[Serializable], parent: Optional[Union['Handle', 'Object']], - kind: Optional[str] = None): + def register_type( + self, + cls: Type[Serializable], + parent: Optional[Union['Handle', 'Object']], + kind: Optional[str] = None, + ): """Register a type to a handle.""" parent_path: Optional[str] = None if isinstance(parent, Object): @@ -716,11 +722,12 @@ def register_type(self, cls: Type[Serializable], parent: Optional[Union['Handle' self._type_registry[(parent_path, kind_)] = cls self._type_known.add(cls) - def save_snapshot(self, value: Union["StoredStateData", "EventBase"]): + def save_snapshot(self, value: Union['StoredStateData', 'EventBase']): """Save a persistent snapshot of the provided value.""" if type(value) not in self._type_known: raise RuntimeError( - f'cannot save {type(value).__name__} values before registering that type') + f'cannot save {type(value).__name__} values before registering that type' + ) data = value.snapshot() # Use marshal as a validator, enforcing the use of simple types, as we later the @@ -731,7 +738,7 @@ def save_snapshot(self, value: Union["StoredStateData", "EventBase"]): try: marshal.dumps(data) except ValueError: - msg = "unable to save the data for {}, it must contain only simple types: {!r}" + msg = 'unable to save the data for {}, it must contain only simple types: {!r}' raise ValueError(msg.format(value.__class__.__name__, data)) from None self._storage.save_snapshot(value.handle.path, data) @@ -778,10 +785,12 @@ class SomeObject: """ if not isinstance(bound_event, BoundEvent): raise TypeError( - f'Framework.observe requires a BoundEvent as second parameter, got {bound_event}') + f'Framework.observe requires a BoundEvent as second parameter, got {bound_event}' + ) if not isinstance(observer, types.MethodType): raise TypeError( - f"Framework.observe requires a method as the 'observer' parameter, got {observer}") + f"Framework.observe requires a method as the 'observer' parameter, got {observer}" + ) event_type = bound_event.event_type event_kind = bound_event.event_kind @@ -789,16 +798,18 @@ class SomeObject: self.register_type(event_type, emitter, event_kind) # type: ignore - if hasattr(emitter, "handle"): + if hasattr(emitter, 'handle'): emitter_path = emitter.handle.path else: raise TypeError( - f'event emitter {type(emitter).__name__} must have a "handle" attribute') + f'event emitter {type(emitter).__name__} must have a "handle" attribute' + ) method_name = observer.__name__ - assert isinstance(observer.__self__, Object), "can't register observers " \ - "that aren't `Object`s" + assert isinstance(observer.__self__, Object), ( + "can't register observers " "that aren't `Object`s" + ) observer_obj = observer.__self__ # Validate that the method has an acceptable call signature. 
@@ -808,13 +819,13 @@ class SomeObject: except TypeError as e: raise TypeError( f'{type(observer_obj).__name__}.{method_name} must be callable with ' - "only 'self' and the 'event'") from e + "only 'self' and the 'event'" + ) from e # TODO Prevent the exact same parameters from being registered more than once. self._observer[observer_obj.handle.path] = observer_obj - self._observers.append((observer_obj.handle.path, - method_name, emitter_path, event_kind)) + self._observers.append((observer_obj.handle.path, method_name, emitter_path, event_kind)) def _next_event_key(self) -> str: """Return the next event key that should be used, incrementing the internal counter.""" @@ -829,7 +840,7 @@ def _emit(self, event: EventBase): event_path = event.handle.path event_kind = event.handle.kind parent = event.handle.parent - assert isinstance(parent, Handle), "event handle must have a parent" + assert isinstance(parent, Handle), 'event handle must have a parent' parent_path = parent.path # TODO Track observers by (parent_path, event_kind) rather than as a list of # all observers. Avoiding linear search through all observers for every event @@ -912,7 +923,7 @@ def _reemit(self, single_event_path: Optional[str] = None): if observer: if single_event_path is None: - logger.debug("Re-emitting deferred event %s.", event) + logger.debug('Re-emitting deferred event %s.', event) elif isinstance(event, LifecycleEvent): # Ignore Lifecycle events: they are "private" and not interesting. pass @@ -921,7 +932,7 @@ def _reemit(self, single_event_path: Optional[str] = None): # dispatched, and it also is not an event we have deferred, # and is also not a lifecycle (framework-emitted) event, # it must be a custom event - logger.debug("Emitting custom event %s.", event) + logger.debug('Emitting custom event %s.', event) custom_handler = getattr(observer, method_name, None) if custom_handler: @@ -940,9 +951,9 @@ def _reemit(self, single_event_path: Optional[str] = None): else: logger.warning( - f"Reference to ops.Object at path {observer_path} has been garbage collected " - "between when the charm was initialised and when the event was emitted. " - "Make sure sure you store a reference to the observer." + f'Reference to ops.Object at path {observer_path} has been garbage collected ' + 'between when the charm was initialised and when the event was emitted. ' + 'Make sure sure you store a reference to the observer.' ) if event.deferred: @@ -999,8 +1010,10 @@ def breakpoint(self, name: Optional[str] = None): pdb.Pdb().set_trace(code_frame) else: logger.warning( - "Breakpoint %r skipped (not found in the requested breakpoints: %s)", - name, indicated_breakpoints) + 'Breakpoint %r skipped (not found in the requested breakpoints: %s)', + name, + indicated_breakpoints, + ) def remove_unreferenced_events(self) -> None: """Remove events from storage that are not referenced. @@ -1057,6 +1070,7 @@ def on_commit(self, event: EventBase) -> None: class BoundStoredState: """Stored state data bound to a specific Object.""" + if TYPE_CHECKING: # to help the type checker and IDEs: @property @@ -1075,8 +1089,8 @@ def __init__(self, parent: Object, attr_name: str): data = StoredStateData(parent, attr_name) # __dict__ is used to avoid infinite recursion. 
- self.__dict__["_data"] = data - self.__dict__["_attr_name"] = attr_name + self.__dict__['_data'] = data + self.__dict__['_attr_name'] = attr_name parent.framework.observe(parent.framework.on.commit, self._data.on_commit) # type: ignore @@ -1090,14 +1104,14 @@ def __getattr__(self, key: str) -> Any: def __getattr__(self, key: str) -> Any: # "on" is the only reserved key that can't be used in the data map. - if key == "on": + if key == 'on': return self._data.on if key not in self._data: raise AttributeError(f"attribute '{key}' is not stored") return _wrap_stored(self._data, self._data[key]) def __setattr__(self, key: str, value: Any): - if key == "on": + if key == 'on': raise AttributeError("attribute 'on' is reserved and cannot be set") unwrapped = _unwrap_stored(self._data, value) @@ -1105,7 +1119,8 @@ def __setattr__(self, key: str, value: Any): if not isinstance(unwrapped, (type(None), int, float, str, bytes, list, dict, set)): raise AttributeError( f'attribute {key!r} cannot be a {type(unwrapped).__name__}: ' - 'must be int/float/dict/list/etc') + 'must be int/float/dict/list/etc' + ) self._data[key] = unwrapped @@ -1146,28 +1161,22 @@ def __init__(self): self.attr_name: Optional[str] = None @typing.overload - def __get__( - self, - parent: Literal[None], - parent_type: 'Type[_ObjectType]') -> 'StoredState': + def __get__(self, parent: Literal[None], parent_type: 'Type[_ObjectType]') -> 'StoredState': pass @typing.overload - def __get__( - self, - parent: '_ObjectType', - parent_type: 'Type[_ObjectType]') -> BoundStoredState: + def __get__(self, parent: '_ObjectType', parent_type: 'Type[_ObjectType]') -> BoundStoredState: pass - def __get__(self, - parent: Optional['_ObjectType'], - parent_type: 'Type[_ObjectType]') -> Union['StoredState', - BoundStoredState]: + def __get__( + self, parent: Optional['_ObjectType'], parent_type: 'Type[_ObjectType]' + ) -> Union['StoredState', BoundStoredState]: if self.parent_type is not None and self.parent_type not in parent_type.mro(): # the StoredState instance is being shared between two unrelated classes # -> unclear what is expected of us -> bail out raise RuntimeError( - f'StoredState shared by {self.parent_type.__name__} and {parent_type.__name__}') + f'StoredState shared by {self.parent_type.__name__} and {parent_type.__name__}' + ) if parent is None: # accessing via the class directly (e.g. 
MyClass.stored)
@@ -1191,7 +1200,8 @@ def __get__(self,
                     # attributes -> unclear what is expected of us -> bail out
                     raise RuntimeError(
                         f'StoredState shared by {cls.__name__}.{self.attr_name} and '
-                        f'{cls.__name__}.{attr_name}')
+                        f'{cls.__name__}.{attr_name}'
+                    )
                 # we've found ourselves for the first time; save where, and bind the object
                 self.attr_name = attr_name
                 self.parent_type = cls
@@ -1208,7 +1218,8 @@ def __get__(self,
             return bound
 
         raise AttributeError(
-            f'cannot find {self.__class__.__name__} attribute in type {parent_type.__name__}')
+            f'cannot find {self.__class__.__name__} attribute in type {parent_type.__name__}'
+        )
 
 
 def _wrap_stored(parent_data: StoredStateData, value: Any) -> Any:
@@ -1230,9 +1241,9 @@ def _unwrap_stored(parent_data: StoredStateData, value: Any) -> Any:
 def _wrapped_repr(obj: '_StoredObject') -> str:
     t = type(obj)
     if obj._under:
-        return f"{t.__module__}.{t.__name__}({obj._under!r})"  # type: ignore
+        return f'{t.__module__}.{t.__name__}({obj._under!r})'  # type: ignore
     else:
-        return f"{t.__module__}.{t.__name__}()"
+        return f'{t.__module__}.{t.__name__}()'
 
 
 class StoredDict(typing.MutableMapping[Hashable, Any]):
diff --git a/ops/jujuversion.py b/ops/jujuversion.py
index ec701c660..8e25b3eba 100644
--- a/ops/jujuversion.py
+++ b/ops/jujuversion.py
@@ -30,11 +30,14 @@ class JujuVersion:
     operators.
     """
 
-    _pattern_re = re.compile(r'''^
+    _pattern_re = re.compile(
+        r"""^
    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))?  # sometimes with . or - <tag> and <patch>
    (\.(?P<build>\d{1,9}))?$              # and sometimes with a <build> number.
-    ''', re.VERBOSE)
+    """,
+        re.VERBOSE,
+    )
 
     def __init__(self, version: str):
         m = self._pattern_re.match(version)
@@ -69,7 +72,8 @@ def __eq__(self, other: Union[str, 'JujuVersion']) -> bool:
             and self.minor == other.minor
             and self.tag == other.tag
             and self.build == other.build
-            and self.patch == other.patch)
+            and self.patch == other.patch
+        )
 
     def __lt__(self, other: Union[str, 'JujuVersion']) -> bool:
         if self is other:
diff --git a/ops/lib/__init__.py b/ops/lib/__init__.py
index 0ea46c361..8e0af1bad 100644
--- a/ops/lib/__init__.py
+++ b/ops/lib/__init__.py
@@ -39,11 +39,11 @@
 _libraries = None
 
-_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''')
-_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''')
+_libline_re = re.compile(r"""^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])""")
+_libname_re = re.compile(r"""^[a-z][a-z0-9]+$""")
 
 # Not perfect, but should do for now.
-_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''')
+_libauthor_re = re.compile(r"""^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$""")
 
 
 def use(name: str, api: int, author: str) -> ModuleType:
@@ -64,20 +64,21 @@ def use(name: str, api: int, author: str) -> ModuleType:
     This function is deprecated. Prefer charm libraries instead
     (https://juju.is/docs/sdk/library).
""" - warnings.warn("ops.lib is deprecated, prefer charm libraries instead", - category=DeprecationWarning) + warnings.warn( + 'ops.lib is deprecated, prefer charm libraries instead', category=DeprecationWarning + ) if not isinstance(name, str): - raise TypeError(f"invalid library name: {name!r} (must be a str)") + raise TypeError(f'invalid library name: {name!r} (must be a str)') if not isinstance(author, str): - raise TypeError(f"invalid library author: {author!r} (must be a str)") + raise TypeError(f'invalid library author: {author!r} (must be a str)') if not isinstance(api, int): - raise TypeError(f"invalid library API: {api!r} (must be an int)") + raise TypeError(f'invalid library API: {api!r} (must be an int)') if api < 0: raise ValueError(f'invalid library api: {api} (must be ≥0)') if not _libname_re.match(name): - raise ValueError(f"invalid library name: {name!r} (chars and digits only)") + raise ValueError(f'invalid library name: {name!r} (chars and digits only)') if not _libauthor_re.match(author): - raise ValueError(f"invalid library author email: {author!r}") + raise ValueError(f'invalid library author email: {author!r}') if _libraries is None: autoimport() @@ -107,8 +108,9 @@ def autoimport(): This function is deprecated. Prefer charm libraries instead (https://juju.is/docs/sdk/library). """ - warnings.warn("ops.lib is deprecated, prefer charm libraries instead", - category=DeprecationWarning) + warnings.warn( + 'ops.lib is deprecated, prefer charm libraries instead', category=DeprecationWarning + ) global _libraries _libraries = {} for spec in _find_all_specs(sys.path): @@ -123,8 +125,8 @@ def autoimport(): def _find_all_specs(path: typing.Iterable[str]) -> typing.Iterator[ModuleSpec]: for sys_dir in path: - if sys_dir == "": - sys_dir = "." + if sys_dir == '': + sys_dir = '.' 
try:
             top_dirs = os.listdir(sys_dir)
         except (FileNotFoundError, NotADirectoryError):
@@ -149,20 +151,20 @@ def _find_all_specs(path: typing.Iterable[str]) -> typing.Iterator[ModuleSpec]:
                 logger.debug("  Finder for '%s' is None", opslib)
                 continue
             if not hasattr(finder, 'find_spec'):
                 logger.debug("  Finder for '%s' has no find_spec", opslib)
                 continue
             for lib_dir in lib_dirs:
-                spec_name = f"{top_dir}.opslib.{lib_dir}"
+                spec_name = f'{top_dir}.opslib.{lib_dir}'
                 spec = finder.find_spec(spec_name)
                 if spec is None:
-                    logger.debug("  No spec for %r", spec_name)
+                    logger.debug('  No spec for %r', spec_name)
                     continue
                 if spec.loader is None:
                     # a namespace package; not supported
-                    logger.debug("  No loader for %r (probably a namespace package)", spec_name)
+                    logger.debug('  No loader for %r (probably a namespace package)', spec_name)
                     continue
 
-                logger.debug("  Found %r", spec_name)
+                logger.debug('  Found %r', spec_name)
                 yield spec
 
 
@@ -174,7 +176,7 @@ def _find_all_specs(path: typing.Iterable[str]) -> typing.Iterator[ModuleSpec]:
 
 def _join_and(keys: List[str]) -> str:
     if len(keys) == 0:
-        return ""
+        return ''
     if len(keys) == 1:
         return keys[0]
     all_except_last = ', '.join(keys[:-1])
@@ -192,17 +194,17 @@ def __str__(self):
         exp = set(_NEEDED_KEYS)
         got = set(self._found)
         if len(got) == 0:
-            return f"missing {_join_and(sorted(exp))}"
-        return f"got {_join_and(sorted(got))}, but missing {_join_and(sorted(exp - got))}"
+            return f'missing {_join_and(sorted(exp))}'
+        return f'got {_join_and(sorted(got))}, but missing {_join_and(sorted(exp - got))}'
 
 
-def _parse_lib(spec: ModuleSpec) -> typing.Optional["_Lib"]:
+def _parse_lib(spec: ModuleSpec) -> typing.Optional['_Lib']:
     if spec.origin is None:
         # "can't happen"
-        logger.warning("No origin for %r (no idea why; please report)", spec.name)
+        logger.warning('No origin for %r (no idea why; please report)', spec.name)
         return None
 
-    logger.debug("  Parsing %r", spec.name)
+    logger.debug('  Parsing %r', spec.name)
 
     try:
         with open(spec.origin, encoding='utf-8') as f:
@@ -212,8 +214,10 @@ def _parse_lib(spec: ModuleSpec) -> typing.Optional["_Lib"]:
                     break
                 if n > _MAX_LIB_LINES:
                     logger.debug(
-                        "  Missing opslib metadata after reading to line %d: %s",
-                        _MAX_LIB_LINES, _Missing(libinfo))
+                        '  Missing opslib metadata after reading to line %d: %s',
+                        _MAX_LIB_LINES,
+                        _Missing(libinfo),
+                    )
                     return None
                 m = _libline_re.match(line)
                 if m is None:
@@ -223,28 +227,31 @@ def _parse_lib(spec: ModuleSpec) -> typing.Optional["_Lib"]:
                     value = literal_eval(value)
                     if not isinstance(value, _NEEDED_KEYS[key]):
                         logger.debug(
-                            "  Bad type for %s: expected %s, got %s",
-                            key, _NEEDED_KEYS[key].__name__, type(value).__name__)
+                            '  Bad type for %s: expected %s, got %s',
+                            key,
+                            _NEEDED_KEYS[key].__name__,
+                            type(value).__name__,
+                        )
                         return None
                     libinfo[key] = value
             else:
                 if len(libinfo) != len(_NEEDED_KEYS):
                     logger.debug(
-                        "  Missing opslib metadata after reading to end of file: %s",
-                        _Missing(libinfo))
+                        '  Missing opslib metadata after reading to end of file: %s',
+                        _Missing(libinfo),
+                    )
                     return None
     except Exception as e:
-        logger.debug("  Failed: %s", e)
+        logger.debug('  Failed: %s', e)
         return None
 
     lib = _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH'])
-    logger.debug("  Success: found library %s", lib)
+    logger.debug('  Success: found library %s', lib)
 
     return lib
 
 
 class _Lib:
-
     def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int):
         self.spec = spec
         self.name = name
@@ -255,10 +262,10 @@ def 
__init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: in self._module = None def __repr__(self): - return f"<_Lib {self}>" + return f'<_Lib {self}>' def __str__(self): - return f"{self.name} by {self.author}, API {self.api}, patch {self.patch}" + return f'{self.name} by {self.author}, API {self.api}, patch {self.patch}' def import_module(self) -> ModuleType: if self._module is None: diff --git a/ops/log.py b/ops/log.py index d64aa4a8d..91734da31 100644 --- a/ops/log.py +++ b/ops/log.py @@ -38,8 +38,9 @@ def emit(self, record: logging.LogRecord): self.model_backend.juju_log(record.levelname, self.format(record)) -def setup_root_logging(model_backend: _ModelBackend, debug: bool = False, - exc_stderr: bool = False): +def setup_root_logging( + model_backend: _ModelBackend, debug: bool = False, exc_stderr: bool = False +): """Setup python logging to forward messages to juju-log. By default, logging is set to DEBUG level, and messages will be filtered by Juju. @@ -61,15 +62,12 @@ def setup_root_logging(model_backend: _ModelBackend, debug: bool = False, handler.setFormatter(formatter) logger.addHandler(handler) - def except_hook(etype: typing.Type[BaseException], - value: BaseException, - tb: types.TracebackType): - logger.error( - "Uncaught exception while in charm code:", - exc_info=(etype, value, tb)) + def except_hook( + etype: typing.Type[BaseException], value: BaseException, tb: types.TracebackType + ): + logger.error('Uncaught exception while in charm code:', exc_info=(etype, value, tb)) if exc_stderr: - print(f"Uncaught {etype.__name__} in charm code: {value}", - file=sys.stderr) - print("Use `juju debug-log` to see the full traceback.", file=sys.stderr) + print(f'Uncaught {etype.__name__} in charm code: {value}', file=sys.stderr) + print('Use `juju debug-log` to see the full traceback.', file=sys.stderr) sys.excepthook = except_hook diff --git a/ops/main.py b/ops/main.py index 7c603333b..d94a19f6e 100644 --- a/ops/main.py +++ b/ops/main.py @@ -18,6 +18,7 @@ This is so that :code:`import ops` followed by :code:`ops.main(MyCharm)` works as expected. """ + import logging import os import shutil @@ -53,7 +54,7 @@ def _exe_path(path: Path) -> Optional[Path]: def _get_charm_dir(): - charm_dir = os.environ.get("JUJU_CHARM_DIR") + charm_dir = os.environ.get('JUJU_CHARM_DIR') if charm_dir is None: # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. charm_dir = Path(f'{__file__}/../../..').resolve() @@ -62,8 +63,11 @@ def _get_charm_dir(): return charm_dir -def _create_event_link(charm: 'ops.charm.CharmBase', bound_event: 'ops.framework.EventSource', - link_to: Union[str, Path]): +def _create_event_link( + charm: 'ops.charm.CharmBase', + bound_event: 'ops.framework.EventSource', + link_to: Union[str, Path], +): """Create a symlink for a particular event. 
Args: @@ -72,21 +76,21 @@ def _create_event_link(charm: 'ops.charm.CharmBase', bound_event: 'ops.framework link_to: What the event link should point to """ # type guard - assert bound_event.event_kind, f"unbound EventSource {bound_event}" + assert bound_event.event_kind, f'unbound EventSource {bound_event}' if issubclass(bound_event.event_type, ops.charm.HookEvent): event_dir = charm.framework.charm_dir / 'hooks' event_path = event_dir / bound_event.event_kind.replace('_', '-') elif issubclass(bound_event.event_type, ops.charm.ActionEvent): - if not bound_event.event_kind.endswith("_action"): - raise RuntimeError( - f'action event name {bound_event.event_kind} needs _action suffix') + if not bound_event.event_kind.endswith('_action'): + raise RuntimeError(f'action event name {bound_event.event_kind} needs _action suffix') event_dir = charm.framework.charm_dir / 'actions' # The event_kind is suffixed with "_action" while the executable is not. - event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + event_path = event_dir / bound_event.event_kind[: -len('_action')].replace('_', '-') else: raise RuntimeError( - f'cannot create a symlink: unsupported event type {bound_event.event_type}') + f'cannot create a symlink: unsupported event type {bound_event.event_type}' + ) event_dir.mkdir(exist_ok=True) if not event_path.exists(): @@ -95,8 +99,8 @@ def _create_event_link(charm: 'ops.charm.CharmBase', bound_event: 'ops.framework # Ignore the non-symlink files or directories # assuming the charm author knows what they are doing. logger.debug( - 'Creating a new relative symlink at %s pointing to %s', - event_path, target_path) + 'Creating a new relative symlink at %s pointing to %s', event_path, target_path + ) event_path.symlink_to(target_path) @@ -115,7 +119,7 @@ def _setup_event_links(charm_dir: Path, charm: 'ops.charm.CharmBase'): charm: An instance of the Charm class. """ - link_to = os.path.realpath(os.environ.get("JUJU_DISPATCH_PATH", sys.argv[0])) + link_to = os.path.realpath(os.environ.get('JUJU_DISPATCH_PATH', sys.argv[0])) for bound_event in charm.on.events().values(): # Only events that originate from Juju need symlinks. if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): @@ -133,7 +137,7 @@ def _emit_charm_event(charm: 'ops.charm.CharmBase', event_name: str): try: event_to_emit = getattr(charm.on, event_name) except AttributeError: - logger.debug("Event %s not defined for %s.", event_name, charm) + logger.debug('Event %s not defined for %s.', event_name, charm) # If the event is not supported by the charm implementation, do # not error out or try to emit it. This is to support rollbacks. 
@@ -147,8 +151,9 @@ def _get_juju_relation_id(): return int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) -def _get_event_args(charm: 'ops.charm.CharmBase', - bound_event: 'ops.framework.BoundEvent') -> Tuple[List[Any], Dict[str, Any]]: +def _get_event_args( + charm: 'ops.charm.CharmBase', bound_event: 'ops.framework.BoundEvent' +) -> Tuple[List[Any], Dict[str, Any]]: event_type = bound_event.event_type model = charm.framework.model @@ -172,13 +177,13 @@ def _get_event_args(charm: 'ops.charm.CharmBase', args.append(int(os.environ['JUJU_SECRET_REVISION'])) return args, {} elif issubclass(event_type, ops.charm.StorageEvent): - storage_id = os.environ.get("JUJU_STORAGE_ID", "") + storage_id = os.environ.get('JUJU_STORAGE_ID', '') if storage_id: - storage_name = storage_id.split("/")[0] + storage_name = storage_id.split('/')[0] else: # Before JUJU_STORAGE_ID exists, take the event name as # _storage_ and replace it with - storage_name = "-".join(bound_event.event_kind.split("_")[:-2]) + storage_name = '-'.join(bound_event.event_kind.split('_')[:-2]) storages = model.storages[storage_name] index, storage_location = model._backend._storage_event_details() @@ -259,8 +264,9 @@ def ensure_event_links(self, charm: 'ops.charm.CharmBase'): # # 'start' event is included as Juju does not fire the install event for # K8s charms (see LP: #1854635). - if (self.event_name in ('install', 'start', 'upgrade_charm') - or self.event_name.endswith('_storage_attached')): + if self.event_name in ('install', 'start', 'upgrade_charm') or self.event_name.endswith( + '_storage_attached' + ): _setup_event_links(self._charm_dir, charm) def run_any_legacy_hook(self): @@ -279,26 +285,26 @@ def run_any_legacy_hook(self): # super strange that there isn't an is_executable if not os.access(str(dispatch_path), os.X_OK): - logger.warning("Legacy %s exists but is not executable.", self._dispatch_path) + logger.warning('Legacy %s exists but is not executable.', self._dispatch_path) return if dispatch_path.resolve() == Path(sys.argv[0]).resolve(): - logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path) + logger.debug('Legacy %s is just a link to ourselves.', self._dispatch_path) return argv = sys.argv.copy() argv[0] = str(dispatch_path) - logger.info("Running legacy %s.", self._dispatch_path) + logger.info('Running legacy %s.', self._dispatch_path) try: subprocess.run(argv, check=True) except subprocess.CalledProcessError as e: - logger.warning("Legacy %s exited with status %d.", self._dispatch_path, e.returncode) + logger.warning('Legacy %s exited with status %d.', self._dispatch_path, e.returncode) raise _Abort(e.returncode) from e except OSError as e: - logger.warning("Unable to run legacy %s: %s", self._dispatch_path, e) + logger.warning('Unable to run legacy %s: %s', self._dispatch_path, e) raise _Abort(1) from e else: - logger.debug("Legacy %s exited with status 0.", self._dispatch_path) + logger.debug('Legacy %s exited with status 0.', self._dispatch_path) def _set_name_from_path(self, path: Path): """Sets the name attribute to that which can be inferred from the given path.""" @@ -328,7 +334,7 @@ def _init_dispatch(self): self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH']) if 'OPERATOR_DISPATCH' in os.environ: - logger.debug("Charm called itself via %s.", self._dispatch_path) + logger.debug('Charm called itself via %s.', self._dispatch_path) raise _Abort(0) os.environ['OPERATOR_DISPATCH'] = '1' @@ -354,17 +360,17 @@ def _should_use_controller_storage(db_path: Path, meta: CharmMeta) -> bool: # 
only use controller storage for Kubernetes podspec charms is_podspec = 'kubernetes' in meta.series if not is_podspec: - logger.debug("Using local storage: not a Kubernetes podspec charm") + logger.debug('Using local storage: not a Kubernetes podspec charm') return False # are we in a new enough Juju? cur_version = JujuVersion.from_environ() if cur_version.has_controller_storage(): - logger.debug("Using controller storage: JUJU_VERSION=%s", cur_version) + logger.debug('Using controller storage: JUJU_VERSION=%s', cur_version) return True else: - logger.debug("Using local storage: JUJU_VERSION=%s", cur_version) + logger.debug('Using local storage: JUJU_VERSION=%s', cur_version) return False @@ -396,13 +402,12 @@ class _Manager: """ def __init__( - self, - charm_class: Type["ops.charm.CharmBase"], - model_backend: Optional[ops.model._ModelBackend] = None, - use_juju_for_storage: Optional[bool] = None, - charm_state_path: str = CHARM_STATE_FILE + self, + charm_class: Type['ops.charm.CharmBase'], + model_backend: Optional[ops.model._ModelBackend] = None, + use_juju_for_storage: Optional[bool] = None, + charm_state_path: str = CHARM_STATE_FILE, ): - self._charm_state_path = charm_state_path self._charm_class = charm_class if model_backend is None: @@ -423,20 +428,20 @@ def __init__( self.framework = self._make_framework(self.dispatcher) self.charm = self._make_charm(self.framework, self.dispatcher) - def _make_charm(self, framework: "ops.framework.Framework", dispatcher: _Dispatcher): + def _make_charm(self, framework: 'ops.framework.Framework', dispatcher: _Dispatcher): charm = self._charm_class(framework) dispatcher.ensure_event_links(charm) return charm def _setup_root_logging(self): - debug = "JUJU_DEBUG" in os.environ + debug = 'JUJU_DEBUG' in os.environ # For actions, there is a communication channel with the user running the # action, so we want to send exception details through stderr, rather than # only to juju-log as normal. handling_action = 'JUJU_ACTION_NAME' in os.environ setup_root_logging(self._model_backend, debug=debug, exc_stderr=handling_action) - logger.debug("ops %s up and running.", ops.__version__) # type:ignore + logger.debug('ops %s up and running.', ops.__version__) # type:ignore def _make_storage(self, dispatcher: _Dispatcher): charm_state_path = self._charm_root / self._charm_state_path @@ -449,22 +454,25 @@ def _make_storage(self, dispatcher: _Dispatcher): if use_juju_for_storage is None: use_juju_for_storage = _should_use_controller_storage( - charm_state_path, - self._charm_meta + charm_state_path, self._charm_meta ) elif use_juju_for_storage: - warnings.warn("Controller storage is deprecated; it's intended for " - "podspec charms and will be removed in a future release.", - category=DeprecationWarning) + warnings.warn( + "Controller storage is deprecated; it's intended for " + 'podspec charms and will be removed in a future release.', + category=DeprecationWarning, + ) if use_juju_for_storage and dispatcher.is_restricted_context(): # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event # Though we eventually expect that Juju will run collect-metrics in a # non-restricted context. Once we can determine that we are running # collect-metrics in a non-restricted context, we should fire the event as normal. 
- logger.debug('"%s" is not supported when using Juju for storage\n' - 'see: https://github.com/canonical/operator/issues/348', - dispatcher.event_name) + logger.debug( + '"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name, + ) # Note that we don't exit nonzero, because that would cause Juju to rerun the hook raise _Abort(0) @@ -474,10 +482,7 @@ def _make_storage(self, dispatcher: _Dispatcher): store = ops.storage.SQLiteStorage(charm_state_path) return store - def _make_framework( - self, - dispatcher: _Dispatcher - ): + def _make_framework(self, dispatcher: _Dispatcher): # If we are in a RelationBroken event, we want to know which relation is # broken within the model, not only in the event's `.relation` attribute. if os.environ.get('JUJU_DISPATCH_PATH', '').endswith('-relation-broken'): @@ -485,11 +490,13 @@ def _make_framework( else: broken_relation_id = None - model = ops.model.Model(self._charm_meta, self._model_backend, - broken_relation_id=broken_relation_id) + model = ops.model.Model( + self._charm_meta, self._model_backend, broken_relation_id=broken_relation_id + ) store = self._make_storage(dispatcher) - framework = ops.framework.Framework(store, self._charm_root, self._charm_meta, model, - event_name=dispatcher.event_name) + framework = ops.framework.Framework( + store, self._charm_root, self._charm_meta, model, event_name=dispatcher.event_name + ) framework.set_breakpointhook() return framework @@ -523,8 +530,7 @@ def run(self): self.framework.close() -def main(charm_class: Type[ops.charm.CharmBase], - use_juju_for_storage: Optional[bool] = None): +def main(charm_class: Type[ops.charm.CharmBase], use_juju_for_storage: Optional[bool] = None): """Set up the charm and dispatch the observed event. The event name is based on the way this executable was called (argv[0]). @@ -537,9 +543,7 @@ def main(charm_class: Type[ops.charm.CharmBase], otherwise local storage is used. """ try: - manager = _Manager( - charm_class, - use_juju_for_storage=use_juju_for_storage) + manager = _Manager(charm_class, use_juju_for_storage=use_juju_for_storage) manager.run() except _Abort as e: @@ -550,8 +554,9 @@ def main(charm_class: Type[ops.charm.CharmBase], # "ops.main(Charm)" works as expected now that everything is imported in # ops/__init__.py. 
Idea from https://stackoverflow.com/a/48100440/68707 class _CallableModule(sys.modules[__name__].__class__): - def __call__(self, charm_class: Type[ops.charm.CharmBase], - use_juju_for_storage: Optional[bool] = None): + def __call__( + self, charm_class: Type[ops.charm.CharmBase], use_juju_for_storage: Optional[bool] = None + ): return main(charm_class, use_juju_for_storage=use_juju_for_storage) diff --git a/ops/model.py b/ops/model.py index 3bafa88ab..3275b4563 100644 --- a/ops/model.py +++ b/ops/model.py @@ -75,20 +75,25 @@ _RelationDataContent_Raw = Dict[str, str] UnitOrApplicationType = Union[Type['Unit'], Type['Application']] -_AddressDict = TypedDict('_AddressDict', { - 'address': str, # Juju < 2.9 - 'value': str, # Juju >= 2.9 - 'cidr': str -}) -_BindAddressDict = TypedDict('_BindAddressDict', { - 'interface-name': str, - 'addresses': List[_AddressDict] -}) -_NetworkDict = TypedDict('_NetworkDict', { - 'bind-addresses': List[_BindAddressDict], - 'ingress-addresses': List[str], - 'egress-subnets': List[str] -}) +_AddressDict = TypedDict( + '_AddressDict', + { + 'address': str, # Juju < 2.9 + 'value': str, # Juju >= 2.9 + 'cidr': str, + }, +) +_BindAddressDict = TypedDict( + '_BindAddressDict', {'interface-name': str, 'addresses': List[_AddressDict]} +) +_NetworkDict = TypedDict( + '_NetworkDict', + { + 'bind-addresses': List[_BindAddressDict], + 'ingress-addresses': List[str], + 'egress-subnets': List[str], + }, +) logger = logging.getLogger(__name__) @@ -103,14 +108,19 @@ class Model: as ``self.model`` from any class that derives from :class:`Object`. """ - def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend', - broken_relation_id: Optional[int] = None): + def __init__( + self, + meta: 'ops.charm.CharmMeta', + backend: '_ModelBackend', + broken_relation_id: Optional[int] = None, + ): self._cache = _ModelCache(meta, backend) self._backend = backend self._unit = self.get_unit(self._backend.unit_name) - relations: Dict[str, 'ops.RelationMeta'] = meta.relations - self._relations = RelationMapping(relations, self.unit, self._backend, self._cache, - broken_relation_id=broken_relation_id) + relations: Dict[str, ops.RelationMeta] = meta.relations + self._relations = RelationMapping( + relations, self.unit, self._backend, self._cache, broken_relation_id=broken_relation_id + ) self._config = ConfigData(self._backend) resources: Iterable[str] = meta.resources self._resources = Resources(list(resources), self._backend) @@ -215,8 +225,8 @@ def get_app(self, app_name: str) -> 'Application': return self._cache.get(Application, app_name) def get_relation( - self, relation_name: str, - relation_id: Optional[int] = None) -> Optional['Relation']: + self, relation_name: str, relation_id: Optional[int] = None + ) -> Optional['Relation']: """Get a specific Relation instance. If relation_id is not given, this will return the Relation instance if the @@ -293,8 +303,8 @@ def get_cloud_spec(self) -> 'CloudSpec': if typing.TYPE_CHECKING: # (entity type, name): instance. _WeakCacheType = weakref.WeakValueDictionary[ - Tuple['UnitOrApplicationType', str], - Optional[Union['Unit', 'Application']]] + Tuple['UnitOrApplicationType', str], Optional[Union['Unit', 'Application']] + ] class _ModelCache: @@ -334,14 +344,15 @@ class Application: the charm, if the user has deployed it to a different name. 
""" - def __init__(self, name: str, meta: 'ops.charm.CharmMeta', - backend: '_ModelBackend', cache: _ModelCache): + def __init__( + self, name: str, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend', cache: _ModelCache + ): self.name = name self._backend = backend self._cache = cache self._is_our_app = self.name == self._backend.app_name self._status = None - self._collected_statuses: 'List[StatusBase]' = [] + self._collected_statuses: List[StatusBase] = [] def _invalidate(self): self._status = None @@ -420,19 +431,22 @@ def planned_units(self) -> int: RuntimeError: on trying to get the planned units for a remote application. """ if not self._is_our_app: - raise RuntimeError( - f'cannot get planned units for a remote application {self}.') + raise RuntimeError(f'cannot get planned units for a remote application {self}.') return self._backend.planned_units() def __repr__(self): return f'<{type(self).__module__}.{type(self).__name__} {self.name}>' - def add_secret(self, content: Dict[str, str], *, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[Union[datetime.datetime, datetime.timedelta]] = None, - rotate: Optional['SecretRotate'] = None) -> 'Secret': + def add_secret( + self, + content: Dict[str, str], + *, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[Union[datetime.datetime, datetime.timedelta]] = None, + rotate: Optional['SecretRotate'] = None, + ) -> 'Secret': """Create a :class:`Secret` owned by this application. Args: @@ -459,12 +473,14 @@ def add_secret(self, content: Dict[str, str], *, description=description, expire=_calculate_expiry(expire), rotate=rotate, - owner='application') + owner='application', + ) return Secret(self._backend, id=id, label=label, content=content) -def _calculate_expiry(expire: Optional[Union[datetime.datetime, datetime.timedelta]], - ) -> Optional[datetime.datetime]: +def _calculate_expiry( + expire: Optional[Union[datetime.datetime, datetime.timedelta]], +) -> Optional[datetime.datetime]: if expire is None: return None if isinstance(expire, datetime.datetime): @@ -472,8 +488,10 @@ def _calculate_expiry(expire: Optional[Union[datetime.datetime, datetime.timedel elif isinstance(expire, datetime.timedelta): return datetime.datetime.now() + expire else: - raise TypeError('Expiration time must be a datetime or timedelta from now, not ' - + type(expire).__name__) + raise TypeError( + 'Expiration time must be a datetime or timedelta from now, ' + f'not {type(expire).__name__}' + ) class Unit: @@ -489,8 +507,13 @@ class Unit: app: Application """Application the unit is part of.""" - def __init__(self, name: str, meta: 'ops.charm.CharmMeta', - backend: '_ModelBackend', cache: '_ModelCache'): + def __init__( + self, + name: str, + meta: 'ops.charm.CharmMeta', + backend: '_ModelBackend', + cache: '_ModelCache', + ): self.name = name app_name = name.split('/')[0] @@ -500,9 +523,9 @@ def __init__(self, name: str, meta: 'ops.charm.CharmMeta', self._cache = cache self._is_our_unit = self.name == self._backend.unit_name self._status = None - self._collected_statuses: 'List[StatusBase]' = [] + self._collected_statuses: List[StatusBase] = [] - if self._is_our_unit and hasattr(meta, "containers"): + if self._is_our_unit and hasattr(meta, 'containers'): containers: _ContainerMeta_Raw = meta.containers self._containers = ContainerMapping(iter(containers), backend) @@ -544,9 +567,7 @@ def status(self) -> 'StatusBase': @status.setter def status(self, value: 'StatusBase'): if not isinstance(value, 
StatusBase): - raise InvalidStatusError( - f'invalid value provided for unit {self} status: {value}' - ) + raise InvalidStatusError(f'invalid value provided for unit {self} status: {value}') if not self._is_our_unit: raise RuntimeError(f'cannot set status for a remote unit {self}') @@ -583,7 +604,8 @@ def set_workload_version(self, version: str) -> None: """ if not isinstance(version, str): raise TypeError( - f'workload version must be a str, not {type(version).__name__}: {version!r}') + f'workload version must be a str, not {type(version).__name__}: {version!r}' + ) self._backend.application_version_set(version) @property @@ -608,11 +630,15 @@ def get_container(self, container_name: str) -> 'Container': except KeyError: raise ModelError(f'container {container_name!r} not found') from None - def add_secret(self, content: Dict[str, str], *, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[Union[datetime.datetime, datetime.timedelta]] = None, - rotate: Optional['SecretRotate'] = None) -> 'Secret': + def add_secret( + self, + content: Dict[str, str], + *, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[Union[datetime.datetime, datetime.timedelta]] = None, + rotate: Optional['SecretRotate'] = None, + ) -> 'Secret': """Create a :class:`Secret` owned by this unit. See :meth:`Application.add_secret` for parameter details. @@ -627,11 +653,13 @@ def add_secret(self, content: Dict[str, str], *, description=description, expire=_calculate_expiry(expire), rotate=rotate, - owner='unit') + owner='unit', + ) return Secret(self._backend, id=id, label=label, content=content) - def open_port(self, protocol: typing.Literal['tcp', 'udp', 'icmp'], - port: Optional[int] = None) -> None: + def open_port( + self, protocol: typing.Literal['tcp', 'udp', 'icmp'], port: Optional[int] = None + ) -> None: """Open a port with the given protocol for this unit. Some behaviour, such as whether the port is opened externally without @@ -657,8 +685,9 @@ def open_port(self, protocol: typing.Literal['tcp', 'udp', 'icmp'], """ self._backend.open_port(protocol.lower(), port) - def close_port(self, protocol: typing.Literal['tcp', 'udp', 'icmp'], - port: Optional[int] = None) -> None: + def close_port( + self, protocol: typing.Literal['tcp', 'udp', 'icmp'], port: Optional[int] = None + ) -> None: """Close a port with the given protocol for this unit. Some behaviour, such as whether the port is closed externally without @@ -711,10 +740,7 @@ def set_ports(self, *ports: Union[int, 'Port']) -> None: is ``None``. """ # Normalise to get easier comparisons. 
- existing = { - (port.protocol, port.port) - for port in self._backend.opened_ports() - } + existing = {(port.protocol, port.port) for port in self._backend.opened_ports()} desired = { ('tcp', port) if isinstance(port, int) else (port.protocol, port.port) for port in ports @@ -768,7 +794,7 @@ class Port: """ -_LazyValueType = typing.TypeVar("_LazyValueType") +_LazyValueType = typing.TypeVar('_LazyValueType') class _GenericLazyMapping(Mapping[str, _LazyValueType], ABC): @@ -822,12 +848,14 @@ class LazyMapping(_GenericLazyMapping[str]): class RelationMapping(Mapping[str, List['Relation']]): """Map of relation names to lists of :class:`Relation` instances.""" - def __init__(self, - relations_meta: Dict[str, 'ops.RelationMeta'], - our_unit: 'Unit', - backend: '_ModelBackend', - cache: '_ModelCache', - broken_relation_id: Optional[int]): + def __init__( + self, + relations_meta: Dict[str, 'ops.RelationMeta'], + our_unit: 'Unit', + backend: '_ModelBackend', + cache: '_ModelCache', + broken_relation_id: Optional[int], + ): self._peers: Set[str] = set() for name, relation_meta in relations_meta.items(): if relation_meta.role.is_peer(): @@ -855,8 +883,9 @@ def __getitem__(self, relation_name: str) -> List['Relation']: for rid in self._backend.relation_ids(relation_name): if rid == self._broken_relation_id: continue - relation = Relation(relation_name, rid, is_peer, - self._our_unit, self._backend, self._cache) + relation = Relation( + relation_name, rid, is_peer, self._our_unit, self._backend, self._cache + ) relation_list.append(relation) return relation_list @@ -874,15 +903,23 @@ def _get_unique(self, relation_name: str, relation_id: Optional[int] = None): if not isinstance(relation_id, int): raise ModelError( f'relation id {relation_id} must be int or None, ' - f'not {type(relation_id).__name__}') + f'not {type(relation_id).__name__}' + ) for relation in self[relation_name]: if relation.id == relation_id: return relation else: # The relation may be dead, but it is not forgotten. is_peer = relation_name in self._peers - return Relation(relation_name, relation_id, is_peer, - self._our_unit, self._backend, self._cache, active=False) + return Relation( + relation_name, + relation_id, + is_peer, + self._our_unit, + self._backend, + self._cache, + active=False, + ) relations = self[relation_name] num_related = len(relations) if num_related == 0: @@ -975,7 +1012,7 @@ def _cast_network_address(raw: str) -> Union[ipaddress.IPv4Address, ipaddress.IP try: return ipaddress.ip_address(raw) except ValueError: - logger.debug("could not cast %s to IPv4/v6 address", raw) + logger.debug('could not cast %s to IPv4/v6 address', raw) return raw @@ -1045,8 +1082,9 @@ def bind_address(self) -> Optional[Union[ipaddress.IPv4Address, ipaddress.IPv6Ad return None @property - def ingress_address(self) -> Optional[ - Union[ipaddress.IPv4Address, ipaddress.IPv6Address, str]]: + def ingress_address( + self, + ) -> Optional[Union[ipaddress.IPv4Address, ipaddress.IPv6Address, str]]: """The address other applications should use to connect to the current unit. 
Due to things like public/private addresses, NAT and tunneling, the address the charm @@ -1118,13 +1156,15 @@ class SecretRotate(enum.Enum): class SecretInfo: """Secret information (metadata).""" - def __init__(self, - id: str, - label: Optional[str], - revision: int, - expires: Optional[datetime.datetime], - rotation: Optional[SecretRotate], - rotates: Optional[datetime.datetime]): + def __init__( + self, + id: str, + label: Optional[str], + revision: int, + expires: Optional[datetime.datetime], + rotation: Optional[SecretRotate], + rotates: Optional[datetime.datetime], + ): self.id = Secret._canonicalize_id(id) self.label = label self.revision = revision @@ -1151,14 +1191,15 @@ def from_dict(cls, id: str, d: Dict[str, Any]) -> 'SecretInfo': ) def __repr__(self): - return ('SecretInfo(' - f'id={self.id!r}, ' - f'label={self.label!r}, ' - f'revision={self.revision}, ' - f'expires={self.expires!r}, ' - f'rotation={self.rotation}, ' - f'rotates={self.rotates!r})' - ) + return ( + 'SecretInfo(' + f'id={self.id!r}, ' + f'label={self.label!r}, ' + f'revision={self.revision}, ' + f'expires={self.expires!r}, ' + f'rotation={self.rotation}, ' + f'rotates={self.rotates!r})' + ) class Secret: @@ -1174,10 +1215,13 @@ class Secret: _key_re = re.compile(r'^([a-z](?:-?[a-z0-9]){2,})$') # copied from Juju code - def __init__(self, backend: '_ModelBackend', - id: Optional[str] = None, - label: Optional[str] = None, - content: Optional[Dict[str, str]] = None): + def __init__( + self, + backend: '_ModelBackend', + id: Optional[str] = None, + label: Optional[str] = None, + content: Optional[Dict[str, str]] = None, + ): if not (id or label): raise TypeError('Must provide an id or label, or both') if id is not None: @@ -1200,7 +1244,7 @@ def _canonicalize_id(id: str) -> str: """Return the canonical form of the given secret ID, with the 'secret:' prefix.""" id = id.strip() if not id.startswith('secret:'): - id = f"secret:{id}" # add the prefix if not there already + id = f'secret:{id}' # add the prefix if not there already return id @classmethod @@ -1225,12 +1269,15 @@ def _validate_content(cls, content: Optional[Dict[str, str]]): raise ValueError( f'Invalid secret keys: {invalid_keys}. ' f'Keys should be lowercase letters and digits, at least 3 characters long, ' - f'start with a letter, and not start or end with a hyphen.') + f'start with a letter, and not start or end with a hyphen.' + ) if invalid_value_keys: invalid_types = ' or '.join(sorted(invalid_value_types)) - raise TypeError(f'Invalid secret values for keys: {invalid_value_keys}. ' - f'Values should be of type str, not {invalid_types}.') + raise TypeError( + f'Invalid secret values for keys: {invalid_value_keys}. ' + f'Values should be of type str, not {invalid_types}.' + ) @property def id(self) -> Optional[str]: @@ -1273,7 +1320,7 @@ def unique_identifier(self) -> Optional[str]: if '/' in self._id: return self._id.rsplit('/', 1)[-1] elif self._id.startswith('secret:'): - return self._id[len('secret:'):] + return self._id[len('secret:') :] else: # Shouldn't get here as id is canonicalized, but just in case. return self._id @@ -1326,8 +1373,7 @@ def get_content(self, *, refresh: bool = False) -> Dict[str, str]: get the content of the currently-tracked revision. 
""" if refresh or self._content is None: - self._content = self._backend.secret_get( - id=self.id, label=self.label, refresh=refresh) + self._content = self._backend.secret_get(id=self.id, label=self.label, refresh=refresh) return self._content.copy() def peek_content(self) -> Dict[str, str]: @@ -1362,11 +1408,14 @@ def set_content(self, content: Dict[str, str]): self._backend.secret_set(typing.cast(str, self.id), content=content) self._content = None # invalidate cache so it's refetched next get_content() - def set_info(self, *, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[Union[datetime.datetime, datetime.timedelta]] = None, - rotate: Optional[SecretRotate] = None): + def set_info( + self, + *, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[Union[datetime.datetime, datetime.timedelta]] = None, + rotate: Optional[SecretRotate] = None, + ): """Update this secret's information (metadata). This will not create a new secret revision (that applies only to @@ -1380,15 +1429,18 @@ def set_info(self, *, effect only after the currently-scheduled rotation. """ if label is None and description is None and expire is None and rotate is None: - raise TypeError('Must provide a label, description, expiration time, ' - 'or rotation policy') + raise TypeError( + 'Must provide a label, description, expiration time, ' 'or rotation policy' + ) if self._id is None: self._id = self.get_info().id - self._backend.secret_set(typing.cast(str, self.id), - label=label, - description=description, - expire=_calculate_expiry(expire), - rotate=rotate) + self._backend.secret_set( + typing.cast(str, self.id), + label=label, + description=description, + expire=_calculate_expiry(expire), + rotate=rotate, + ) def grant(self, relation: 'Relation', *, unit: Optional[Unit] = None): """Grant read access to this secret. @@ -1404,9 +1456,8 @@ def grant(self, relation: 'Relation', *, unit: Optional[Unit] = None): if self._id is None: self._id = self.get_info().id self._backend.secret_grant( - typing.cast(str, self.id), - relation.id, - unit=unit.name if unit is not None else None) + typing.cast(str, self.id), relation.id, unit=unit.name if unit is not None else None + ) def revoke(self, relation: 'Relation', *, unit: Optional[Unit] = None): """Revoke read access to this secret. @@ -1422,9 +1473,8 @@ def revoke(self, relation: 'Relation', *, unit: Optional[Unit] = None): if self._id is None: self._id = self.get_info().id self._backend.secret_revoke( - typing.cast(str, self.id), - relation.id, - unit=unit.name if unit is not None else None) + typing.cast(str, self.id), relation.id, unit=unit.name if unit is not None else None + ) def remove_revision(self, revision: int): """Remove the given secret revision. 
@@ -1493,8 +1543,15 @@ class Relation: """ def __init__( - self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, - backend: '_ModelBackend', cache: '_ModelCache', active: bool = True): + self, + relation_name: str, + relation_id: int, + is_peer: bool, + our_unit: Unit, + backend: '_ModelBackend', + cache: '_ModelCache', + active: bool = True, + ): self.name = relation_name self.id = relation_id self.units: Set[Unit] = set() @@ -1546,13 +1603,13 @@ class RelationData(Mapping[Union['Unit', 'Application'], 'RelationDataContent']) def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): self.relation = weakref.proxy(relation) - self._data: Dict[Union['Unit', 'Application'], RelationDataContent] = { + self._data: Dict[Union[Unit, Application], RelationDataContent] = { our_unit: RelationDataContent(self.relation, our_unit, backend), our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), } self._data.update({ - unit: RelationDataContent(self.relation, unit, backend) - for unit in self.relation.units}) + unit: RelationDataContent(self.relation, unit, backend) for unit in self.relation.units + }) # The relation might be dead so avoid a None key here. if self.relation.app is not None: self._data.update({ @@ -1580,8 +1637,9 @@ def __repr__(self): class RelationDataContent(LazyMapping, MutableMapping[str, str]): """Data content of a unit or application in a relation.""" - def __init__(self, relation: 'Relation', entity: Union['Unit', 'Application'], - backend: '_ModelBackend'): + def __init__( + self, relation: 'Relation', entity: Union['Unit', 'Application'], backend: '_ModelBackend' + ): self.relation = relation self._entity = entity self._backend = backend @@ -1625,7 +1683,7 @@ def _validate_read(self): app = self.relation.app if app is None: raise RelationDataAccessError( - f"Remote application instance cannot be retrieved for {self.relation}." + f'Remote application instance cannot be retrieved for {self.relation}.' ) # is this a peer relation? @@ -1655,11 +1713,9 @@ def _validate_write(self, key: str, value: str): # firstly, we validate WHAT we're trying to write. # this is independent of whether we're in testing code or production. if not isinstance(key, str): - raise RelationDataTypeError( - f'relation data keys must be strings, not {type(key)}') + raise RelationDataTypeError(f'relation data keys must be strings, not {type(key)}') if not isinstance(value, str): - raise RelationDataTypeError( - f'relation data values must be strings, not {type(value)}') + raise RelationDataTypeError(f'relation data values must be strings, not {type(value)}') # if we're not in production (we're testing): we skip access control rules if not self._hook_is_running: @@ -1671,14 +1727,15 @@ def _validate_write(self, key: str, value: str): if not is_our_app: raise RelationDataAccessError( f'{self._backend.app_name} cannot write the data of remote application ' - f'{self._entity.name}') + f'{self._entity.name}' + ) # Whether the application data bag is mutable or not depends on # whether this unit is a leader or not, but this is not guaranteed # to be always true during the same hook execution. if self._backend.is_leader(): return # all good raise RelationDataAccessError( - f"{self._backend.unit_name} is not leader and cannot write application data." + f'{self._backend.unit_name} is not leader and cannot write application data.' 
) else: # we are attempting to write a unit databag @@ -1757,7 +1814,7 @@ class StatusBase: def __init__(self, message: str = ''): if self.__class__ is StatusBase: - raise TypeError("cannot instantiate a base class") + raise TypeError('cannot instantiate a base class') self.message = message def __eq__(self, other: 'StatusBase') -> bool: @@ -1766,7 +1823,7 @@ def __eq__(self, other: 'StatusBase') -> bool: return self.message == other.message def __repr__(self): - return f"{self.__class__.__name__}({self.message!r})" + return f'{self.__class__.__name__}({self.message!r})' @classmethod def from_name(cls, name: str, message: str): @@ -1792,8 +1849,10 @@ def from_name(cls, name: str, message: str): def register(cls, child: Type['StatusBase']): """Register a Status for the child's name.""" if not isinstance(child.name, str): - raise TypeError(f"Can't register StatusBase subclass {child}: ", - "missing required `name: str` class attribute") + raise TypeError( + f"Can't register StatusBase subclass {child}: ", + 'missing required `name: str` class attribute', + ) cls._statuses[child.name] = child return child @@ -1825,6 +1884,7 @@ class UnknownStatus(StatusBase): This status is read-only; trying to set unit or application status to ``UnknownStatus`` will raise :class:`ModelError`. """ + name = 'unknown' def __init__(self): @@ -1832,7 +1892,7 @@ def __init__(self): super().__init__('') def __repr__(self): - return "UnknownStatus()" + return 'UnknownStatus()' @StatusBase.register @@ -1845,6 +1905,7 @@ class ErrorStatus(StatusBase): This status is read-only; trying to set unit or application status to ``ErrorStatus`` will raise :class:`ModelError`. """ + name = 'error' @@ -1854,6 +1915,7 @@ class ActiveStatus(StatusBase): The unit believes it is correctly offering all the services it has been asked to offer. """ + name = 'active' def __init__(self, message: str = ''): @@ -1866,6 +1928,7 @@ class BlockedStatus(StatusBase): An admin has to manually intervene to unblock the unit and let it proceed. """ + name = 'blocked' @@ -1878,6 +1941,7 @@ class MaintenanceStatus(StatusBase): reflects activity on the unit itself, not on peers or related units. """ + name = 'maintenance' @@ -1889,6 +1953,7 @@ class WaitingStatus(StatusBase): it is integrated is not running. """ + name = 'waiting' @@ -1948,8 +2013,9 @@ class StorageMapping(Mapping[str, List['Storage']]): def __init__(self, storage_names: Iterable[str], backend: '_ModelBackend'): self._backend = backend - self._storage_map: _StorageDictType = {storage_name: None - for storage_name in storage_names} + self._storage_map: _StorageDictType = { + storage_name: None for storage_name in storage_names + } def __contains__(self, key: str): # pyright: ignore[reportIncompatibleMethodOverride] return key in self._storage_map @@ -1963,8 +2029,7 @@ def __iter__(self): def __getitem__(self, storage_name: str) -> List['Storage']: if storage_name not in self._storage_map: meant = ', or '.join(repr(k) for k in self._storage_map) - raise KeyError( - f'Storage {storage_name!r} not found. Did you mean {meant}?') + raise KeyError(f'Storage {storage_name!r} not found. Did you mean {meant}?') storage_list = self._storage_map[storage_name] if storage_list is None: storage_list = self._storage_map[storage_name] = [] @@ -1983,8 +2048,9 @@ def request(self, storage_name: str, count: int = 1): ModelError: if the storage is not in the charm's metadata. 
""" if storage_name not in self._storage_map: - raise ModelError(f'cannot add storage {storage_name!r}:' - ' it is not present in the charm metadata') + raise ModelError( + f'cannot add storage {storage_name!r}:' ' it is not present in the charm metadata' + ) self._backend.storage_add(storage_name, count) def _invalidate(self, storage_name: str): @@ -2015,7 +2081,7 @@ def index(self) -> int: @property def id(self) -> int: """.. deprecated:: 2.4.0 Use :attr:`Storage.index` instead.""" - logger.warning("model.Storage.id is being replaced - please use model.Storage.index") + logger.warning('model.Storage.id is being replaced - please use model.Storage.index') return self.index @property @@ -2027,7 +2093,7 @@ def full_id(self) -> str: def location(self) -> Path: """Location of the storage.""" if self._location is None: - raw = self._backend.storage_get(self.full_id, "location") + raw = self._backend.storage_get(self.full_id, 'location') self._location = Path(raw) return self._location @@ -2092,8 +2158,9 @@ class Container: name: str """The name of the container from ``metadata.yaml``, for example "postgres".""" - def __init__(self, name: str, backend: '_ModelBackend', - pebble_client: Optional[pebble.Client] = None): + def __init__( + self, name: str, backend: '_ModelBackend', pebble_client: Optional[pebble.Client] = None + ): self.name = name if pebble_client is None: @@ -2120,17 +2187,17 @@ def can_connect(self) -> bool: try: self._pebble.get_system_info() except pebble.ConnectionError as e: - logger.debug("Pebble API is not ready; ConnectionError: %s", e) + logger.debug('Pebble API is not ready; ConnectionError: %s', e) return False except FileNotFoundError as e: # In some cases, charm authors can attempt to hit the Pebble API before it has had the # chance to create the UNIX socket in the shared volume. - logger.debug("Pebble API is not ready; UNIX socket not found: %s", e) + logger.debug('Pebble API is not ready; UNIX socket not found: %s', e) return False except pebble.APIError as e: # An API error is only raised when the Pebble API returns invalid JSON, or the response # cannot be read. Both of these are a likely indicator that something is wrong. - logger.warning("Pebble API is not ready; APIError: %s", e) + logger.warning('Pebble API is not ready; APIError: %s', e) return False return True @@ -2160,8 +2227,9 @@ def restart(self, *service_names: str): if e.code != 400: raise e # support old Pebble instances that don't support the "restart" action - stop: Tuple[str, ...] = tuple(s.name for s in self.get_services( - *service_names).values() if s.is_running()) + stop: Tuple[str, ...] = tuple( + s.name for s in self.get_services(*service_names).values() if s.is_running() + ) if stop: self._pebble.stop_services(stop) self._pebble.start_services(service_names) @@ -2173,8 +2241,13 @@ def stop(self, *service_names: str): self._pebble.stop_services(service_names) - def add_layer(self, label: str, layer: Union[str, pebble.LayerDict, pebble.Layer], *, - combine: bool = False): + def add_layer( + self, + label: str, + layer: Union[str, pebble.LayerDict, pebble.Layer], + *, + combine: bool = False, + ): """Dynamically add a new layer onto the Pebble configuration layers. 
Args: @@ -2223,9 +2296,8 @@ def get_service(self, service_name: str) -> pebble.ServiceInfo: return services[service_name] def get_checks( - self, - *check_names: str, - level: Optional[pebble.CheckLevel] = None) -> 'CheckInfoMapping': + self, *check_names: str, level: Optional[pebble.CheckLevel] = None + ) -> 'CheckInfoMapping': """Fetch and return a mapping of check information indexed by check name. Args: @@ -2251,15 +2323,14 @@ def get_check(self, check_name: str) -> pebble.CheckInfo: return checks[check_name] @typing.overload - def pull(self, path: Union[str, PurePath], *, encoding: None) -> BinaryIO: - ... + def pull(self, path: Union[str, PurePath], *, encoding: None) -> BinaryIO: ... @typing.overload - def pull(self, path: Union[str, PurePath], *, encoding: str = 'utf-8') -> TextIO: - ... + def pull(self, path: Union[str, PurePath], *, encoding: str = 'utf-8') -> TextIO: ... - def pull(self, path: Union[str, PurePath], *, - encoding: Optional[str] = 'utf-8') -> Union[BinaryIO, TextIO]: + def pull( + self, path: Union[str, PurePath], *, encoding: Optional[str] = 'utf-8' + ) -> Union[BinaryIO, TextIO]: """Read a file's content from the remote system. Args: @@ -2278,17 +2349,19 @@ def pull(self, path: Union[str, PurePath], *, """ return self._pebble.pull(str(path), encoding=encoding) - def push(self, - path: Union[str, PurePath], - source: Union[bytes, str, BinaryIO, TextIO], - *, - encoding: str = 'utf-8', - make_dirs: bool = False, - permissions: Optional[int] = None, - user_id: Optional[int] = None, - user: Optional[str] = None, - group_id: Optional[int] = None, - group: Optional[str] = None): + def push( + self, + path: Union[str, PurePath], + source: Union[bytes, str, BinaryIO, TextIO], + *, + encoding: str = 'utf-8', + make_dirs: bool = False, + permissions: Optional[int] = None, + user_id: Optional[int] = None, + user: Optional[str] = None, + group_id: Optional[int] = None, + group: Optional[str] = None, + ): """Write content to a given file path on the remote system. Note that if another process has the file open on the remote system, @@ -2313,14 +2386,21 @@ def push(self, group: Group name for file. Group's GID must match group_id if both are specified. """ - self._pebble.push(str(path), source, encoding=encoding, - make_dirs=make_dirs, - permissions=permissions, - user_id=user_id, user=user, - group_id=group_id, group=group) + self._pebble.push( + str(path), + source, + encoding=encoding, + make_dirs=make_dirs, + permissions=permissions, + user_id=user_id, + user=user, + group_id=group_id, + group=group, + ) - def list_files(self, path: Union[str, PurePath], *, pattern: Optional[str] = None, - itself: bool = False) -> List[pebble.FileInfo]: + def list_files( + self, path: Union[str, PurePath], *, pattern: Optional[str] = None, itself: bool = False + ) -> List[pebble.FileInfo]: """Return list of directory entries from given path on remote system. Despite the name, this method returns a list of files *and* @@ -2334,12 +2414,13 @@ def list_files(self, path: Union[str, PurePath], *, pattern: Optional[str] = Non itself: If path refers to a directory, return information about the directory itself, rather than its contents. 
""" - return self._pebble.list_files(str(path), - pattern=pattern, itself=itself) + return self._pebble.list_files(str(path), pattern=pattern, itself=itself) - def push_path(self, - source_path: Union[str, Path, Iterable[Union[str, Path]]], - dest_dir: Union[str, PurePath]): + def push_path( + self, + source_path: Union[str, Path, Iterable[Union[str, Path]]], + dest_dir: Union[str, PurePath], + ): """Recursively push a local path or files to the remote system. Only regular files and directories are copied; symbolic links, device files, etc. are @@ -2415,15 +2496,18 @@ def local_list(source_path: Path) -> List[pebble.FileInfo]: user_id=info.user_id, user=info.user, group_id=info.group_id, - group=info.group) + group=info.group, + ) except (OSError, pebble.Error) as err: errors.append((str(source_path), err)) if errors: raise MultiPushPullError('failed to push one or more files', errors) - def pull_path(self, - source_path: Union[str, PurePath, Iterable[Union[str, PurePath]]], - dest_dir: Union[str, Path]): + def pull_path( + self, + source_path: Union[str, PurePath, Iterable[Union[str, PurePath]]], + dest_dir: Union[str, Path], + ): """Recursively pull a remote path or files to the local system. Only regular files and directories are copied; symbolic links, device files, etc. are @@ -2510,16 +2594,17 @@ def _build_fileinfo(path: Union[str, Path]) -> pebble.FileInfo: import grp import pwd + info = path.lstat() try: pw_name = pwd.getpwuid(info.st_uid).pw_name except KeyError: - logger.warning("Could not get name for user %s", info.st_uid) + logger.warning('Could not get name for user %s', info.st_uid) pw_name = None try: gr_name = grp.getgrgid(info.st_gid).gr_name except KeyError: - logger.warning("Could not get name for group %s", info.st_gid) + logger.warning('Could not get name for group %s', info.st_gid) gr_name = None return pebble.FileInfo( path=str(path), @@ -2531,11 +2616,13 @@ def _build_fileinfo(path: Union[str, Path]) -> pebble.FileInfo: user_id=info.st_uid, user=pw_name, group_id=info.st_gid, - group=gr_name) + group=gr_name, + ) @staticmethod - def _list_recursive(list_func: Callable[[Path], Iterable[pebble.FileInfo]], - path: Path) -> Generator[pebble.FileInfo, None, None]: + def _list_recursive( + list_func: Callable[[Path], Iterable[pebble.FileInfo]], path: Path + ) -> Generator[pebble.FileInfo, None, None]: """Recursively lists all files under path using the given list_func. Args: @@ -2558,13 +2645,13 @@ def _list_recursive(list_func: Callable[[Path], Iterable[pebble.FileInfo]], yield info else: logger.debug( - 'skipped unsupported file in Container.[push/pull]_path: %s', info.path) + 'skipped unsupported file in Container.[push/pull]_path: %s', info.path + ) @staticmethod def _build_destpath( - file_path: Union[str, Path], - source_path: Union[str, Path], - dest_dir: Union[str, Path]) -> Path: + file_path: Union[str, Path], source_path: Union[str, Path], dest_dir: Union[str, Path] + ) -> Path: """Converts a source file and destination dir into a full destination filepath. file_path: @@ -2580,8 +2667,7 @@ def _build_destpath( file_path, source_path, dest_dir = Path(file_path), Path(source_path), Path(dest_dir) prefix = str(source_path.parent) if prefix != '.' 
and os.path.commonprefix([prefix, str(file_path)]) != prefix: - raise RuntimeError( - f'file "{file_path}" does not have specified prefix "{prefix}"') + raise RuntimeError(f'file "{file_path}" does not have specified prefix "{prefix}"') path_suffix = os.path.relpath(str(file_path), prefix) return dest_dir / path_suffix @@ -2606,15 +2692,16 @@ def isdir(self, path: Union[str, PurePath]) -> bool: return files[0].type == pebble.FileType.DIRECTORY def make_dir( - self, - path: Union[str, PurePath], - *, - make_parents: bool = False, - permissions: Optional[int] = None, - user_id: Optional[int] = None, - user: Optional[str] = None, - group_id: Optional[int] = None, - group: Optional[str] = None): + self, + path: Union[str, PurePath], + *, + make_parents: bool = False, + permissions: Optional[int] = None, + user_id: Optional[int] = None, + user: Optional[str] = None, + group_id: Optional[int] = None, + group: Optional[str] = None, + ): """Create a directory on the remote system with the given attributes. Args: @@ -2629,10 +2716,15 @@ def make_dir( group: Group name for directory. Group's GID must match group_id if both are specified. """ - self._pebble.make_dir(str(path), make_parents=make_parents, - permissions=permissions, - user_id=user_id, user=user, - group_id=group_id, group=group) + self._pebble.make_dir( + str(path), + make_parents=make_parents, + permissions=permissions, + user_id=user_id, + user=user, + group_id=group_id, + group=group, + ) def remove_path(self, path: Union[str, PurePath], *, recursive: bool = False): """Remove a file or directory on the remote system. @@ -2668,9 +2760,8 @@ def exec( stdout: Optional[TextIO] = None, stderr: Optional[TextIO] = None, encoding: str = 'utf-8', - combine_stderr: bool = False - ) -> pebble.ExecProcess[str]: - ... + combine_stderr: bool = False, + ) -> pebble.ExecProcess[str]: ... # Exec I/O is bytes if encoding is explicitly set to None @typing.overload @@ -2690,9 +2781,8 @@ def exec( stdout: Optional[BinaryIO] = None, stderr: Optional[BinaryIO] = None, encoding: None = None, - combine_stderr: bool = False - ) -> pebble.ExecProcess[bytes]: - ... + combine_stderr: bool = False, + ) -> pebble.ExecProcess[bytes]: ... def exec( self, @@ -2710,7 +2800,7 @@ def exec( stdout: Optional[Union[TextIO, BinaryIO]] = None, stderr: Optional[Union[TextIO, BinaryIO]] = None, encoding: Optional[str] = 'utf-8', - combine_stderr: bool = False + combine_stderr: bool = False, ) -> pebble.ExecProcess[Any]: """Execute the given command on the remote system. @@ -2728,7 +2818,8 @@ def exec( version = JujuVersion.from_environ() if not version.supports_exec_service_context: raise RuntimeError( - f'exec with service_context not supported on Juju version {version}') + f'exec with service_context not supported on Juju version {version}' + ) return self._pebble.exec( command, service_context=service_context, @@ -2874,6 +2965,7 @@ def __repr__(self): class ModelError(Exception): """Base class for exceptions raised when interacting with the Model.""" + pass @@ -2925,10 +3017,11 @@ class SecretNotFoundError(ModelError): _ACTION_RESULT_KEY_REGEX = re.compile(r'^[a-z0-9](([a-z0-9-.]+)?[a-z0-9])?$') -def _format_action_result_dict(input: Dict[str, Any], - parent_key: Optional[str] = None, - output: Optional[Dict[str, str]] = None - ) -> Dict[str, str]: +def _format_action_result_dict( + input: Dict[str, Any], + parent_key: Optional[str] = None, + output: Optional[Dict[str, str]] = None, +) -> Dict[str, str]: """Turn a nested dictionary into a flattened dictionary, using '.' 
as a key seperator. This is used to allow nested dictionaries to be translated into the dotted format required by @@ -2966,11 +3059,11 @@ def _format_action_result_dict(input: Dict[str, Any], raise ValueError(f'invalid key {key!r}; must be a string') if not _ACTION_RESULT_KEY_REGEX.match(key): raise ValueError( - f"key {key!r} is invalid: must be similar to 'key', 'some-key2', " - f"or 'some.key'") + f"key {key!r} is invalid: must be similar to 'key', 'some-key2', or 'some.key'" + ) if parent_key: - key = f"{parent_key}.{key}" + key = f'{parent_key}.{key}' if isinstance(value, MutableMapping): value = typing.cast(Dict[str, Any], value) @@ -2994,14 +3087,15 @@ class _ModelBackend: LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) _STORAGE_KEY_RE = re.compile( - r'.*^-s\s+\(=\s+(?P.*?)\)\s*?$', - re.MULTILINE | re.DOTALL + r'.*^-s\s+\(=\s+(?P.*?)\)\s*?$', re.MULTILINE | re.DOTALL ) - def __init__(self, unit_name: Optional[str] = None, - model_name: Optional[str] = None, - model_uuid: Optional[str] = None): - + def __init__( + self, + unit_name: Optional[str] = None, + model_name: Optional[str] = None, + model_uuid: Optional[str] = None, + ): # if JUJU_UNIT_NAME is not being passed nor in the env, something is wrong unit_name_ = unit_name or os.getenv('JUJU_UNIT_NAME') if unit_name_ is None: @@ -3017,16 +3111,21 @@ def __init__(self, unit_name: Optional[str] = None, self._leader_check_time = None self._hook_is_running = '' - def _run(self, *args: str, return_output: bool = False, - use_json: bool = False, input_stream: Optional[str] = None - ) -> Union[str, Any, None]: + def _run( + self, + *args: str, + return_output: bool = False, + use_json: bool = False, + input_stream: Optional[str] = None, + ) -> Union[str, Any, None]: kwargs = { 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'check': True, - 'encoding': 'utf-8'} + 'encoding': 'utf-8', + } if input_stream: - kwargs.update({"input": input_stream}) + kwargs.update({'input': input_stream}) which_cmd = shutil.which(args[0]) if which_cmd is None: raise RuntimeError(f'command not found: {args[0]}') @@ -3060,8 +3159,9 @@ def relation_ids(self, relation_name: str) -> List[int]: def relation_list(self, relation_id: int) -> List[str]: try: - rel_list = self._run('relation-list', '-r', str(relation_id), - return_output=True, use_json=True) + rel_list = self._run( + 'relation-list', '-r', str(relation_id), return_output=True, use_json=True + ) return typing.cast(List[str], rel_list) except ModelError as e: if self._is_relation_not_found(e): @@ -3079,8 +3179,9 @@ def relation_remote_app_name(self, relation_id: int) -> Optional[str]: # If caller is asking for information about another relation, use # "relation-list --app" to get it. try: - rel_id = self._run('relation-list', '-r', str(relation_id), '--app', - return_output=True, use_json=True) + rel_id = self._run( + 'relation-list', '-r', str(relation_id), '--app', return_output=True, use_json=True + ) # if it returned anything at all, it's a str. 
return typing.cast(str, rel_id) @@ -3093,8 +3194,9 @@ def relation_remote_app_name(self, relation_id: int) -> Optional[str]: return None raise - def relation_get(self, relation_id: int, member_name: str, is_app: bool - ) -> '_RelationDataContent_Raw': + def relation_get( + self, relation_id: int, member_name: str, is_app: bool + ) -> '_RelationDataContent_Raw': if not isinstance(is_app, bool): raise TypeError('is_app parameter to relation_get must be a boolean') @@ -3102,7 +3204,8 @@ def relation_get(self, relation_id: int, member_name: str, is_app: bool version = JujuVersion.from_environ() if not version.has_app_data(): raise RuntimeError( - f'getting application data is not supported on Juju version {version}') + f'getting application data is not supported on Juju version {version}' + ) args = ['relation-get', '-r', str(relation_id), '-', member_name] if is_app: @@ -3124,12 +3227,13 @@ def relation_set(self, relation_id: int, key: str, value: str, is_app: bool) -> version = JujuVersion.from_environ() if not version.has_app_data(): raise RuntimeError( - f'setting application data is not supported on Juju version {version}') + f'setting application data is not supported on Juju version {version}' + ) args = ['relation-set', '-r', str(relation_id)] if is_app: args.append('--app') - args.extend(["--file", "-"]) + args.extend(['--file', '-']) try: content = yaml.safe_dump({key: value}) @@ -3153,7 +3257,7 @@ def is_leader(self) -> bool: check = True else: time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) - check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None) + check = time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None if check: # Current time MUST be saved before running is-leader to ensure the cache # is only used inside the window that is-leader itself asserts. @@ -3168,17 +3272,18 @@ def resource_get(self, resource_name: str) -> str: out = self._run('resource-get', resource_name, return_output=True) return typing.cast(str, out).strip() - def pod_spec_set(self, spec: Mapping[str, Any], - k8s_resources: Optional[Mapping[str, Any]] = None): + def pod_spec_set( + self, spec: Mapping[str, Any], k8s_resources: Optional[Mapping[str, Any]] = None + ): tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) try: spec_path = tmpdir / 'spec.yaml' - with spec_path.open("wt", encoding="utf8") as f: + with spec_path.open('wt', encoding='utf8') as f: yaml.safe_dump(spec, stream=f) args = ['--file', str(spec_path)] if k8s_resources: k8s_res_path = tmpdir / 'k8s-resources.yaml' - with k8s_res_path.open("wt", encoding="utf8") as f: + with k8s_res_path.open('wt', encoding='utf8') as f: yaml.safe_dump(k8s_resources, stream=f) args.extend(['--k8s-resources', str(k8s_res_path)]) self._run('pod-spec-set', *args) @@ -3193,9 +3298,12 @@ def status_get(self, *, is_app: bool = False) -> '_StatusDict': or an application. 
""" content = self._run( - 'status-get', '--include-data', f'--application={is_app}', + 'status-get', + '--include-data', + f'--application={is_app}', use_json=True, - return_output=True) + return_output=True, + ) # Unit status looks like (in YAML): # message: 'load: 0.28 0.26 0.26' # status: active @@ -3214,8 +3322,7 @@ def status_get(self, *, is_app: bool = False) -> '_StatusDict': if is_app: content = typing.cast(Dict[str, Dict[str, str]], content) app_status = content['application-status'] - return {'status': app_status['status'], - 'message': app_status['message']} + return {'status': app_status['status'], 'message': app_status['message']} else: return typing.cast('_StatusDict', content) @@ -3244,18 +3351,21 @@ def _storage_event_details(self) -> Tuple[int, str]: match = self._STORAGE_KEY_RE.match(output) if match is None: raise RuntimeError(f'unable to find storage key in {output!r}') - key = match.groupdict()["storage_key"] + key = match.groupdict()['storage_key'] - index = int(key.split("/")[1]) - location = self.storage_get(key, "location") + index = int(key.split('/')[1]) + location = self.storage_get(key, 'location') return index, location def storage_get(self, storage_name_id: str, attribute: str) -> str: if not len(attribute) > 0: # assume it's an empty string. - raise RuntimeError('calling storage_get with `attribute=""` will return a dict ' - 'and not a string. This usage is not supported.') - out = self._run('storage-get', '-s', storage_name_id, attribute, - return_output=True, use_json=True) + raise RuntimeError( + 'calling storage_get with `attribute=""` will return a dict ' + 'and not a string. This usage is not supported.' + ) + out = self._run( + 'storage-get', '-s', storage_name_id, attribute, return_output=True, use_json=True + ) return typing.cast(str, out) def storage_add(self, name: str, count: int = 1) -> None: @@ -3271,7 +3381,7 @@ def action_set(self, results: Dict[str, Any]) -> None: # The Juju action-set hook tool cannot interpret nested dicts, so we use a helper to # flatten out any nested dict structures into a dotted notation, and validate keys. flat_results = _format_action_result_dict(results) - self._run('action-set', *[f"{k}={v}" for k, v in flat_results.items()]) + self._run('action-set', *[f'{k}={v}' for k, v in flat_results.items()]) def action_log(self, message: str) -> None: self._run('action-log', message) @@ -3283,15 +3393,16 @@ def application_version_set(self, version: str) -> None: self._run('application-version-set', '--', version) @classmethod - def log_split(cls, message: str, max_len: int = MAX_LOG_LINE_LEN - ) -> Generator[str, None, None]: + def log_split( + cls, message: str, max_len: int = MAX_LOG_LINE_LEN + ) -> Generator[str, None, None]: """Helper to handle log messages that are potentially too long. This is a generator that splits a message string into multiple chunks if it is too long to safely pass to bash. Will only generate a single entry if the line is not too long. """ if len(message) > max_len: - yield f"Log string greater than {max_len}. Splitting into multiple chunks: " + yield f'Log string greater than {max_len}. 
Splitting into multiple chunks: ' while message: yield message[:max_len] @@ -3300,7 +3411,7 @@ def log_split(cls, message: str, max_len: int = MAX_LOG_LINE_LEN def juju_log(self, level: str, message: str) -> None: """Pass a log message on to the juju logger.""" for line in self.log_split(message): - self._run('juju-log', '--log-level', level, "--", line) + self._run('juju-log', '--log-level', level, '--', line) def network_get(self, binding_name: str, relation_id: Optional[int] = None) -> '_NetworkDict': """Return network info provided by network-get for a given binding. @@ -3320,8 +3431,9 @@ def network_get(self, binding_name: str, relation_id: Optional[int] = None) -> ' raise RelationNotFoundError() from e raise - def add_metrics(self, metrics: Mapping[str, Union[int, float]], - labels: Optional[Mapping[str, str]] = None) -> None: + def add_metrics( + self, metrics: Mapping[str, Union[int, float]], labels: Optional[Mapping[str, str]] = None + ) -> None: cmd: List[str] = ['add-metric'] if labels: label_args: List[str] = [] @@ -3362,15 +3474,19 @@ def planned_units(self) -> int: num_alive = sum(1 for unit in units.values() if unit['status'] != 'dying') return num_alive - def update_relation_data(self, relation_id: int, _entity: Union['Unit', 'Application'], - key: str, value: str): + def update_relation_data( + self, relation_id: int, _entity: Union['Unit', 'Application'], key: str, value: str + ): self.relation_set(relation_id, key, value, isinstance(_entity, Application)) - def secret_get(self, *, - id: Optional[str] = None, - label: Optional[str] = None, - refresh: bool = False, - peek: bool = False) -> Dict[str, str]: + def secret_get( + self, + *, + id: Optional[str] = None, + label: Optional[str] = None, + refresh: bool = False, + peek: bool = False, + ) -> Dict[str, str]: args: List[str] = [] if id is not None: args.append(id) @@ -3391,8 +3507,9 @@ def secret_get(self, *, raise return typing.cast(Dict[str, str], result) - def _run_for_secret(self, *args: str, return_output: bool = False, - use_json: bool = False) -> Union[str, Any, None]: + def _run_for_secret( + self, *args: str, return_output: bool = False, use_json: bool = False + ) -> Union[str, Any, None]: try: return self._run(*args, return_output=return_output, use_json=use_json) except ModelError as e: @@ -3400,9 +3517,9 @@ def _run_for_secret(self, *args: str, return_output: bool = False, raise SecretNotFoundError() from e raise - def secret_info_get(self, *, - id: Optional[str] = None, - label: Optional[str] = None) -> SecretInfo: + def secret_info_get( + self, *, id: Optional[str] = None, label: Optional[str] = None + ) -> SecretInfo: args: List[str] = [] if id is not None: args.append(id) @@ -3413,12 +3530,16 @@ def secret_info_get(self, *, id = list(info_dicts)[0] # Juju returns dict of {secret_id: {info}} return SecretInfo.from_dict(id, typing.cast(Dict[str, Any], info_dicts[id])) - def secret_set(self, id: str, *, - content: Optional[Dict[str, str]] = None, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[datetime.datetime] = None, - rotate: Optional[SecretRotate] = None): + def secret_set( + self, + id: str, + *, + content: Optional[Dict[str, str]] = None, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[datetime.datetime] = None, + rotate: Optional[SecretRotate] = None, + ): args = [id] if label is not None: args.extend(['--label', label]) @@ -3434,12 +3555,16 @@ def secret_set(self, id: str, *, args.append(f'{k}={v}') 
self._run_for_secret('secret-set', *args) - def secret_add(self, content: Dict[str, str], *, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[datetime.datetime] = None, - rotate: Optional[SecretRotate] = None, - owner: Optional[str] = None) -> str: + def secret_add( + self, + content: Dict[str, str], + *, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[datetime.datetime] = None, + rotate: Optional[SecretRotate] = None, + owner: Optional[str] = None, + ) -> str: args: List[str] = [] if label is not None: args.extend(['--label', label]) @@ -3516,13 +3641,13 @@ def _parse_opened_port(cls, port_str: str) -> Optional[Port]: def reboot(self, now: bool = False): if now: - self._run("juju-reboot", "--now") + self._run('juju-reboot', '--now') # Juju will kill the Charm process, and in testing no code after # this point would execute. However, we want to guarantee that for # Charmers, so we force that to be the case. sys.exit() else: - self._run("juju-reboot") + self._run('juju-reboot') def credential_get(self) -> 'CloudSpec': """Access cloud credentials by running the credential-get hook tool. @@ -3542,7 +3667,8 @@ class _ModelBackendValidator: def validate_metric_key(cls, key: str): if cls.METRIC_KEY_REGEX.match(key) is None: raise ModelError( - f'invalid metric key {key!r}: must match {cls.METRIC_KEY_REGEX.pattern}') + f'invalid metric key {key!r}: must match {cls.METRIC_KEY_REGEX.pattern}' + ) @classmethod def validate_metric_label(cls, label_name: str): @@ -3555,12 +3681,14 @@ def validate_metric_label(cls, label_name: str): @classmethod def format_metric_value(cls, value: Union[int, float]): if not isinstance(value, (int, float)): # pyright: ignore[reportUnnecessaryIsInstance] - raise ModelError(f'invalid metric value {value!r} provided:' - ' must be a positive finite float') + raise ModelError( + f'invalid metric value {value!r} provided:' ' must be a positive finite float' + ) if math.isnan(value) or math.isinf(value) or value < 0: - raise ModelError(f'invalid metric value {value!r} provided:' - ' must be a positive finite float') + raise ModelError( + f'invalid metric value {value!r} provided:' ' must be a positive finite float' + ) return str(value) @classmethod @@ -3568,12 +3696,10 @@ def validate_label_value(cls, label: str, value: str): # Label values cannot be empty, contain commas or equal signs as those are # used by add-metric as separators. 
if not value: - raise ModelError( - f'metric label {label} has an empty value, which is not allowed') + raise ModelError(f'metric label {label} has an empty value, which is not allowed') v = str(value) if re.search('[,=]', v) is not None: - raise ModelError( - f'metric label values must not contain "," or "=": {label}={value!r}') + raise ModelError(f'metric label values must not contain "," or "=": {label}={value!r}') class LazyNotice: diff --git a/ops/pebble.py b/ops/pebble.py index b450f0a8d..831c778a0 100644 --- a/ops/pebble.py +++ b/ops/pebble.py @@ -70,100 +70,121 @@ from ops._private import timeconv, yaml # Public as these are used in the Container.add_layer signature -ServiceDict = typing.TypedDict('ServiceDict', - {'summary': str, - 'description': str, - 'startup': str, - 'override': str, - 'command': str, - 'after': Sequence[str], - 'before': Sequence[str], - 'requires': Sequence[str], - 'environment': Dict[str, str], - 'user': str, - 'user-id': Optional[int], - 'group': str, - 'group-id': Optional[int], - 'working-dir': str, - 'on-success': str, - 'on-failure': str, - 'on-check-failure': Dict[str, Any], - 'backoff-delay': str, - 'backoff-factor': Optional[int], - 'backoff-limit': str, - 'kill-delay': Optional[str], - }, - total=False) - -HttpDict = typing.TypedDict('HttpDict', - {'url': str, - 'headers': Dict[str, str]}, - total=False) -TcpDict = typing.TypedDict('TcpDict', - {'port': int, - 'host': str}, - total=False) -ExecDict = typing.TypedDict('ExecDict', - {'command': str, - # see JujuVersion.supports_exec_service_context - 'service-context': str, - 'environment': Dict[str, str], - 'user-id': Optional[int], - 'user': str, - 'group-id': Optional[int], - 'group': str, - 'working-dir': str}, - total=False) - -CheckDict = typing.TypedDict('CheckDict', - {'override': str, - 'level': Union['CheckLevel', str], - 'period': Optional[str], - 'timeout': Optional[str], - 'http': Optional[HttpDict], - 'tcp': Optional[TcpDict], - 'exec': Optional[ExecDict], - 'threshold': Optional[int]}, - total=False) +ServiceDict = typing.TypedDict( + 'ServiceDict', + { + 'summary': str, + 'description': str, + 'startup': str, + 'override': str, + 'command': str, + 'after': Sequence[str], + 'before': Sequence[str], + 'requires': Sequence[str], + 'environment': Dict[str, str], + 'user': str, + 'user-id': Optional[int], + 'group': str, + 'group-id': Optional[int], + 'working-dir': str, + 'on-success': str, + 'on-failure': str, + 'on-check-failure': Dict[str, Any], + 'backoff-delay': str, + 'backoff-factor': Optional[int], + 'backoff-limit': str, + 'kill-delay': Optional[str], + }, + total=False, +) + +HttpDict = typing.TypedDict('HttpDict', {'url': str, 'headers': Dict[str, str]}, total=False) +TcpDict = typing.TypedDict('TcpDict', {'port': int, 'host': str}, total=False) +ExecDict = typing.TypedDict( + 'ExecDict', + { + 'command': str, + # see JujuVersion.supports_exec_service_context + 'service-context': str, + 'environment': Dict[str, str], + 'user-id': Optional[int], + 'user': str, + 'group-id': Optional[int], + 'group': str, + 'working-dir': str, + }, + total=False, +) + +CheckDict = typing.TypedDict( + 'CheckDict', + { + 'override': str, + 'level': Union['CheckLevel', str], + 'period': Optional[str], + 'timeout': Optional[str], + 'http': Optional[HttpDict], + 'tcp': Optional[TcpDict], + 'exec': Optional[ExecDict], + 'threshold': Optional[int], + }, + total=False, +) # In Python 3.11+ 'services' and 'labels' should be NotRequired, and total=True. 
-LogTargetDict = typing.TypedDict('LogTargetDict', - {'override': Union[Literal['merge'], Literal['replace']], - 'type': Literal['loki'], - 'location': str, - 'services': List[str], - 'labels': Dict[str, str]}, - total=False) - -LayerDict = typing.TypedDict('LayerDict', - {'summary': str, - 'description': str, - 'services': Dict[str, ServiceDict], - 'checks': Dict[str, CheckDict], - 'log-targets': Dict[str, LogTargetDict]}, - total=False) - -PlanDict = typing.TypedDict('PlanDict', - {'services': Dict[str, ServiceDict], - 'checks': Dict[str, CheckDict], - 'log-targets': Dict[str, LogTargetDict]}, - total=False) - -_AuthDict = TypedDict('_AuthDict', - {'permissions': Optional[str], - 'user-id': Optional[int], - 'user': Optional[str], - 'group-id': Optional[int], - 'group': Optional[str], - 'path': Optional[str], - 'make-dirs': Optional[bool], - 'make-parents': Optional[bool], - }, total=False) - -_ServiceInfoDict = TypedDict('_ServiceInfoDict', - {'startup': Union['ServiceStartup', str], - 'current': Union['ServiceStatus', str], - 'name': str}) +LogTargetDict = typing.TypedDict( + 'LogTargetDict', + { + 'override': Union[Literal['merge'], Literal['replace']], + 'type': Literal['loki'], + 'location': str, + 'services': List[str], + 'labels': Dict[str, str], + }, + total=False, +) + +LayerDict = typing.TypedDict( + 'LayerDict', + { + 'summary': str, + 'description': str, + 'services': Dict[str, ServiceDict], + 'checks': Dict[str, CheckDict], + 'log-targets': Dict[str, LogTargetDict], + }, + total=False, +) + +PlanDict = typing.TypedDict( + 'PlanDict', + { + 'services': Dict[str, ServiceDict], + 'checks': Dict[str, CheckDict], + 'log-targets': Dict[str, LogTargetDict], + }, + total=False, +) + +_AuthDict = TypedDict( + '_AuthDict', + { + 'permissions': Optional[str], + 'user-id': Optional[int], + 'user': Optional[str], + 'group-id': Optional[int], + 'group': Optional[str], + 'path': Optional[str], + 'make-dirs': Optional[bool], + 'make-parents': Optional[bool], + }, + total=False, +) + +_ServiceInfoDict = TypedDict( + '_ServiceInfoDict', + {'startup': Union['ServiceStartup', str], 'current': Union['ServiceStatus', str], 'name': str}, +) # Callback types for _MultiParser header and body handlers @@ -203,81 +224,96 @@ def __enter__(self) -> typing.IO[typing.AnyStr]: ... 
if TYPE_CHECKING: from typing_extensions import NotRequired - _CheckInfoDict = TypedDict('_CheckInfoDict', - {"name": str, - "level": NotRequired[Optional[Union['CheckLevel', str]]], - "status": Union['CheckStatus', str], - "failures": NotRequired[int], - "threshold": int, - "change-id": NotRequired[str]}) - _FileInfoDict = TypedDict('_FileInfoDict', - {"path": str, - "name": str, - "size": NotRequired[Optional[int]], - "permissions": str, - "last-modified": str, - "user-id": NotRequired[Optional[int]], - "user": NotRequired[Optional[str]], - "group-id": NotRequired[Optional[int]], - "group": NotRequired[Optional[str]], - "type": Union['FileType', str]}) - - _ProgressDict = TypedDict('_ProgressDict', - {'label': str, - 'done': int, - 'total': int}) - _TaskDict = TypedDict('_TaskDict', - {'id': str, - 'kind': str, - 'summary': str, - 'status': str, - 'log': NotRequired[Optional[List[str]]], - 'progress': _ProgressDict, - 'spawn-time': str, - 'ready-time': NotRequired[Optional[str]], - 'data': NotRequired[Optional[Dict[str, Any]]]}) - _ChangeDict = TypedDict('_ChangeDict', - {'id': str, - 'kind': str, - 'summary': str, - 'status': str, - 'ready': bool, - 'spawn-time': str, - 'tasks': NotRequired[Optional[List[_TaskDict]]], - 'err': NotRequired[Optional[str]], - 'ready-time': NotRequired[Optional[str]], - 'data': NotRequired[Optional[Dict[str, Any]]]}) - - _Error = TypedDict('_Error', - {'kind': str, - 'message': str}) - _Item = TypedDict('_Item', - {'path': str, - 'error': NotRequired[_Error]}) - _FilesResponse = TypedDict('_FilesResponse', - {'result': List[_Item]}) - - _WarningDict = TypedDict('_WarningDict', - {'message': str, - 'first-added': str, - 'last-added': str, - 'last-shown': NotRequired[Optional[str]], - 'expire-after': str, - 'repeat-after': str}) - - _NoticeDict = TypedDict('_NoticeDict', { - 'id': str, - 'user-id': NotRequired[Optional[int]], - 'type': str, - 'key': str, - 'first-occurred': str, - 'last-occurred': str, - 'last-repeated': str, - 'occurrences': int, - 'last-data': NotRequired[Optional[Dict[str, str]]], - 'repeat-after': NotRequired[str], - 'expire-after': NotRequired[str], - }) + _CheckInfoDict = TypedDict( + '_CheckInfoDict', + { + 'name': str, + 'level': NotRequired[Optional[Union['CheckLevel', str]]], + 'status': Union['CheckStatus', str], + 'failures': NotRequired[int], + 'threshold': int, + 'change-id': NotRequired[str], + }, + ) + _FileInfoDict = TypedDict( + '_FileInfoDict', + { + 'path': str, + 'name': str, + 'size': NotRequired[Optional[int]], + 'permissions': str, + 'last-modified': str, + 'user-id': NotRequired[Optional[int]], + 'user': NotRequired[Optional[str]], + 'group-id': NotRequired[Optional[int]], + 'group': NotRequired[Optional[str]], + 'type': Union['FileType', str], + }, + ) + + _ProgressDict = TypedDict('_ProgressDict', {'label': str, 'done': int, 'total': int}) + _TaskDict = TypedDict( + '_TaskDict', + { + 'id': str, + 'kind': str, + 'summary': str, + 'status': str, + 'log': NotRequired[Optional[List[str]]], + 'progress': _ProgressDict, + 'spawn-time': str, + 'ready-time': NotRequired[Optional[str]], + 'data': NotRequired[Optional[Dict[str, Any]]], + }, + ) + _ChangeDict = TypedDict( + '_ChangeDict', + { + 'id': str, + 'kind': str, + 'summary': str, + 'status': str, + 'ready': bool, + 'spawn-time': str, + 'tasks': NotRequired[Optional[List[_TaskDict]]], + 'err': NotRequired[Optional[str]], + 'ready-time': NotRequired[Optional[str]], + 'data': NotRequired[Optional[Dict[str, Any]]], + }, + ) + + _Error = TypedDict('_Error', {'kind': str, 
'message': str}) + _Item = TypedDict('_Item', {'path': str, 'error': NotRequired[_Error]}) + _FilesResponse = TypedDict('_FilesResponse', {'result': List[_Item]}) + + _WarningDict = TypedDict( + '_WarningDict', + { + 'message': str, + 'first-added': str, + 'last-added': str, + 'last-shown': NotRequired[Optional[str]], + 'expire-after': str, + 'repeat-after': str, + }, + ) + + _NoticeDict = TypedDict( + '_NoticeDict', + { + 'id': str, + 'user-id': NotRequired[Optional[int]], + 'type': str, + 'key': str, + 'first-occurred': str, + 'last-occurred': str, + 'last-repeated': str, + 'occurrences': int, + 'last-data': NotRequired[Optional[Dict[str, str]]], + 'repeat-after': NotRequired[str], + 'expire-after': NotRequired[str], + }, + ) class _WebSocket(Protocol): @@ -305,8 +341,9 @@ class _NotProvidedFlag: class _UnixSocketConnection(http.client.HTTPConnection): """Implementation of HTTPConnection that connects to a named Unix socket.""" - def __init__(self, host: str, socket_path: str, - timeout: Union[_NotProvidedFlag, float] = _not_provided): + def __init__( + self, host: str, socket_path: str, timeout: Union[_NotProvidedFlag, float] = _not_provided + ): if timeout is _not_provided: super().__init__(host) else: @@ -333,8 +370,11 @@ def __init__(self, socket_path: str): def http_open(self, req: urllib.request.Request): """Override http_open to use a Unix socket connection (instead of TCP).""" - return self.do_open(_UnixSocketConnection, req, # type:ignore - socket_path=self.socket_path) + return self.do_open( + _UnixSocketConnection, # type:ignore + req, + socket_path=self.socket_path, + ) def _format_timeout(timeout: float) -> str: @@ -375,7 +415,7 @@ class ProtocolError(Error): class PathError(Error): """Raised when there's an error with a specific path.""" - kind: typing.Literal["not-found", "permission-denied", "generic-file-error"] + kind: typing.Literal['not-found', 'permission-denied', 'generic-file-error'] """Short string representing the kind of error.""" message: str @@ -501,7 +541,7 @@ def __str__(self): if out is None: continue truncated = ' [truncated]' if len(out) > self.STR_MAX_OUTPUT else '' - out = out[:self.STR_MAX_OUTPUT] + out = out[: self.STR_MAX_OUTPUT] message = f'{message}, {name}={out!r}{truncated}' return message @@ -563,21 +603,25 @@ def from_dict(cls, d: '_WarningDict') -> 'Warning': message=d['message'], first_added=timeconv.parse_rfc3339(d['first-added']), last_added=timeconv.parse_rfc3339(d['last-added']), - last_shown=(timeconv.parse_rfc3339(d['last-shown']) # type: ignore - if d.get('last-shown') else None), + last_shown=( + timeconv.parse_rfc3339(d['last-shown']) # type: ignore + if d.get('last-shown') + else None + ), expire_after=d['expire-after'], repeat_after=d['repeat-after'], ) def __repr__(self): - return ('Warning(' - f'message={self.message!r}, ' - f'first_added={self.first_added!r}, ' - f'last_added={self.last_added!r}, ' - f'last_shown={self.last_shown!r}, ' - f'expire_after={self.expire_after!r}, ' - f'repeat_after={self.repeat_after!r})' - ) + return ( + 'Warning(' + f'message={self.message!r}, ' + f'first_added={self.first_added!r}, ' + f'last_added={self.last_added!r}, ' + f'last_shown={self.last_shown!r}, ' + f'expire_after={self.expire_after!r}, ' + f'repeat_after={self.repeat_after!r})' + ) class TaskProgress: @@ -603,11 +647,12 @@ def from_dict(cls, d: '_ProgressDict') -> 'TaskProgress': ) def __repr__(self): - return ('TaskProgress(' - f'label={self.label!r}, ' - f'done={self.done!r}, ' - f'total={self.total!r})' - ) + return ( + 
'TaskProgress(' + f'label={self.label!r}, ' + f'done={self.done!r}, ' + f'total={self.total!r})' + ) class TaskID(str): @@ -653,23 +698,27 @@ def from_dict(cls, d: '_TaskDict') -> 'Task': log=d.get('log') or [], progress=TaskProgress.from_dict(d['progress']), spawn_time=timeconv.parse_rfc3339(d['spawn-time']), - ready_time=(timeconv.parse_rfc3339(d['ready-time']) # type: ignore - if d.get('ready-time') else None), + ready_time=( + timeconv.parse_rfc3339(d['ready-time']) # type: ignore + if d.get('ready-time') + else None + ), data=d.get('data') or {}, ) def __repr__(self): - return ('Task(' - f'id={self.id!r}, ' - f'kind={self.kind!r}, ' - f'summary={self.summary!r}, ' - f'status={self.status!r}, ' - f'log={self.log!r}, ' - f'progress={self.progress!r}, ' - f'spawn_time={self.spawn_time!r}, ' - f'ready_time={self.ready_time!r}, ' - f'data={self.data!r})' - ) + return ( + 'Task(' + f'id={self.id!r}, ' + f'kind={self.kind!r}, ' + f'summary={self.summary!r}, ' + f'status={self.status!r}, ' + f'log={self.log!r}, ' + f'progress={self.progress!r}, ' + f'spawn_time={self.spawn_time!r}, ' + f'ready_time={self.ready_time!r}, ' + f'data={self.data!r})' + ) class ChangeID(str): @@ -718,24 +767,28 @@ def from_dict(cls, d: '_ChangeDict') -> 'Change': ready=d['ready'], err=d.get('err'), spawn_time=timeconv.parse_rfc3339(d['spawn-time']), - ready_time=(timeconv.parse_rfc3339(d['ready-time']) # type: ignore - if d.get('ready-time') else None), + ready_time=( + timeconv.parse_rfc3339(d['ready-time']) # type: ignore + if d.get('ready-time') + else None + ), data=d.get('data') or {}, ) def __repr__(self): - return ('Change(' - f'id={self.id!r}, ' - f'kind={self.kind!r}, ' - f'summary={self.summary!r}, ' - f'status={self.status!r}, ' - f'tasks={self.tasks!r}, ' - f'ready={self.ready!r}, ' - f'err={self.err!r}, ' - f'spawn_time={self.spawn_time!r}, ' - f'ready_time={self.ready_time!r}, ' - f'data={self.data!r})' - ) + return ( + 'Change(' + f'id={self.id!r}, ' + f'kind={self.kind!r}, ' + f'summary={self.summary!r}, ' + f'status={self.status!r}, ' + f'tasks={self.tasks!r}, ' + f'ready={self.ready!r}, ' + f'err={self.err!r}, ' + f'spawn_time={self.spawn_time!r}, ' + f'ready_time={self.ready_time!r}, ' + f'data={self.data!r})' + ) class Plan: @@ -753,13 +806,15 @@ def __init__(self, raw: Optional[Union[str, 'PlanDict']] = None): d = typing.cast('PlanDict', d) self._raw = raw - self._services: Dict[str, Service] = {name: Service(name, service) - for name, service in d.get('services', {}).items()} - self._checks: Dict[str, Check] = {name: Check(name, check) - for name, check in d.get('checks', {}).items()} + self._services: Dict[str, Service] = { + name: Service(name, service) for name, service in d.get('services', {}).items() + } + self._checks: Dict[str, Check] = { + name: Check(name, check) for name, check in d.get('checks', {}).items() + } self._log_targets: Dict[str, LogTarget] = { - name: LogTarget(name, target) - for name, target in d.get('log-targets', {}).items()} + name: LogTarget(name, target) for name, target in d.get('log-targets', {}).items() + } @property def services(self) -> Dict[str, 'Service']: @@ -790,7 +845,10 @@ def to_dict(self) -> 'PlanDict': fields = [ ('services', {name: service.to_dict() for name, service in self._services.items()}), ('checks', {name: check.to_dict() for name, check in self._checks.items()}), - ('log-targets', {name: target.to_dict() for name, target in self._log_targets.items()}) + ( + 'log-targets', + {name: target.to_dict() for name, target in 
self._log_targets.items()}, + ), ] dct = {name: value for name, value in fields if value} return typing.cast('PlanDict', dct) @@ -836,12 +894,13 @@ def __init__(self, raw: Optional[Union[str, 'LayerDict']] = None): self.summary = d.get('summary', '') self.description = d.get('description', '') - self.services = {name: Service(name, service) - for name, service in d.get('services', {}).items()} - self.checks = {name: Check(name, check) - for name, check in d.get('checks', {}).items()} - self.log_targets = {name: LogTarget(name, target) - for name, target in d.get('log-targets', {}).items()} + self.services = { + name: Service(name, service) for name, service in d.get('services', {}).items() + } + self.checks = {name: Check(name, check) for name, check in d.get('checks', {}).items()} + self.log_targets = { + name: LogTarget(name, target) for name, target in d.get('log-targets', {}).items() + } def to_yaml(self) -> str: """Convert this layer to its YAML representation.""" @@ -854,7 +913,7 @@ def to_dict(self) -> 'LayerDict': ('description', self.description), ('services', {name: service.to_dict() for name, service in self.services.items()}), ('checks', {name: check.to_dict() for name, check in self.checks.items()}), - ('log-targets', {name: target.to_dict() for name, target in self.log_targets.items()}) + ('log-targets', {name: target.to_dict() for name, target in self.log_targets.items()}), ] dct = {name: value for name, value in fields if value} return typing.cast('LayerDict', dct) @@ -1009,11 +1068,12 @@ def from_dict(cls, d: '_ServiceInfoDict') -> 'ServiceInfo': ) def __repr__(self): - return ('ServiceInfo(' - f'name={self.name!r}, ' - f'startup={self.startup}, ' - f'current={self.current})' - ) + return ( + 'ServiceInfo(' + f'name={self.name!r}, ' + f'startup={self.startup}, ' + f'current={self.current})' + ) class Check: @@ -1220,18 +1280,19 @@ def from_dict(cls, d: '_FileInfoDict') -> 'FileInfo': ) def __repr__(self): - return ('FileInfo(' - f'path={self.path!r}, ' - f'name={self.name!r}, ' - f'type={self.type}, ' - f'size={self.size}, ' - f'permissions=0o{self.permissions:o}, ' - f'last_modified={self.last_modified!r}, ' - f'user_id={self.user_id}, ' - f'user={self.user!r}, ' - f'group_id={self.group_id}, ' - f'group={self.group!r})' - ) + return ( + 'FileInfo(' + f'path={self.path!r}, ' + f'name={self.name!r}, ' + f'type={self.type}, ' + f'size={self.size}, ' + f'permissions=0o{self.permissions:o}, ' + f'last_modified={self.last_modified!r}, ' + f'user_id={self.user_id}, ' + f'user={self.user!r}, ' + f'group_id={self.group_id}, ' + f'group={self.group!r})' + ) class CheckInfo: @@ -1313,14 +1374,15 @@ def from_dict(cls, d: '_CheckInfoDict') -> 'CheckInfo': ) def __repr__(self): - return ('CheckInfo(' - f'name={self.name!r}, ' - f'level={self.level}, ' - f'status={self.status}, ' - f'failures={self.failures}, ' - f'threshold={self.threshold!r}, ' - f'change_id={self.change_id!r})' - ) + return ( + 'CheckInfo(' + f'name={self.name!r}, ' + f'level={self.level}, ' + f'status={self.status}, ' + f'failures={self.failures}, ' + f'threshold={self.threshold!r}, ' + f'change_id={self.change_id!r})' + ) class NoticeType(enum.Enum): @@ -1401,9 +1463,11 @@ def from_dict(cls, d: '_NoticeDict') -> 'Notice': occurrences=d['occurrences'], last_data=d.get('last-data') or {}, repeat_after=timeconv.parse_duration(d['repeat-after']) - if 'repeat-after' in d else None, + if 'repeat-after' in d + else None, expire_after=timeconv.parse_duration(d['expire-after']) - if 'expire-after' in d else None, + if 
'expire-after' in d + else None, ) @@ -1546,7 +1610,7 @@ def wait_output(self) -> Tuple[AnyStr, Optional[AnyStr]]: if self.stdout is None: raise TypeError( "can't use wait_output() when exec was called with the stdout argument; " - "use wait() instead" + 'use wait() instead' ) if self._encoding is not None: @@ -1600,11 +1664,13 @@ def _has_fileno(f: Any) -> bool: return False -def _reader_to_websocket(reader: '_WebsocketReader', - ws: '_WebSocket', - encoding: str, - cancel_reader: Optional[int] = None, - bufsize: int = 16 * 1024): +def _reader_to_websocket( + reader: '_WebsocketReader', + ws: '_WebSocket', + encoding: str, + cancel_reader: Optional[int] = None, + bufsize: int = 16 * 1024, +): """Read reader through to EOF and send each chunk read to the websocket.""" while True: if cancel_reader is not None: @@ -1623,8 +1689,7 @@ def _reader_to_websocket(reader: '_WebsocketReader', ws.send('{"command":"end"}') # type: ignore # Send "end" command as TEXT frame to signal EOF -def _websocket_to_writer(ws: '_WebSocket', writer: '_WebsocketWriter', - encoding: Optional[str]): +def _websocket_to_writer(ws: '_WebSocket', writer: '_WebsocketWriter', encoding: Optional[str]): """Receive messages from websocket (until end signal) and write to writer.""" while True: chunk = ws.recv() @@ -1744,10 +1809,13 @@ class Client: _chunk_size = 8192 - def __init__(self, socket_path: str, - opener: Optional[urllib.request.OpenerDirector] = None, - base_url: str = 'http://localhost', - timeout: float = 5.0): + def __init__( + self, + socket_path: str, + opener: Optional[urllib.request.OpenerDirector] = None, + base_url: str = 'http://localhost', + timeout: float = 5.0, + ): if not isinstance(socket_path, str): raise TypeError(f'`socket_path` should be a string, not: {type(socket_path)}') if opener is None: @@ -1768,12 +1836,13 @@ def _get_default_opener(cls, socket_path: str) -> urllib.request.OpenerDirector: return opener # we need to cast the return type depending on the request params - def _request(self, - method: str, - path: str, - query: Optional[Dict[str, Any]] = None, - body: Optional[Dict[str, Any]] = None - ) -> Dict[str, Any]: + def _request( + self, + method: str, + path: str, + query: Optional[Dict[str, Any]] = None, + body: Optional[Dict[str, Any]] = None, + ) -> Dict[str, Any]: """Make a JSON request to the Pebble server with the given HTTP method and path. If query dict is provided, it is encoded and appended as a query string @@ -1793,8 +1862,10 @@ def _request(self, return raw_resp @staticmethod - def _ensure_content_type(headers: email.message.Message, - expected: 'Literal["multipart/form-data", "application/json"]'): + def _ensure_content_type( + headers: email.message.Message, + expected: 'Literal["multipart/form-data", "application/json"]', + ): """Parse Content-Type header from headers and ensure it's equal to expected. Return a dict of any options in the header, e.g., {'boundary': ...}. 
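For reference, a minimal usage sketch of the `pebble.Client` constructor reflowed above, assuming a placeholder socket path (the real location depends on how Pebble is deployed in the container):

```python
from ops import pebble

# Placeholder socket path for illustration; the actual path depends on the
# container/Pebble setup.
client = pebble.Client(socket_path='/charm/containers/mycontainer/pebble.socket')

# With no arguments, get_changes() uses the default select of
# ChangeState.IN_PROGRESS (see the signature reformatted below).
changes = client.get_changes()
```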
@@ -1807,7 +1878,9 @@ def _ensure_content_type(headers: email.message.Message, return options def _request_raw( - self, method: str, path: str, + self, + method: str, + path: str, query: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, Any]] = None, data: Optional[Union[bytes, Generator[bytes, Any, Any]]] = None, @@ -1815,7 +1888,7 @@ def _request_raw( """Make a request to the Pebble server; return the raw HTTPResponse object.""" url = self.base_url + path if query: - url = f"{url}?{urllib.parse.urlencode(query, doseq=True)}" + url = f'{url}?{urllib.parse.urlencode(query, doseq=True)}' if headers is None: headers = {} @@ -1837,8 +1910,9 @@ def _request_raw( except urllib.error.URLError as e: if e.args and isinstance(e.args[0], FileNotFoundError): raise ConnectionError( - f"Could not connect to Pebble: socket not found at {self.socket_path!r} " - "(container restarted?)") from None + f'Could not connect to Pebble: socket not found at {self.socket_path!r} ' + '(container restarted?)' + ) from None raise ConnectionError(e.reason) from e return response @@ -1861,7 +1935,9 @@ def ack_warnings(self, timestamp: datetime.datetime) -> int: return resp['result'] def get_changes( - self, select: ChangeState = ChangeState.IN_PROGRESS, service: Optional[str] = None, + self, + select: ChangeState = ChangeState.IN_PROGRESS, + service: Optional[str] = None, ) -> List[Change]: """Get list of changes in given state, filter by service name if given.""" query: Dict[str, Union[str, int]] = {'select': select.value} @@ -1917,7 +1993,10 @@ def replan_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID return self._services_action('replan', [], timeout, delay) def start_services( - self, services: Iterable[str], timeout: float = 30.0, delay: float = 0.1, + self, + services: Iterable[str], + timeout: float = 30.0, + delay: float = 0.1, ) -> ChangeID: """Start services by name and wait (poll) for them to be started. @@ -1938,7 +2017,10 @@ def start_services( return self._services_action('start', services, timeout, delay) def stop_services( - self, services: Iterable[str], timeout: float = 30.0, delay: float = 0.1, + self, + services: Iterable[str], + timeout: float = 30.0, + delay: float = 0.1, ) -> ChangeID: """Stop services by name and wait (poll) for them to be started. @@ -1959,7 +2041,10 @@ def stop_services( return self._services_action('stop', services, timeout, delay) def restart_services( - self, services: Iterable[str], timeout: float = 30.0, delay: float = 0.1, + self, + services: Iterable[str], + timeout: float = 30.0, + delay: float = 0.1, ) -> ChangeID: """Restart services by name and wait (poll) for them to be started. 
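A minimal sketch of the service-lifecycle calls whose signatures are reflowed in the surrounding hunks, assuming hypothetical service names (`web`, `worker`) and the same placeholder socket path as above:

```python
from ops import pebble

client = pebble.Client(socket_path='/charm/containers/mycontainer/pebble.socket')

# Each call polls until the change is ready or `timeout` elapses, and
# returns a ChangeID identifying the Pebble change it created.
change_id = client.start_services(['web', 'worker'], timeout=30.0)
client.stop_services(['worker'])
client.restart_services(['web'], delay=0.1)
```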
@@ -1980,12 +2065,16 @@ def restart_services( return self._services_action('restart', services, timeout, delay) def _services_action( - self, action: str, services: Iterable[str], timeout: Optional[float], - delay: float, + self, + action: str, + services: Iterable[str], + timeout: Optional[float], + delay: float, ) -> ChangeID: if isinstance(services, (str, bytes)) or not hasattr(services, '__iter__'): raise TypeError( - f'services must be of type Iterable[str], not {type(services).__name__}') + f'services must be of type Iterable[str], not {type(services).__name__}' + ) services = list(services) for s in services: @@ -2002,7 +2091,8 @@ def _services_action( return change_id def wait_change( - self, change_id: ChangeID, + self, + change_id: ChangeID, timeout: Optional[float] = 30.0, delay: float = 0.1, ) -> Change: @@ -2066,16 +2156,19 @@ def _wait_change(self, change_id: ChangeID, timeout: Optional[float] = None) -> except APIError as e: if e.code == 404: raise NotImplementedError( - 'server does not implement wait-change endpoint') from None + 'server does not implement wait-change endpoint' + ) from None if e.code == 504: raise TimeoutError( - f'timed out waiting for change {change_id} ({timeout} seconds)') from None + f'timed out waiting for change {change_id} ({timeout} seconds)' + ) from None raise return Change.from_dict(resp['result']) - def _wait_change_using_polling(self, change_id: ChangeID, timeout: Optional[float], - delay: float): + def _wait_change_using_polling( + self, change_id: ChangeID, timeout: Optional[float], delay: float + ): """Wait for a change to be ready by polling the get-change API.""" deadline = time.time() + timeout if timeout is not None else 0 @@ -2089,8 +2182,8 @@ def _wait_change_using_polling(self, change_id: ChangeID, timeout: Optional[floa raise TimeoutError(f'timed out waiting for change {change_id} ({timeout} seconds)') def add_layer( - self, label: str, layer: Union[str, 'LayerDict', Layer], *, - combine: bool = False): + self, label: str, layer: Union[str, 'LayerDict', Layer], *, combine: bool = False + ): """Dynamically add a new layer onto the Pebble configuration layers. If combine is False (the default), append the new layer as the top @@ -2109,7 +2202,8 @@ def add_layer( layer_yaml = layer.to_yaml() else: raise TypeError( - f'layer must be str, dict, or pebble.Layer, not {type(layer).__name__}') + f'layer must be str, dict, or pebble.Layer, not {type(layer).__name__}' + ) body = { 'action': 'add', @@ -2138,17 +2232,12 @@ def get_services(self, names: Optional[Iterable[str]] = None) -> List[ServiceInf return [ServiceInfo.from_dict(info) for info in resp['result']] @typing.overload - def pull(self, path: str, *, encoding: None) -> BinaryIO: - ... + def pull(self, path: str, *, encoding: None) -> BinaryIO: ... @typing.overload - def pull(self, path: str, *, encoding: str = 'utf-8') -> TextIO: - ... + def pull(self, path: str, *, encoding: str = 'utf-8') -> TextIO: ... - def pull(self, - path: str, - *, - encoding: Optional[str] = 'utf-8') -> Union[BinaryIO, TextIO]: + def pull(self, path: str, *, encoding: Optional[str] = 'utf-8') -> Union[BinaryIO, TextIO]: """Read a file's content from the remote system. 
Args: @@ -2216,13 +2305,18 @@ def _raise_on_path_error(resp: '_FilesResponse', path: str): raise PathError(error['kind'], error['message']) def push( - self, path: str, source: '_IOSource', *, - encoding: str = 'utf-8', make_dirs: bool = False, - permissions: Optional[int] = None, - user_id: Optional[int] = None, - user: Optional[str] = None, - group_id: Optional[int] = None, - group: Optional[str] = None): + self, + path: str, + source: '_IOSource', + *, + encoding: str = 'utf-8', + make_dirs: bool = False, + permissions: Optional[int] = None, + user_id: Optional[int] = None, + user: Optional[str] = None, + group_id: Optional[int] = None, + group: Optional[str] = None, + ): """Write content to a given file path on the remote system. Args: @@ -2268,11 +2362,13 @@ def push( self._raise_on_path_error(typing.cast('_FilesResponse', resp), path) @staticmethod - def _make_auth_dict(permissions: Optional[int], - user_id: Optional[int], - user: Optional[str], - group_id: Optional[int], - group: Optional[str]) -> '_AuthDict': + def _make_auth_dict( + permissions: Optional[int], + user_id: Optional[int], + user: Optional[str], + group_id: Optional[int], + group: Optional[str], + ) -> '_AuthDict': d: _AuthDict = {} if permissions is not None: d['permissions'] = format(permissions, '03o') @@ -2286,8 +2382,9 @@ def _make_auth_dict(permissions: Optional[int], d['group'] = group return d - def _encode_multipart(self, metadata: Dict[str, Any], path: str, - source: '_IOSource', encoding: str): + def _encode_multipart( + self, metadata: Dict[str, Any], path: str, source: '_IOSource', encoding: str + ): # Python's stdlib mime/multipart handling is screwy and doesn't handle # binary properly, so roll our own. if isinstance(source, str): @@ -2302,15 +2399,21 @@ def _encode_multipart(self, metadata: Dict[str, Any], path: str, def generator() -> Generator[bytes, None, None]: yield b''.join([ - b'--', boundary, b'\r\n', + b'--', + boundary, + b'\r\n', b'Content-Type: application/json\r\n', b'Content-Disposition: form-data; name="request"\r\n', b'\r\n', - json.dumps(metadata).encode('utf-8'), b'\r\n', - b'--', boundary, b'\r\n', + json.dumps(metadata).encode('utf-8'), + b'\r\n', + b'--', + boundary, + b'\r\n', b'Content-Type: application/octet-stream\r\n', b'Content-Disposition: form-data; name="files"; filename="', - path_escaped, b'"\r\n', + path_escaped, + b'"\r\n', b'\r\n', ]) @@ -2323,13 +2426,16 @@ def generator() -> Generator[bytes, None, None]: yield b''.join([ b'\r\n', - b'--', boundary, b'--\r\n', + b'--', + boundary, + b'--\r\n', ]) return generator(), content_type - def list_files(self, path: str, *, pattern: Optional[str] = None, - itself: bool = False) -> List[FileInfo]: + def list_files( + self, path: str, *, pattern: Optional[str] = None, itself: bool = False + ) -> List[FileInfo]: """Return list of directory entries from given path on remote system. 
Despite the name, this method returns a list of files *and* @@ -2360,12 +2466,16 @@ def list_files(self, path: str, *, pattern: Optional[str] = None, return [FileInfo.from_dict(d) for d in result] def make_dir( - self, path: str, *, make_parents: bool = False, - permissions: Optional[int] = None, - user_id: Optional[int] = None, - user: Optional[str] = None, - group_id: Optional[int] = None, - group: Optional[str] = None): + self, + path: str, + *, + make_parents: bool = False, + permissions: Optional[int] = None, + user_id: Optional[int] = None, + user: Optional[str] = None, + group_id: Optional[int] = None, + group: Optional[str] = None, + ): """Create a directory on the remote system with the given attributes. Args: @@ -2437,9 +2547,8 @@ def exec( stdout: Optional[TextIO] = None, stderr: Optional[TextIO] = None, encoding: str = 'utf-8', - combine_stderr: bool = False - ) -> ExecProcess[str]: - ... + combine_stderr: bool = False, + ) -> ExecProcess[str]: ... # Exec I/O is bytes if encoding is explicitly set to None @typing.overload @@ -2459,9 +2568,8 @@ def exec( stdout: Optional[BinaryIO] = None, stderr: Optional[BinaryIO] = None, encoding: None = None, - combine_stderr: bool = False - ) -> ExecProcess[bytes]: - ... + combine_stderr: bool = False, + ) -> ExecProcess[bytes]: ... def exec( self, @@ -2479,7 +2587,7 @@ def exec( stdout: Optional[Union[TextIO, BinaryIO]] = None, stderr: Optional[Union[TextIO, BinaryIO]] = None, encoding: Optional[str] = 'utf-8', - combine_stderr: bool = False + combine_stderr: bool = False, ) -> ExecProcess[Any]: r"""Execute the given command on the remote system. @@ -2641,7 +2749,7 @@ def exec( change_id = resp['change'] task_id = resp['result']['task-id'] - stderr_ws: Optional['_WebSocket'] = None + stderr_ws: Optional[_WebSocket] = None try: control_ws = self._connect_websocket(task_id, 'control') stdio_ws = self._connect_websocket(task_id, 'stdio') @@ -2669,6 +2777,7 @@ def exec( def _cancel_stdin(): os.write(cancel_writer, b'x') # doesn't matter what we write os.close(cancel_writer) + cancel_stdin = _cancel_stdin t = _start_thread(_reader_to_websocket, stdin, stdio_ws, encoding, cancel_reader) @@ -2677,8 +2786,7 @@ def _cancel_stdin(): else: process_stdin = _WebsocketWriter(stdio_ws) if encoding is not None: - process_stdin = io.TextIOWrapper( - process_stdin, encoding=encoding, newline='') # type: ignore + process_stdin = io.TextIOWrapper(process_stdin, encoding=encoding, newline='') # type: ignore if stdout is not None: t = _start_thread(_websocket_to_writer, stdio_ws, stdout, encoding) @@ -2687,8 +2795,7 @@ def _cancel_stdin(): else: process_stdout = _WebsocketReader(stdio_ws) if encoding is not None: - process_stdout = io.TextIOWrapper( - process_stdout, encoding=encoding, newline='') # type: ignore + process_stdout = io.TextIOWrapper(process_stdout, encoding=encoding, newline='') # type: ignore process_stderr = None if not combine_stderr: @@ -2700,7 +2807,10 @@ def _cancel_stdin(): process_stderr = _WebsocketReader(ws) if encoding is not None: process_stderr = io.TextIOWrapper( - process_stderr, encoding=encoding, newline='') # type: ignore + process_stderr, # type: ignore + encoding=encoding, + newline='', + ) process: ExecProcess[Any] = ExecProcess( stdin=process_stdin, # type: ignore @@ -2724,7 +2834,7 @@ def _connect_websocket(self, task_id: str, websocket_id: str) -> '_WebSocket': sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(self.socket_path) url = self._websocket_url(task_id, websocket_id) - ws: '_WebSocket' = 
websocket.WebSocket(skip_utf8_validation=True) # type: ignore + ws: _WebSocket = websocket.WebSocket(skip_utf8_validation=True) # type: ignore ws.connect(url, socket=sock) return ws @@ -2746,8 +2856,9 @@ def send_signal(self, sig: Union[int, str], services: Iterable[str]): currently running. """ if isinstance(services, (str, bytes)) or not hasattr(services, '__iter__'): - raise TypeError('services must be of type Iterable[str], ' - f'not {type(services).__name__}') + raise TypeError( + f'services must be of type Iterable[str], not {type(services).__name__}' + ) for s in services: if not isinstance(s, str): raise TypeError(f'service names must be str, not {type(s).__name__}') @@ -2761,9 +2872,7 @@ def send_signal(self, sig: Union[int, str], services: Iterable[str]): self._request('POST', '/v1/signals', body=body) def get_checks( - self, - level: Optional[CheckLevel] = None, - names: Optional[Iterable[str]] = None + self, level: Optional[CheckLevel] = None, names: Optional[Iterable[str]] = None ) -> List[CheckInfo]: """Get the check status for the configured checks. @@ -2784,9 +2893,14 @@ def get_checks( resp = self._request('GET', '/v1/checks', query) return [CheckInfo.from_dict(info) for info in resp['result']] - def notify(self, type: NoticeType, key: str, *, - data: Optional[Dict[str, str]] = None, - repeat_after: Optional[datetime.timedelta] = None) -> str: + def notify( + self, + type: NoticeType, + key: str, + *, + data: Optional[Dict[str, str]] = None, + repeat_after: Optional[datetime.timedelta] = None, + ) -> str: """Record an occurrence of a notice with the specified options. Args: @@ -2869,7 +2983,7 @@ class _FilesParser: def __init__(self, boundary: Union[bytes, str]): self._response: Optional[_FilesResponse] = None # externally managed - self._part_type: Optional[Literal["response", "files"]] = None # externally managed + self._part_type: Optional[Literal['response', 'files']] = None # externally managed self._headers: Optional[email.message.Message] = None # externally managed self._files: Dict[str, _Tempfile] = {} @@ -2883,10 +2997,8 @@ def __init__(self, boundary: Union[bytes, str]): self._max_lookahead = 8 * 1024 * 1024 self._parser = _MultipartParser( - boundary, - self._process_header, - self._process_body, - max_lookahead=self._max_lookahead) + boundary, self._process_header, self._process_body, max_lookahead=self._max_lookahead + ) # RFC 2046 says that the boundary string needs to be preceded by a CRLF. 
# Unfortunately, the request library's header parsing logic strips off one of @@ -2900,8 +3012,7 @@ def _process_header(self, data: bytes): content_disposition = self._headers.get_content_disposition() if content_disposition != 'form-data': - raise ProtocolError( - f'unexpected content disposition: {content_disposition!r}') + raise ProtocolError(f'unexpected content disposition: {content_disposition!r}') name = self._headers.get_param('name', header='content-disposition') if name == 'files': @@ -2910,8 +3021,7 @@ def _process_header(self, data: bytes): raise ProtocolError('multipart "files" part missing filename') self._prepare_tempfile(filename) elif name != 'response': - raise ProtocolError( - f'unexpected name in content-disposition header: {name!r}') + raise ProtocolError(f'unexpected name in content-disposition header: {name!r}') self._part_type = typing.cast('Literal["response", "files"]', name) @@ -2968,20 +3078,25 @@ def get_file(self, path: str, encoding: Optional[str]) -> '_TextOrBinaryIO': # We're using text-based file I/O purely for file encoding purposes, not for # newline normalization. newline='' serves the line endings as-is. newline = '' if encoding else None - file_io = open(self._files[path].name, mode, # noqa: SIM115 - encoding=encoding, newline=newline) + file_io = open( # noqa: SIM115 + self._files[path].name, + mode, + encoding=encoding, + newline=newline, + ) # open() returns IO[Any] return typing.cast('_TextOrBinaryIO', file_io) class _MultipartParser: def __init__( - self, - marker: bytes, - handle_header: '_HeaderHandler', - handle_body: '_BodyHandler', - max_lookahead: int = 0, - max_boundary_length: int = 0): + self, + marker: bytes, + handle_header: '_HeaderHandler', + handle_body: '_BodyHandler', + max_lookahead: int = 0, + max_boundary_length: int = 0, + ): r"""Configures a parser for mime multipart messages. Args: @@ -3054,14 +3169,14 @@ def feed(self, data: bytes): safe_bound = max(0, len(self._buf) - self._max_boundary_length) if ii != -1: # part body is finished - self._handle_body(self._buf[self._pos:ii], done=True) + self._handle_body(self._buf[self._pos : ii], done=True) self._buf = self._buf[ii:] self._pos = 0 if self._done: return # terminal boundary reached elif safe_bound > self._pos: # write partial body data - data = self._buf[self._pos:safe_bound] + data = self._buf[self._pos : safe_bound] self._pos = safe_bound self._handle_body(data) return # waiting for more data @@ -3069,8 +3184,7 @@ def feed(self, data: bytes): return # waiting for more data -def _next_part_boundary(buf: bytes, marker: bytes, start: int = 0 - ) -> Tuple[int, int, bool]: +def _next_part_boundary(buf: bytes, marker: bytes, start: int = 0) -> Tuple[int, int, bool]: """Returns the index of the next boundary marker in buf beginning at start. Returns: diff --git a/ops/storage.py b/ops/storage.py index 2714749ba..ccbebd007 100644 --- a/ops/storage.py +++ b/ops/storage.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Structures to offer storage to the charm (through Juju or locally).""" + import logging import os import pickle @@ -58,13 +59,13 @@ def __init__(self, filename: Union['Path', str]): if not os.path.exists(str(filename)): # sqlite3.connect creates the file silently if it does not exist - logger.debug("Initializing SQLite local storage: %s.", filename) + logger.debug('Initializing SQLite local storage: %s.', filename) - if filename != ":memory:": + if filename != ':memory:': self._ensure_db_permissions(str(filename)) - self._db = sqlite3.connect(str(filename), - isolation_level=None, - timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._db = sqlite3.connect( + str(filename), isolation_level=None, timeout=self.DB_LOCK_TIMEOUT.total_seconds() + ) self._setup() def _ensure_db_permissions(self, filename: str): @@ -74,33 +75,33 @@ def _ensure_db_permissions(self, filename: str): try: os.chmod(filename, mode) except OSError as e: - raise RuntimeError(f"Unable to adjust access permission of {filename!r}") from e + raise RuntimeError(f'Unable to adjust access permission of {filename!r}') from e return try: fd = os.open(filename, os.O_CREAT | os.O_EXCL, mode=mode) except OSError as e: - raise RuntimeError(f"Unable to adjust access permission of {filename!r}") from e + raise RuntimeError(f'Unable to adjust access permission of {filename!r}') from e os.close(fd) def _setup(self): """Make the database ready to be used as storage.""" # Make sure that the database is locked until the connection is closed, # not until the transaction ends. - self._db.execute("PRAGMA locking_mode=EXCLUSIVE") - c = self._db.execute("BEGIN") + self._db.execute('PRAGMA locking_mode=EXCLUSIVE') + c = self._db.execute('BEGIN') c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") if c.fetchone()[0] == 0: # Keep in mind what might happen if the process dies somewhere below. # The system must not be rendered permanently broken by that. - self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") - self._db.execute(''' + self._db.execute('CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)') + self._db.execute(""" CREATE TABLE notice ( sequence INTEGER PRIMARY KEY AUTOINCREMENT, event_path TEXT, observer_path TEXT, method_name TEXT) - ''') + """) self._db.commit() def close(self) -> None: @@ -127,7 +128,7 @@ def save_snapshot(self, handle_path: str, snapshot_data: Any) -> None: """ # Use pickle for serialization, so the value remains portable. raw_data = pickle.dumps(snapshot_data) - self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data)) + self._db.execute('REPLACE INTO snapshot VALUES (?, ?)', (handle_path, raw_data)) def load_snapshot(self, handle_path: str) -> Any: """Part of the Storage API, retrieve a snapshot that was previously saved. @@ -139,7 +140,7 @@ def load_snapshot(self, handle_path: str) -> Any: NoSnapshotError: if there is no snapshot for the given handle_path. """ c = self._db.cursor() - c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + c.execute('SELECT data FROM snapshot WHERE handle=?', (handle_path,)) row = c.fetchone() if row: return pickle.loads(row[0]) # noqa: S301 @@ -150,12 +151,12 @@ def drop_snapshot(self, handle_path: str): Dropping a snapshot that doesn't exist is treated as a no-op. 
""" - self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + self._db.execute('DELETE FROM snapshot WHERE handle=?', (handle_path,)) def list_snapshots(self) -> Generator[str, None, None]: """Return the name of all snapshots that are currently saved.""" c = self._db.cursor() - c.execute("SELECT handle FROM snapshot") + c.execute('SELECT handle FROM snapshot') while True: rows = c.fetchmany() if not rows: @@ -165,17 +166,21 @@ def list_snapshots(self) -> Generator[str, None, None]: def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None: """Part of the Storage API, record an notice (event and observer).""" - self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)', - (event_path, observer_path, method_name)) + self._db.execute( + 'INSERT INTO notice VALUES (NULL, ?, ?, ?)', (event_path, observer_path, method_name) + ) def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None: """Part of the Storage API, remove a notice that was previously recorded.""" - self._db.execute(''' + self._db.execute( + """ DELETE FROM notice WHERE event_path=? AND observer_path=? AND method_name=? - ''', (event_path, observer_path, method_name)) + """, + (event_path, observer_path, method_name), + ) def notices(self, event_path: Optional[str] = None) -> '_NoticeGenerator': """Part of the Storage API, return all notices that begin with event_path. @@ -188,18 +193,21 @@ def notices(self, event_path: Optional[str] = None) -> '_NoticeGenerator': Iterable of (event_path, observer_path, method_name) tuples """ if event_path: - c = self._db.execute(''' + c = self._db.execute( + """ SELECT event_path, observer_path, method_name FROM notice WHERE event_path=? ORDER BY sequence - ''', (event_path,)) + """, + (event_path,), + ) else: - c = self._db.execute(''' + c = self._db.execute(""" SELECT event_path, observer_path, method_name FROM notice ORDER BY sequence - ''') + """) while True: rows = c.fetchmany() if not rows: @@ -215,7 +223,7 @@ class JujuStorage: as the way to store state for the framework and for components. """ - NOTICE_KEY = "#notices#" + NOTICE_KEY = '#notices#' def __init__(self, backend: Optional['_JujuStorageBackend'] = None): self._backend: _JujuStorageBackend = backend or _JujuStorageBackend() @@ -327,6 +335,7 @@ class _SimpleLoader(_BaseLoader): # type: ignore that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just subclass SafeLoader and add tuples back in. """ + # Taken from the example at: # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml @@ -335,7 +344,8 @@ class _SimpleLoader(_BaseLoader): # type: ignore _SimpleLoader.add_constructor( # type: ignore 'tag:yaml.org,2002:python/tuple', - _SimpleLoader.construct_python_tuple) # type: ignore + _SimpleLoader.construct_python_tuple, # type: ignore +) class _SimpleDumper(_BaseDumper): # type: ignore @@ -344,6 +354,7 @@ class _SimpleDumper(_BaseDumper): # type: ignore YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So we want to only support dumping out types that are safe to load. """ + represent_tuple: '_TupleRepresenterType' = yaml.Dumper.represent_tuple @@ -375,11 +386,9 @@ def set(self, key: str, value: Any) -> None: # have the same default style. 
encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None) content = yaml.dump( - {key: encoded_value}, - default_style='|', - default_flow_style=False, - Dumper=_SimpleDumper) - _run(["state-set", "--file", "-"], input=content, check=True) + {key: encoded_value}, default_style='|', default_flow_style=False, Dumper=_SimpleDumper + ) + _run(['state-set', '--file', '-'], input=content, check=True) def get(self, key: str) -> Any: """Get the bytes value associated with a given key. @@ -390,7 +399,7 @@ def get(self, key: str) -> Any: CalledProcessError: if 'state-get' returns an error code. """ # We don't capture stderr here so it can end up in debug logs. - p = _run(["state-get", key], stdout=subprocess.PIPE, check=True) + p = _run(['state-get', key], stdout=subprocess.PIPE, check=True) if p.stdout == '' or p.stdout == '\n': raise KeyError(key) return yaml.load(p.stdout, Loader=_SimpleLoader) # type: ignore # noqa: S506 @@ -403,7 +412,7 @@ def delete(self, key: str) -> None: Raises: CalledProcessError: if 'state-delete' returns an error code. """ - _run(["state-delete", key], check=True) + _run(['state-delete', key], check=True) class NoSnapshotError(Exception): diff --git a/ops/testing.py b/ops/testing.py index e534aaf2f..8a508ddce 100644 --- a/ops/testing.py +++ b/ops/testing.py @@ -14,7 +14,6 @@ """Infrastructure to build unit tests for charms using the ops library.""" - import dataclasses import datetime import fnmatch @@ -65,31 +64,37 @@ ReadableBuffer = Union[bytes, str, StringIO, BytesIO, BinaryIO] _StringOrPath = Union[str, pathlib.PurePosixPath, pathlib.Path] -_FileKwargs = TypedDict('_FileKwargs', { - 'permissions': Optional[int], - 'last_modified': datetime.datetime, - 'user_id': Optional[int], - 'user': Optional[str], - 'group_id': Optional[int], - 'group': Optional[str], -}) - -_RelationEntities = TypedDict('_RelationEntities', { - 'app': str, - 'units': List[str] -}) +_FileKwargs = TypedDict( + '_FileKwargs', + { + 'permissions': Optional[int], + 'last_modified': datetime.datetime, + 'user_id': Optional[int], + 'user': Optional[str], + 'group_id': Optional[int], + 'group': Optional[str], + }, +) + +_RelationEntities = TypedDict('_RelationEntities', {'app': str, 'units': List[str]}) _StatusName = Literal['unknown', 'blocked', 'active', 'maintenance', 'waiting'] -_RawStatus = TypedDict('_RawStatus', { - 'status': _StatusName, - 'message': str, -}) -_ConfigOption = TypedDict('_ConfigOption', { - 'type': Literal['string', 'int', 'float', 'boolean', 'secret'], - 'description': str, - 'default': Union[str, int, float, bool], -}) -_RawConfig = TypedDict("_RawConfig", {'options': Dict[str, _ConfigOption]}) +_RawStatus = TypedDict( + '_RawStatus', + { + 'status': _StatusName, + 'message': str, + }, +) +_ConfigOption = TypedDict( + '_ConfigOption', + { + 'type': Literal['string', 'int', 'float', 'boolean', 'secret'], + 'description': str, + 'default': Union[str, int, float, bool], + }, +) +_RawConfig = TypedDict('_RawConfig', {'options': Dict[str, _ConfigOption]}) # YAMLStringOrFile is something like metadata.yaml or actions.yaml. You can @@ -114,6 +119,7 @@ class ExecArgs: These arguments will be passed to the :meth:`Harness.handle_exec` handler function. See :meth:`ops.pebble.Client.exec` for documentation of properties. """ + command: List[str] environment: Dict[str, str] working_dir: Optional[str] @@ -134,9 +140,10 @@ class ExecResult: This class is typically used to return the output and exit code from the :meth:`Harness.handle_exec` result or handler function. 
""" + exit_code: int = 0 - stdout: Union[str, bytes] = b"" - stderr: Union[str, bytes] = b"" + stdout: Union[str, bytes] = b'' + stderr: Union[str, bytes] = b'' ExecHandler = Callable[[ExecArgs], Union[None, ExecResult]] @@ -169,7 +176,7 @@ def __init__(self, message: str, output: ActionOutput): def __str__(self): if self.message: return self.message - return "Event handler called `fail()` with no additional details." + return 'Event handler called `fail()` with no additional details.' @dataclasses.dataclass() @@ -245,17 +252,18 @@ def test_bar(harness): """ def __init__( - self, - charm_cls: Type[CharmType], - *, - meta: Optional[YAMLStringOrFile] = None, - actions: Optional[YAMLStringOrFile] = None, - config: Optional[YAMLStringOrFile] = None): + self, + charm_cls: Type[CharmType], + *, + meta: Optional[YAMLStringOrFile] = None, + actions: Optional[YAMLStringOrFile] = None, + config: Optional[YAMLStringOrFile] = None, + ): self._charm_cls = charm_cls self._charm: Optional[CharmType] = None self._charm_dir = 'no-disk-path' # this may be updated by _create_meta self._meta = self._create_meta(meta, actions) - self._unit_name: str = f"{self._meta.name}/0" + self._unit_name: str = f'{self._meta.name}/0' self._hooks_enabled: bool = True self._relation_id_counter: int = 0 self._action_id_counter: int = 0 @@ -264,7 +272,8 @@ def __init__( self._model = model.Model(self._meta, self._backend) self._storage = storage.SQLiteStorage(':memory:') self._framework = framework.Framework( - self._storage, self._charm_dir, self._meta, self._model) + self._storage, self._charm_dir, self._meta, self._model + ) def _event_context(self, event_name: str): """Configures the Harness to behave as if an event hook were running. @@ -334,8 +343,9 @@ def charm(self) -> CharmType: Until then, attempting to access this property will raise an exception. """ if self._charm is None: - raise RuntimeError('The charm instance is not available yet. ' - 'Call Harness.begin() first.') + raise RuntimeError( + 'The charm instance is not available yet. Call Harness.begin() first.' + ) return self._charm @property @@ -446,7 +456,7 @@ def begin_with_initial_hooks(self) -> None: rel_ids = self._backend._relation_ids_map.get(relname, []) random.shuffle(rel_ids) for rel_id in rel_ids: - app_name = self._backend._relation_app_and_units[rel_id]["app"] + app_name = self._backend._relation_app_and_units[rel_id]['app'] self._emit_relation_created(relname, rel_id, app_name) if self._backend._is_leader: charm.on.leader_elected.emit() @@ -464,13 +474,13 @@ def begin_with_initial_hooks(self) -> None: # If the initial hooks do not set a unit status, the Juju controller will switch # the unit status from "Maintenance" to "Unknown". See gh#726 post_setup_sts = self._backend.status_get() - if post_setup_sts.get("status") == "maintenance" and not post_setup_sts.get("message"): + if post_setup_sts.get('status') == 'maintenance' and not post_setup_sts.get('message'): self._backend._unit_status = {'status': 'unknown', 'message': ''} all_ids = list(self._backend._relation_names.items()) random.shuffle(all_ids) for rel_id, rel_name in all_ids: rel_app_and_units = self._backend._relation_app_and_units[rel_id] - app_name = rel_app_and_units["app"] + app_name = rel_app_and_units['app'] # Note: Juju *does* fire relation events for a given relation in the sorted order of # the unit names. It also always fires relation-changed immediately after # relation-joined for the same unit. 
@@ -479,12 +489,10 @@ def begin_with_initial_hooks(self) -> None: if self._backend._relation_data_raw[rel_id].get(app_name): app = self._model.get_app(app_name) charm.on[rel_name].relation_changed.emit(relation, app, None) - for unit_name in sorted(rel_app_and_units["units"]): + for unit_name in sorted(rel_app_and_units['units']): remote_unit = self._model.get_unit(unit_name) - charm.on[rel_name].relation_joined.emit( - relation, remote_unit.app, remote_unit) - charm.on[rel_name].relation_changed.emit( - relation, remote_unit.app, remote_unit) + charm.on[rel_name].relation_joined.emit(relation, remote_unit.app, remote_unit) + charm.on[rel_name].relation_changed.emit(relation, remote_unit.app, remote_unit) def cleanup(self) -> None: """Called by the test infrastructure to clean up any temporary directories/files/etc. @@ -493,8 +501,11 @@ def cleanup(self) -> None: """ self._backend._cleanup() - def _create_meta(self, charm_metadata_yaml: Optional[YAMLStringOrFile], - action_metadata_yaml: Optional[YAMLStringOrFile]) -> CharmMeta: + def _create_meta( + self, + charm_metadata_yaml: Optional[YAMLStringOrFile], + action_metadata_yaml: Optional[YAMLStringOrFile], + ) -> CharmMeta: """Create a CharmMeta object. Handle the cases where a user doesn't supply explicit metadata snippets. @@ -513,7 +524,7 @@ def _create_meta(self, charm_metadata_yaml: Optional[YAMLStringOrFile], charmcraft_metadata: Optional[Dict[str, Any]] = None if charm_dir: # Check charmcraft.yaml and load it if it exists - charmcraft_meta = charm_dir / "charmcraft.yaml" + charmcraft_meta = charm_dir / 'charmcraft.yaml' if charmcraft_meta.is_file(): self._charm_dir = charm_dir charmcraft_metadata = yaml.safe_load(charmcraft_meta.read_text()) @@ -526,7 +537,7 @@ def _create_meta(self, charm_metadata_yaml: Optional[YAMLStringOrFile], else: # Check charmcraft.yaml for metadata if no metadata is provided if charmcraft_metadata is not None: - meta_keys = ["name", "summary", "description"] + meta_keys = ['name', 'summary', 'description'] if any(key in charmcraft_metadata for key in meta_keys): # Unrelated keys in the charmcraft.yaml file will be ignored. 
charm_metadata = charmcraft_metadata @@ -540,7 +551,7 @@ def _create_meta(self, charm_metadata_yaml: Optional[YAMLStringOrFile], # Use default metadata if metadata is not found if charm_metadata is None: - charm_metadata = {"name": "test-charm"} + charm_metadata = {'name': 'test-charm'} action_metadata: Optional[Dict[str, Any]] = None # Load actions from parameters if provided @@ -550,8 +561,8 @@ def _create_meta(self, charm_metadata_yaml: Optional[YAMLStringOrFile], action_metadata = yaml.safe_load(action_metadata_yaml) else: # Check charmcraft.yaml for actions if no actions are provided - if charmcraft_metadata is not None and "actions" in charmcraft_metadata: - action_metadata = charmcraft_metadata["actions"] + if charmcraft_metadata is not None and 'actions' in charmcraft_metadata: + action_metadata = charmcraft_metadata['actions'] # Still no actions, check actions.yaml if charm_dir and action_metadata is None: @@ -584,11 +595,12 @@ def _get_config(self, charm_config_yaml: Optional['YAMLStringOrFile']): else: if charm_dir: # Check charmcraft.yaml for config if no config is provided - charmcraft_meta = charm_dir / "charmcraft.yaml" + charmcraft_meta = charm_dir / 'charmcraft.yaml' if charmcraft_meta.is_file(): charmcraft_metadata: Dict[str, Any] = yaml.safe_load( - charmcraft_meta.read_text()) - config = charmcraft_metadata.get("config") + charmcraft_meta.read_text() + ) + config = charmcraft_metadata.get('config') # Still no config, check config.yaml if config is None: @@ -605,8 +617,9 @@ def _get_config(self, charm_config_yaml: Optional['YAMLStringOrFile']): raise TypeError(config) return cast('_RawConfig', config) - def add_oci_resource(self, resource_name: str, - contents: Optional[Mapping[str, str]] = None) -> None: + def add_oci_resource( + self, resource_name: str, contents: Optional[Mapping[str, str]] = None + ) -> None: """Add OCI resources to the backend. This will register an OCI resource and create a temporary file for processing metadata @@ -618,13 +631,14 @@ def add_oci_resource(self, resource_name: str, contents: Optional custom dict to write for the named resource. 
""" if not contents: - contents = {'registrypath': 'registrypath', - 'username': 'username', - 'password': 'password', - } + contents = { + 'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } if resource_name not in self._meta.resources: raise RuntimeError(f'Resource {resource_name} is not a defined resources') - if self._meta.resources[resource_name].type != "oci-image": + if self._meta.resources[resource_name].type != 'oci-image': raise RuntimeError(f'Resource {resource_name} is not an OCI Image') as_yaml = yaml.safe_dump(contents) @@ -644,9 +658,10 @@ def add_resource(self, resource_name: str, content: AnyStr) -> None: if resource_name not in self._meta.resources: raise RuntimeError(f'Resource {resource_name} is not a defined resource') record = self._meta.resources[resource_name] - if record.type != "file": + if record.type != 'file': raise RuntimeError( - f'Resource {resource_name} is not a file, but actually {record.type}') + f'Resource {resource_name} is not a file, but actually {record.type}' + ) filename = record.filename if filename is None: filename = resource_name @@ -656,7 +671,7 @@ def add_resource(self, resource_name: str, content: AnyStr) -> None: def populate_oci_resources(self) -> None: """Populate all OCI resources.""" for name, data in self._meta.resources.items(): - if data.type == "oci-image": + if data.type == 'oci-image': self.add_oci_resource(name) def disable_hooks(self) -> None: @@ -703,8 +718,7 @@ def _next_relation_id(self): self._relation_id_counter += 1 return rel_id - def add_storage(self, storage_name: str, count: int = 1, - *, attach: bool = False) -> List[str]: + def add_storage(self, storage_name: str, count: int = 1, *, attach: bool = False) -> List[str]: """Create a new storage device and attach it to this unit. To have repeatable tests, each device will be initialized with @@ -725,7 +739,8 @@ def add_storage(self, storage_name: str, count: int = 1, """ if storage_name not in self._meta.storages: raise RuntimeError( - f"the key '{storage_name}' is not specified as a storage key in metadata") + f"the key '{storage_name}' is not specified as a storage key in metadata" + ) storage_indices = self._backend.storage_add(storage_name, count) @@ -755,11 +770,11 @@ def detach_storage(self, storage_id: str) -> None: raise RuntimeError('cannot detach storage before Harness is initialised') storage_name, storage_index = storage_id.split('/', 1) storage_index = int(storage_index) - storage_attached = self._backend._storage_is_attached( - storage_name, storage_index) + storage_attached = self._backend._storage_is_attached(storage_name, storage_index) if storage_attached and self._hooks_enabled: self.charm.on[storage_name].storage_detaching.emit( - model.Storage(storage_name, storage_index, self._backend)) + model.Storage(storage_name, storage_index, self._backend) + ) self._backend._storage_detach(storage_id) def attach_storage(self, storage_id: str) -> None: @@ -790,7 +805,8 @@ def attach_storage(self, storage_id: str) -> None: storage_index = int(storage_index) self.charm.on[storage_name].storage_attached.emit( - model.Storage(storage_name, storage_index, self._backend)) + model.Storage(storage_name, storage_index, self._backend) + ) def remove_storage(self, storage_id: str) -> None: """Detach a storage device. 
@@ -811,17 +827,23 @@ def remove_storage(self, storage_id: str) -> None: storage_index = int(storage_index) if storage_name not in self._meta.storages: raise RuntimeError( - f"the key '{storage_name}' is not specified as a storage key in metadata") - is_attached = self._backend._storage_is_attached( - storage_name, storage_index) + f"the key '{storage_name}' is not specified as a storage key in metadata" + ) + is_attached = self._backend._storage_is_attached(storage_name, storage_index) if self._charm is not None and self._hooks_enabled and is_attached: self.charm.on[storage_name].storage_detaching.emit( - model.Storage(storage_name, storage_index, self._backend)) + model.Storage(storage_name, storage_index, self._backend) + ) self._backend._storage_remove(storage_id) - def add_relation(self, relation_name: str, remote_app: str, *, - app_data: Optional[Mapping[str, str]] = None, - unit_data: Optional[Mapping[str, str]] = None) -> int: + def add_relation( + self, + relation_name: str, + remote_app: str, + *, + app_data: Optional[Mapping[str, str]] = None, + unit_data: Optional[Mapping[str, str]] = None, + ) -> int: """Declare that there is a new relation between this application and `remote_app`. This function creates a relation with an application and triggers a @@ -868,24 +890,26 @@ def add_relation(self, relation_name: str, remote_app: str, *, Return: The ID of the relation created. """ - if not (relation_name in self._meta.provides - or relation_name in self._meta.requires - or relation_name in self._meta.peers): + if not ( + relation_name in self._meta.provides + or relation_name in self._meta.requires + or relation_name in self._meta.peers + ): raise RelationNotFoundError(f'relation {relation_name!r} not declared in metadata') relation_id = self._next_relation_id() - self._backend._relation_ids_map.setdefault( - relation_name, []).append(relation_id) + self._backend._relation_ids_map.setdefault(relation_name, []).append(relation_id) self._backend._relation_names[relation_id] = relation_name self._backend._relation_list_map[relation_id] = [] self._backend._relation_data_raw[relation_id] = { remote_app: {}, self._backend.unit_name: {}, - self._backend.app_name: {}} + self._backend.app_name: {}, + } self._backend._relation_app_and_units[relation_id] = { - "app": remote_app, - "units": [], + 'app': remote_app, + 'units': [], } # Reload the relation_ids list if self._model is not None: @@ -904,10 +928,10 @@ def add_relation(self, relation_name: str, remote_app: str, *, if not self._backend._networks.get((None, None)): # If we don't already have a network binding for this relation id, create one. if not self._backend._networks.get((relation_name, relation_id)): - self.add_network("10.0.0.10", endpoint=relation_name, relation_id=relation_id) + self.add_network('10.0.0.10', endpoint=relation_name, relation_id=relation_id) # If we don't already have a default network binding for this endpoint, create one. 
if not self._backend._networks.get((relation_name, None)): - self.add_network("192.0.2.0", endpoint=relation_name) + self.add_network('192.0.2.0', endpoint=relation_name) return relation_id @@ -954,21 +978,21 @@ def remove_relation(self, relation_id: int) -> None: # Remove secret grants that give access via this relation for secret in self._backend._secrets: - secret.grants = {rid: names for rid, names in secret.grants.items() - if rid != relation_id} + secret.grants = { + rid: names for rid, names in secret.grants.items() if rid != relation_id + } - def _emit_relation_created(self, relation_name: str, relation_id: int, - remote_app: str) -> None: + def _emit_relation_created( + self, relation_name: str, relation_id: int, remote_app: str + ) -> None: """Trigger relation-created for a given relation with a given remote application.""" if self._charm is None or not self._hooks_enabled: return relation = self._model.get_relation(relation_name, relation_id) app = self._model.get_app(remote_app) - self._charm.on[relation_name].relation_created.emit( - relation, app) + self._charm.on[relation_name].relation_created.emit(relation, app) - def _emit_relation_broken(self, relation_name: str, relation_id: int, - remote_app: str) -> None: + def _emit_relation_broken(self, relation_name: str, relation_id: int, remote_app: str) -> None: """Trigger relation-broken for a given relation with a given remote application.""" if self._charm is None or not self._hooks_enabled: return @@ -1004,8 +1028,10 @@ def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: relation = self._model.get_relation(relation_name, relation_id) if not relation: - raise RuntimeError('Relation id {} is mapped to relation name {},' - 'but no relation matching that name was found.') + raise RuntimeError( + 'Relation id {} is mapped to relation name {},' + 'but no relation matching that name was found.' + ) self._backend._relation_data_raw[relation_id][remote_unit_name] = {} app = relation.app @@ -1017,7 +1043,7 @@ def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: f'not {remote_unit_name!r}.' ) app_and_units = self._backend._relation_app_and_units - app_and_units[relation_id]["units"].append(remote_unit_name) + app_and_units[relation_id]['units'].append(remote_unit_name) # Make sure that the Model reloads the relation_list for this relation_id, as well as # reloading the relation data for this unit. remote_unit = self._model.get_unit(remote_unit_name) @@ -1027,8 +1053,7 @@ def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: self._model.relations._invalidate(relation_name) if self._charm is None or not self._hooks_enabled: return - self._charm.on[relation_name].relation_joined.emit( - relation, remote_unit.app, remote_unit) + self._charm.on[relation_name].relation_joined.emit(relation, remote_unit.app, remote_unit) def remove_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: """Remove a unit from a relation. @@ -1062,8 +1087,10 @@ def remove_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: # This should not really happen, since there being a relation name mapped # to this ID in _relation_names should guarantee that you created the relation # following the proper path, but still... - raise RuntimeError('Relation id {} is mapped to relation name {},' - 'but no relation matching that name was found.') + raise RuntimeError( + 'Relation id {} is mapped to relation name {},' + 'but no relation matching that name was found.' 
+ ) unit_cache = relation.data.get(remote_unit, None) @@ -1073,7 +1100,7 @@ def remove_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: self._emit_relation_departed(relation_id, remote_unit_name) # remove the relation data for the departed unit now that the event has happened self._backend._relation_list_map[relation_id].remove(remote_unit_name) - self._backend._relation_app_and_units[relation_id]["units"].remove(remote_unit_name) + self._backend._relation_app_and_units[relation_id]['units'].remove(remote_unit_name) self._backend._relation_data_raw[relation_id].pop(remote_unit_name) self.model._relations._invalidate(relation_name=relation.name) @@ -1092,8 +1119,7 @@ def _emit_relation_departed(self, relation_id: int, unit_name: str): unit = self.model.get_unit(unit_name) else: raise ValueError('Invalid Unit Name') - self._charm.on[rel_name].relation_departed.emit( - relation, app, unit, unit_name) + self._charm.on[rel_name].relation_departed.emit(relation, app, unit, unit_name) def get_relation_data(self, relation_id: int, app_or_unit: AppUnitOrName) -> Mapping[str, str]: """Get the relation data bucket for a single app or unit in a given relation. @@ -1127,9 +1153,7 @@ def get_pod_spec(self) -> Tuple[Mapping[Any, Any], Mapping[Any, Any]]: """ return self._backend._pod_spec - def get_container_pebble_plan( - self, container_name: str - ) -> pebble.Plan: + def get_container_pebble_plan(self, container_name: str) -> pebble.Plan: """Return the current plan that Pebble is executing for the given container. Args: @@ -1163,10 +1187,15 @@ def container_pebble_ready(self, container_name: str): self.set_can_connect(container, True) self.charm.on[container_name].pebble_ready.emit(container) - def pebble_notify(self, container_name: str, key: str, *, - data: Optional[Dict[str, str]] = None, - repeat_after: Optional[datetime.timedelta] = None, - type: pebble.NoticeType = pebble.NoticeType.CUSTOM) -> str: + def pebble_notify( + self, + container_name: str, + key: str, + *, + data: Optional[Dict[str, str]] = None, + repeat_after: Optional[datetime.timedelta] = None, + type: pebble.NoticeType = pebble.NoticeType.CUSTOM, + ) -> str: """Record a Pebble notice with the specified key and data. If :meth:`begin` has been called and the notice is new or was repeated, @@ -1234,10 +1263,10 @@ def set_model_uuid(self, uuid: str) -> None: self._backend.model_uuid = uuid def update_relation_data( - self, - relation_id: int, - app_or_unit: str, - key_values: Mapping[str, str], + self, + relation_id: int, + app_or_unit: str, + key_values: Mapping[str, str], ) -> None: """Update the relation data for a given unit or application in a given relation. @@ -1261,8 +1290,10 @@ def update_relation_data( entity = self._model.get_app(app_or_unit) if not relation: - raise RuntimeError('Relation id {} is mapped to relation name {},' - 'but no relation matching that name was found.') + raise RuntimeError( + 'Relation id {} is mapped to relation name {},' + 'but no relation matching that name was found.' + ) rel_data = relation.data.get(entity, None) if rel_data is not None: @@ -1324,9 +1355,9 @@ def _emit_relation_changed(self, relation_id: int, app_or_unit: str): self._charm.on[rel_name].relation_changed.emit(*args) def _update_config( - self, - key_values: Optional[Mapping[str, Union[str, int, float, bool]]] = None, - unset: Iterable[str] = (), + self, + key_values: Optional[Mapping[str, Union[str, int, float, bool]]] = None, + unset: Iterable[str] = (), ) -> None: """Update the config as seen by the charm. 
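A rough sketch of how the `Harness.update_relation_data` path touched above is typically driven from a test; the `demo` charm, `db` endpoint, and `remoteapp` names are invented for illustration:

    # A rough sketch only: the 'demo' charm, 'db' endpoint, and 'remoteapp'
    # names are invented for illustration.
    import ops
    import ops.testing


    def test_remote_unit_data_is_visible_to_the_charm():
        harness = ops.testing.Harness(
            ops.CharmBase,
            meta='name: demo\nrequires:\n  db:\n    interface: db',
        )
        harness.begin()
        relation_id = harness.add_relation('db', 'remoteapp')
        harness.add_relation_unit(relation_id, 'remoteapp/0')
        # Writing to the remote unit's bucket also emits relation-changed
        # on the charm under test.
        harness.update_relation_data(relation_id, 'remoteapp/0', {'host': '10.0.0.1'})
        relation = harness.model.get_relation('db', relation_id)
        assert relation is not None
        remote_unit = next(iter(relation.units))
        assert relation.data[remote_unit]['host'] == '10.0.0.1'
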
@@ -1361,9 +1392,9 @@ def _update_config( config.pop(key, None) def update_config( - self, - key_values: Optional[Mapping[str, Union[str, int, float, bool]]] = None, - unset: Iterable[str] = (), + self, + key_values: Optional[Mapping[str, Union[str, int, float, bool]]] = None, + unset: Iterable[str] = (), ) -> None: """Update the config as seen by the charm. @@ -1420,7 +1451,7 @@ def set_planned_units(self, num_units: int) -> None: event. """ if num_units < 0: - raise TypeError("num_units must be 0 or a positive integer.") + raise TypeError('num_units must be 0 or a positive integer.') self._backend._planned_units = num_units def reset_planned_units(self) -> None: @@ -1432,13 +1463,17 @@ def reset_planned_units(self) -> None: """ self._backend._planned_units = None - def add_network(self, address: str, *, - endpoint: Optional[str] = None, - relation_id: Optional[int] = None, - cidr: Optional[str] = None, - interface: str = 'eth0', - ingress_addresses: Optional[Iterable[str]] = None, - egress_subnets: Optional[Iterable[str]] = None): + def add_network( + self, + address: str, + *, + endpoint: Optional[str] = None, + relation_id: Optional[int] = None, + cidr: Optional[str] = None, + interface: str = 'eth0', + ingress_addresses: Optional[Iterable[str]] = None, + egress_subnets: Optional[Iterable[str]] = None, + ): """Add simulated network data for the given relation endpoint (binding). Calling this multiple times with the same (binding, relation_id) @@ -1485,11 +1520,13 @@ def add_network(self, address: str, *, relation_name = self._backend._relation_names.get(relation_id) if relation_name is None: raise model.ModelError( - f'relation_id {relation_id} has not been added; use add_relation') + f'relation_id {relation_id} has not been added; use add_relation' + ) if endpoint != relation_name: raise model.ModelError( - f"endpoint {endpoint!r} does not correspond to relation_id " - + f"{relation_id} ({relation_name!r})") + f'endpoint {endpoint!r} does not correspond to relation_id ' + f'{relation_id} ({relation_name!r})' + ) parsed_address = ipaddress.ip_address(address) # raises ValueError if not an IP if cidr is None: @@ -1503,12 +1540,14 @@ def add_network(self, address: str, *, egress_subnets = [cidr] data = { - 'bind-addresses': [{ - 'interface-name': interface, - 'addresses': [ - {'cidr': cidr, 'value': address}, - ], - }], + 'bind-addresses': [ + { + 'interface-name': interface, + 'addresses': [ + {'cidr': cidr, 'value': address}, + ], + } + ], 'egress-subnets': list(egress_subnets), 'ingress-addresses': list(ingress_addresses), } @@ -1618,8 +1657,10 @@ def set_secret_content(self, secret_id: str, content: Dict[str, str]): model.Secret._validate_content(content) secret = self._ensure_secret(secret_id) if secret.owner_name in [self.model.app.name, self.model.unit.name]: - raise RuntimeError(f'Secret {secret_id!r} owned by the charm under test, ' - f"can't call set_secret_content") + raise RuntimeError( + f'Secret {secret_id!r} owned by the charm under test, ' + f"can't call set_secret_content" + ) new_revision = _SecretRevision( revision=secret.revisions[-1].revision + 1, content=content, @@ -1653,8 +1694,9 @@ def grant_secret(self, secret_id: str, observer: AppUnitOrName): # Model secrets: if secret.owner_name in [self.model.app.name, self.model.unit.name]: - raise RuntimeError(f'Secret {secret_id!r} owned by the charm under test, "' - f"can't call grant_secret") + raise RuntimeError( + f"Secret {secret_id!r} owned by the charm under test, can't call grant_secret" + ) relation_id = 
self._secret_relation_id_to(secret) if relation_id not in secret.grants: secret.grants[relation_id] = set() @@ -1683,8 +1725,10 @@ def revoke_secret(self, secret_id: str, observer: AppUnitOrName): # Model secrets: if secret.owner_name in [self.model.app.name, self.model.unit.name]: - raise RuntimeError(f'Secret {secret_id!r} owned by the charm under test, "' - f"can't call revoke_secret") + raise RuntimeError( + f'Secret {secret_id!r} owned by the charm under test, "' + f"can't call revoke_secret" + ) relation_id = self._secret_relation_id_to(secret) if relation_id not in secret.grants: @@ -1696,8 +1740,10 @@ def _secret_relation_id_to(self, secret: '_Secret') -> int: owner_app = secret.owner_name.split('/')[0] relation_id = self._backend._relation_id_to(owner_app) if relation_id is None: - raise RuntimeError(f'No relation between this charm ({self.model.app.name}) ' - f'and secret owner ({owner_app})') + raise RuntimeError( + f'No relation between this charm ({self.model.app.name}) ' + f'and secret owner ({owner_app})' + ) return relation_id def get_secret_grants(self, secret_id: str, relation_id: int) -> Set[str]: @@ -1733,13 +1779,14 @@ def trigger_secret_rotation(self, secret_id: str, *, label: Optional[str] = None """ secret = self._ensure_secret(secret_id) if secret.owner_name == self.model.uuid: - raise RuntimeError("Cannot trigger the secret-rotate event for a user secret.") + raise RuntimeError('Cannot trigger the secret-rotate event for a user secret.') if label is None: label = secret.label self.charm.on.secret_rotate.emit(secret_id, label) - def trigger_secret_removal(self, secret_id: str, revision: int, *, - label: Optional[str] = None): + def trigger_secret_removal( + self, secret_id: str, revision: int, *, label: Optional[str] = None + ): """Trigger a secret-remove event for the given secret and revision. This event is fired by Juju for a specific revision when all the @@ -1758,8 +1805,9 @@ def trigger_secret_removal(self, secret_id: str, revision: int, *, label = secret.label self.charm.on.secret_remove.emit(secret_id, label, revision) - def trigger_secret_expiration(self, secret_id: str, revision: int, *, - label: Optional[str] = None): + def trigger_secret_expiration( + self, secret_id: str, revision: int, *, label: Optional[str] = None + ): """Trigger a secret-expired event for the given secret. This event is fired by Juju when a secret's expiration time elapses, @@ -1775,7 +1823,7 @@ def trigger_secret_expiration(self, secret_id: str, revision: int, *, """ secret = self._ensure_secret(secret_id) if secret.owner_name == self.model.uuid: - raise RuntimeError("Cannot trigger the secret-expired event for a user secret.") + raise RuntimeError('Cannot trigger the secret-expired event for a user secret.') if label is None: label = secret.label self.charm.on.secret_expired.emit(secret_id, label, revision) @@ -1851,12 +1899,14 @@ def evaluate_status(self) -> None: self.charm.unit._collected_statuses = [] charm._evaluate_status(self.charm) - def handle_exec(self, - container: Union[str, Container], - command_prefix: Sequence[str], - *, - handler: Optional[ExecHandler] = None, - result: Optional[Union[int, str, bytes, ExecResult]] = None): + def handle_exec( + self, + container: Union[str, Container], + command_prefix: Sequence[str], + *, + handler: Optional[ExecHandler] = None, + result: Optional[Union[int, str, bytes, ExecResult]] = None, + ): r"""Register a handler to simulate the Pebble command execution. 
This allows a test harness to simulate the behavior of running commands in a container. @@ -1926,7 +1976,7 @@ def handle_timeout(args: testing.ExecArgs) -> int: harness.handle_exec('database', ['foo'], handler=handle_timeout) """ if (handler is None and result is None) or (handler is not None and result is not None): - raise TypeError("Either handler or result must be provided, but not both.") + raise TypeError('Either handler or result must be provided, but not both.') container_name = container if isinstance(container, str) else container.name if result is not None: if isinstance(result, int) and not isinstance(result, bool): @@ -1935,11 +1985,12 @@ def handle_timeout(args: testing.ExecArgs) -> int: result = ExecResult(stdout=result) elif not isinstance(result, ExecResult): raise TypeError( - f"result must be int, str, bytes, or ExecResult, " - f"not {result.__class__.__name__}") + f'result must be int, str, bytes, or ExecResult, ' + f'not {result.__class__.__name__}' + ) self._backend._pebble_clients[container_name]._handle_exec( command_prefix=command_prefix, - handler=(lambda _: result) if handler is None else handler # type: ignore + handler=(lambda _: result) if handler is None else handler, # type: ignore ) @property @@ -1947,8 +1998,9 @@ def reboot_count(self) -> int: """Number of times the charm has called :meth:`ops.Unit.reboot`.""" return self._backend._reboot_count - def run_action(self, action_name: str, - params: Optional[Dict[str, Any]] = None) -> ActionOutput: + def run_action( + self, action_name: str, params: Optional[Dict[str, Any]] = None + ) -> ActionOutput: """Simulates running a charm action, as with ``juju run``. Use this only after calling :meth:`begin`. @@ -1981,21 +2033,22 @@ def run_action(self, action_name: str, try: action_meta = self.charm.meta.actions[action_name] except KeyError: - raise RuntimeError(f"Charm does not have a {action_name!r} action.") from None + raise RuntimeError(f'Charm does not have a {action_name!r} action.') from None if params is None: params = {} for key in action_meta.required: # Juju requires that the key is in the passed parameters, even if there is a default # value in actions.yaml. if key not in params: - raise RuntimeError(f"{key!r} parameter is required, but missing.") + raise RuntimeError(f'{key!r} parameter is required, but missing.') if not action_meta.additional_properties: for key in params: if key not in action_meta.parameters: # Match Juju's error message. 
raise model.ModelError( f'additional property "{key}" is not allowed, ' - f'given {{"{key}":{params[key]!r}}}') + f'given {{"{key}":{params[key]!r}}}' + ) action_under_test = _RunningAction(action_name, ActionOutput([], {}), params) handler = getattr(self.charm.on, f"{action_name.replace('-', '_')}_action") self._backend._running_action = action_under_test @@ -2004,8 +2057,8 @@ def run_action(self, action_name: str, self._backend._running_action = None if action_under_test.failure_message is not None: raise ActionFailed( - message=action_under_test.failure_message, - output=action_under_test.output) + message=action_under_test.failure_message, output=action_under_test.output + ) return action_under_test.output def set_cloud_spec(self, spec: 'model.CloudSpec'): @@ -2076,6 +2129,7 @@ def wrapped(self: '_TestingModelBackend', *args: Any, **kwargs: Any): full_args = (*full_args, kwargs) self._calls.append(full_args) return orig_method(self, *args, **kwargs) + return wrapped setattr(cls, meth_name, decorator(orig_method)) @@ -2092,6 +2146,7 @@ class TargetClass: And for any public method that exists on both classes, it will copy the __doc__ for that method. """ + def decorator(target_cls: Any): for meth_name in target_cls.__dict__: if meth_name.startswith('_'): @@ -2100,12 +2155,14 @@ def decorator(target_cls: Any): if source_method is not None and source_method.__doc__: target_cls.__dict__[meth_name].__doc__ = source_method.__doc__ return target_cls + return decorator @_record_calls class _TestingConfig(Dict[str, Union[str, int, float, bool]]): """Represents the Juju Config.""" + _supported_types = { 'string': str, 'boolean': bool, @@ -2132,7 +2189,7 @@ def _load_defaults(charm_config: '_RawConfig') -> Dict[str, Union[str, int, floa """ if not charm_config: return {} - cfg: Dict[str, '_ConfigOption'] = charm_config.get('options', {}) + cfg: Dict[str, _ConfigOption] = charm_config.get('options', {}) return {key: value.get('default', None) for key, value in cfg.items()} def _config_set(self, key: str, value: Union[str, int, float, bool]): @@ -2141,24 +2198,31 @@ def _config_set(self, key: str, value: Union[str, int, float, bool]): # has the expected type. option = self._spec.get('options', {}).get(key) if not option: - raise RuntimeError(f'Unknown config option {key}; ' - 'not declared in `config.yaml`.' - 'Check https://juju.is/docs/sdk/config for the ' - 'spec.') + raise RuntimeError( + f'Unknown config option {key}; ' + 'not declared in `config.yaml`.' + 'Check https://juju.is/docs/sdk/config for the ' + 'spec.' + ) declared_type = option.get('type') if not declared_type: - raise RuntimeError(f'Incorrectly formatted `options.yaml`, option {key} ' - 'is expected to declare a `type`.') + raise RuntimeError( + f'Incorrectly formatted `options.yaml`, option {key} ' + 'is expected to declare a `type`.' + ) if declared_type not in self._supported_types: raise RuntimeError( 'Incorrectly formatted `options.yaml`: `type` needs to be one ' - 'of [{}], not {}.'.format(', '.join(self._supported_types), declared_type)) + 'of [{}], not {}.'.format(', '.join(self._supported_types), declared_type) + ) if type(value) is not self._supported_types[declared_type]: - raise RuntimeError(f'Config option {key} is supposed to be of type ' - f'{declared_type}, not `{type(value).__name__}`.') + raise RuntimeError( + f'Config option {key} is supposed to be of type ' + f'{declared_type}, not `{type(value).__name__}`.' + ) # call 'normal' setattr. 
dict.__setitem__(self, key, value) # type: ignore @@ -2171,11 +2235,11 @@ def __setitem__(self, key: Any, value: Any): class _TestingRelationDataContents(Dict[str, str]): def __setitem__(self, key: str, value: str): if not isinstance(key, str): - raise model.RelationDataError( - f'relation data keys must be strings, not {type(key)}') + raise model.RelationDataError(f'relation data keys must be strings, not {type(key)}') if not isinstance(value, str): raise model.RelationDataError( - f'relation data values must be strings, not {type(value)}') + f'relation data values must be strings, not {type(value)}' + ) super().__setitem__(key, value) def copy(self): @@ -2219,8 +2283,8 @@ def __init__(self, unit_name: str, meta: charm.CharmMeta, config: '_RawConfig'): self.model_uuid = str(uuid.uuid4()) self._harness_tmp_dir = tempfile.TemporaryDirectory(prefix='ops-harness-') - self._harness_storage_path = pathlib.Path(self._harness_tmp_dir.name) / "storages" - self._harness_container_path = pathlib.Path(self._harness_tmp_dir.name) / "containers" + self._harness_storage_path = pathlib.Path(self._harness_tmp_dir.name) / 'storages' + self._harness_container_path = pathlib.Path(self._harness_tmp_dir.name) / 'containers' self._harness_storage_path.mkdir() self._harness_container_path.mkdir() # this is used by the _record_calls decorator @@ -2252,7 +2316,8 @@ def __init__(self, unit_name: str, meta: charm.CharmMeta, config: '_RawConfig'): # : device id that is key for given storage_name # Initialize the _storage_list with values present on metadata.yaml self._storage_list: Dict[str, Dict[int, Dict[str, Any]]] = { - k: {} for k in self._meta.storages} + k: {} for k in self._meta.storages + } self._storage_attached: Dict[str, Set[int]] = {k: set() for k in self._meta.storages} self._storage_index_counter = 0 # {container_name : _TestingPebbleClient} @@ -2321,8 +2386,9 @@ def relation_get(self, relation_id: int, member_name: str, is_app: bool): raise model.RelationNotFoundError() return self._relation_data_raw[relation_id][member_name] - def update_relation_data(self, relation_id: int, _entity: Union[model.Unit, model.Application], - key: str, value: str): + def update_relation_data( + self, relation_id: int, _entity: Union[model.Unit, model.Application], key: str, value: str + ): # this is where the 'real' backend would call relation-set. 
raw_data = self._relation_data_raw[relation_id][_entity.name] if value == '': @@ -2335,9 +2401,11 @@ def relation_set(self, relation_id: int, key: str, value: str, is_app: bool): raise TypeError('is_app parameter to relation_set must be a boolean') if 'relation_broken' in self._hook_is_running and not self.relation_remote_app_name( - relation_id): + relation_id + ): raise RuntimeError( - 'remote-side relation data cannot be accessed during a relation-broken event') + 'remote-side relation data cannot be accessed during a relation-broken event' + ) if relation_id not in self._relation_data_raw: raise RelationNotFoundError(relation_id) @@ -2364,10 +2432,11 @@ def application_version_set(self, version: str): def resource_get(self, resource_name: str): if resource_name not in self._resources_map: raise model.ModelError( - "ERROR could not download resource: HTTP request failed: " - "Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found".format( + 'ERROR could not download resource: HTTP request failed: ' + 'Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found'.format( self.unit_name.replace('/', '-'), resource_name, self.app_name, resource_name - )) + ) + ) filename, contents = self._resources_map[resource_name] resource_dir = self._get_resource_dir() resource_filename = resource_dir / resource_name / filename @@ -2389,8 +2458,10 @@ def status_get(self, *, is_app: bool = False): def status_set(self, status: '_StatusName', message: str = '', *, is_app: bool = False): if status in [model.ErrorStatus.name, model.UnknownStatus.name]: - raise model.ModelError(f'ERROR invalid status "{status}", expected one of' - ' [maintenance blocked waiting active]') + raise model.ModelError( + f'ERROR invalid status "{status}", expected one of' + ' [maintenance blocked waiting active]' + ) if is_app: self._app_status = {'status': status, 'message': message} else: @@ -2403,11 +2474,14 @@ def storage_list(self, name: str, include_detached: bool = False): name: name (i.e. from metadata.yaml). include_detached: True to include unattached storage mounts as well. 
""" - return [index for index in self._storage_list[name] - if include_detached or self._storage_is_attached(name, index)] + return [ + index + for index in self._storage_list[name] + if include_detached or self._storage_is_attached(name, index) + ] def storage_get(self, storage_name_id: str, attribute: str) -> Any: - name, index = storage_name_id.split("/", 1) + name, index = storage_name_id.split('/', 1) index = int(index) try: if index not in self._storage_attached[name]: @@ -2416,7 +2490,8 @@ def storage_get(self, storage_name_id: str, attribute: str) -> Any: return self._storage_list[name][index][attribute] except KeyError: raise model.ModelError( - f'ERROR invalid value "{name}/{index}" for option -s: storage not found') from None + f'ERROR invalid value "{name}/{index}" for option -s: storage not found' + ) from None def storage_add(self, name: str, count: int = 1) -> List[int]: if '/' in name: @@ -2465,7 +2540,7 @@ def _storage_attach(self, storage_id: str): root = client._root mounting_dir = root / mount.location[1:] mounting_dir.parent.mkdir(parents=True, exist_ok=True) - target_dir = pathlib.Path(store["location"]) + target_dir = pathlib.Path(store['location']) target_dir.mkdir(parents=True, exist_ok=True) try: mounting_dir.symlink_to(target_dir, target_is_directory=True) @@ -2473,9 +2548,8 @@ def _storage_attach(self, storage_id: str): # If the symlink is already the one we want, then we # don't need to do anything here. # NOTE: In Python 3.9, this can use `mounting_dir.readlink()` - if ( - not mounting_dir.is_symlink() - or os.readlink(mounting_dir) != str(target_dir) + if not mounting_dir.is_symlink() or os.readlink(mounting_dir) != str( + target_dir ): raise @@ -2502,14 +2576,14 @@ def action_get(self) -> Dict[str, Any]: assert self._running_action is not None action_meta = self._meta.actions[self._running_action.name] for name, meta in action_meta.parameters.items(): - if "default" in meta: - params[name] = meta["default"] + if 'default' in meta: + params[name] = meta['default'] params.update(self._running_action.parameters) return params def action_set(self, results: Dict[str, Any]): assert self._running_action is not None - for key in ("stdout", "stderr", "stdout-encoding", "stderr-encoding"): + for key in ('stdout', 'stderr', 'stdout-encoding', 'stderr-encoding'): if key in results: # Match Juju's error message. raise model.ModelError(f'ERROR cannot set reserved action key "{key}"') @@ -2612,21 +2686,26 @@ def _ensure_secret_id_or_label(self, id: Optional[str], label: Optional[str]): secret = next((s for s in self._secrets if s.label == label), None) if secret is None: raise model.SecretNotFoundError( - f'Secret not found by ID ({id!r}) or label ({label!r})') + f'Secret not found by ID ({id!r}) or label ({label!r})' + ) return secret - def secret_get(self, *, - id: Optional[str] = None, - label: Optional[str] = None, - refresh: bool = False, - peek: bool = False) -> Dict[str, str]: + def secret_get( + self, + *, + id: Optional[str] = None, + label: Optional[str] = None, + refresh: bool = False, + peek: bool = False, + ) -> Dict[str, str]: secret = self._ensure_secret_id_or_label(id, label) if secret.owner_name == self.model_uuid: # This is a user secret - charms only ever have view access. 
if self.app_name not in secret.user_secrets_grants: raise model.SecretNotFoundError( - f'Secret {id!r} not granted access to {self.app_name!r}') + f'Secret {id!r} not granted access to {self.app_name!r}' + ) elif secret.owner_name not in [self.app_name, self.unit_name]: # This is a model secret - the model might have admin or view access. # Check that caller has permission to get this secret @@ -2636,11 +2715,13 @@ def secret_get(self, *, relation_id = self._relation_id_to(owner_app) if relation_id is None: raise model.SecretNotFoundError( - f'Secret {id!r} does not have relation to {owner_app!r}') + f'Secret {id!r} does not have relation to {owner_app!r}' + ) grants = secret.grants.get(relation_id, set()) if self.app_name not in grants and self.unit_name not in grants: raise model.SecretNotFoundError( - f'Secret {id!r} not granted access to {self.app_name!r} or {self.unit_name!r}') + f'Secret {id!r} not granted access to {self.app_name!r} or {self.unit_name!r}' + ) if peek or refresh: revision = secret.revisions[-1] @@ -2675,11 +2756,12 @@ def _ensure_secret_owner(self, secret: _Secret): if unit_secret or (app_secret and self.is_leader()): return raise model.SecretNotFoundError( - f'You must own secret {secret.id!r} to perform this operation') + f'You must own secret {secret.id!r} to perform this operation' + ) - def secret_info_get(self, *, - id: Optional[str] = None, - label: Optional[str] = None) -> model.SecretInfo: + def secret_info_get( + self, *, id: Optional[str] = None, label: Optional[str] = None + ) -> model.SecretInfo: secret = self._ensure_secret_id_or_label(id, label) self._ensure_secret_owner(secret) @@ -2700,21 +2782,22 @@ def secret_info_get(self, *, rotates=rotates, ) - def secret_set(self, id: str, *, - content: Optional[Dict[str, str]] = None, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[datetime.datetime] = None, - rotate: Optional[model.SecretRotate] = None) -> None: + def secret_set( + self, + id: str, + *, + content: Optional[Dict[str, str]] = None, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[datetime.datetime] = None, + rotate: Optional[model.SecretRotate] = None, + ) -> None: secret = self._ensure_secret(id) self._ensure_secret_owner(secret) if content is None: content = secret.revisions[-1].content - revision = _SecretRevision( - revision=secret.revisions[-1].revision + 1, - content=content - ) + revision = _SecretRevision(revision=secret.revisions[-1].revision + 1, content=content) secret.revisions.append(revision) if label is not None: if label: @@ -2737,26 +2820,33 @@ def secret_set(self, id: str, *, @classmethod def _generate_secret_id(cls) -> str: # Not a proper Juju secrets-style xid, but that's okay - return f"secret:{uuid.uuid4()}" - - def secret_add(self, content: Dict[str, str], *, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[datetime.datetime] = None, - rotate: Optional[model.SecretRotate] = None, - owner: Optional[str] = None) -> str: + return f'secret:{uuid.uuid4()}' + + def secret_add( + self, + content: Dict[str, str], + *, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[datetime.datetime] = None, + rotate: Optional[model.SecretRotate] = None, + owner: Optional[str] = None, + ) -> str: owner_name = self.unit_name if owner == 'unit' else self.app_name - return self._secret_add(content, owner_name, - label=label, - description=description, - expire=expire, - rotate=rotate) - - def 
_secret_add(self, content: Dict[str, str], owner_name: str, *, - label: Optional[str] = None, - description: Optional[str] = None, - expire: Optional[datetime.datetime] = None, - rotate: Optional[model.SecretRotate] = None) -> str: + return self._secret_add( + content, owner_name, label=label, description=description, expire=expire, rotate=rotate + ) + + def _secret_add( + self, + content: Dict[str, str], + owner_name: str, + *, + label: Optional[str] = None, + description: Optional[str] = None, + expire: Optional[datetime.datetime] = None, + rotate: Optional[model.SecretRotate] = None, + ) -> str: id = self._generate_secret_id() revision = _SecretRevision( revision=1, @@ -2826,14 +2916,23 @@ def _check_protocol_and_port(self, protocol: str, port: Optional[int]): # should be testing details of error messages). if protocol == 'icmp': if port is not None: - raise model.ModelError(f'ERROR protocol "{protocol}" doesn\'t support any ports; got "{port}"\n') # noqa: E501 + raise model.ModelError( + f'ERROR protocol "{protocol}" doesn\'t support any ports; got "{port}"\n' + ) elif protocol in ['tcp', 'udp']: if port is None: - raise model.ModelError(f'ERROR invalid port "{protocol}": strconv.Atoi: parsing "{protocol}": invalid syntax\n') # noqa: E501 + raise model.ModelError( + f'ERROR invalid port "{protocol}": ' + f'strconv.Atoi: parsing "{protocol}": invalid syntax\n' + ) if not (1 <= port <= 65535): - raise model.ModelError(f'ERROR port range bounds must be between 1 and 65535, got {port}-{port}\n') # noqa: E501 + raise model.ModelError( + f'ERROR port range bounds must be between 1 and 65535, got {port}-{port}\n' + ) else: - raise model.ModelError(f'ERROR invalid protocol "{protocol}", expected "tcp", "udp", or "icmp"\n') # noqa: E501 + raise model.ModelError( + f'ERROR invalid protocol "{protocol}", expected "tcp", "udp", or "icmp"\n' + ) def reboot(self, now: bool = False): self._reboot_count += 1 @@ -2846,25 +2945,28 @@ def reboot(self, now: bool = False): def credential_get(self) -> model.CloudSpec: if not self._cloud_spec: raise model.ModelError( - 'ERROR cloud spec is empty, set it with `Harness.set_cloud_spec()` first') + 'ERROR cloud spec is empty, set it with `Harness.set_cloud_spec()` first' + ) return self._cloud_spec @_copy_docstrings(pebble.ExecProcess) class _TestingExecProcess: - def __init__(self, - command: List[str], - timeout: Optional[float], - exit_code: Optional[int], - stdin: Union[TextIO, BinaryIO, None], - stdout: Union[TextIO, BinaryIO, None], - stderr: Union[TextIO, BinaryIO, None], - is_timeout: bool): + def __init__( + self, + command: List[str], + timeout: Optional[float], + exit_code: Optional[int], + stdin: Union[TextIO, BinaryIO, None], + stdout: Union[TextIO, BinaryIO, None], + stderr: Union[TextIO, BinaryIO, None], + is_timeout: bool, + ): self._command = command self._timeout = timeout self._is_timeout = is_timeout if exit_code is None and not is_timeout: - raise ValueError("when is_timeout is False, exit_code must not be None") + raise ValueError('when is_timeout is False, exit_code must not be None') self._exit_code = exit_code self.stdin = stdin self.stdout = stdout @@ -2872,29 +2974,27 @@ def __init__(self, def wait(self): if self._is_timeout: - raise pebble.TimeoutError( - f'timed out waiting for change ({self._timeout} seconds)' - ) + raise pebble.TimeoutError(f'timed out waiting for change ({self._timeout} seconds)') if self._exit_code != 0: raise pebble.ExecError(self._command, cast(int, self._exit_code), None, None) def wait_output(self) -> 
Tuple[AnyStr, Optional[AnyStr]]: if self._is_timeout: - raise pebble.TimeoutError( - f'timed out waiting for change ({self._timeout} seconds)' - ) + raise pebble.TimeoutError(f'timed out waiting for change ({self._timeout} seconds)') out_value = self.stdout.read() if self.stdout is not None else None err_value = self.stderr.read() if self.stderr is not None else None if self._exit_code != 0: - raise pebble.ExecError[AnyStr](self._command, - cast(int, self._exit_code), - cast(Union[AnyStr, None], out_value), - cast(Union[AnyStr, None], err_value)) + raise pebble.ExecError[AnyStr]( + self._command, + cast(int, self._exit_code), + cast(Union[AnyStr, None], out_value), + cast(Union[AnyStr, None], err_value), + ) return cast(AnyStr, out_value), cast(Union[AnyStr, None], err_value) def send_signal(self, sig: Union[int, str]): # the process is always terminated when ExecProcess is return in the simulation. - raise BrokenPipeError("[Errno 32] Broken pipe") + raise BrokenPipeError('[Errno 32] Broken pipe') @_copy_docstrings(pebble.Client) @@ -2923,8 +3023,10 @@ def _handle_exec(self, command_prefix: Sequence[str], handler: ExecHandler): def _check_connection(self): if not self._backend._can_connect(self): - msg = ('Cannot connect to Pebble; did you forget to call ' - 'begin_with_initial_hooks() or set_can_connect()?') + msg = ( + 'Cannot connect to Pebble; did you forget to call ' + 'begin_with_initial_hooks() or set_can_connect()?' + ) raise pebble.ConnectionError(msg) def get_system_info(self) -> pebble.SystemInfo: @@ -2932,7 +3034,8 @@ def get_system_info(self) -> pebble.SystemInfo: return pebble.SystemInfo(version='1.0.0') def get_warnings( - self, select: pebble.WarningState = pebble.WarningState.PENDING, + self, + select: pebble.WarningState = pebble.WarningState.PENDING, ) -> List['pebble.Warning']: raise NotImplementedError(self.get_warnings) @@ -2940,8 +3043,9 @@ def ack_warnings(self, timestamp: datetime.datetime) -> int: raise NotImplementedError(self.ack_warnings) def get_changes( - self, select: pebble.ChangeState = pebble.ChangeState.IN_PROGRESS, - service: Optional[str] = None, + self, + select: pebble.ChangeState = pebble.ChangeState.IN_PROGRESS, + service: Optional[str] = None, ) -> List[pebble.Change]: raise NotImplementedError(self.get_changes) @@ -2968,7 +3072,10 @@ def replan_services(self, timeout: float = 30.0, delay: float = 0.1): return self.autostart_services(timeout, delay) def start_services( - self, services: List[str], timeout: float = 30.0, delay: float = 0.1, + self, + services: List[str], + timeout: float = 30.0, + delay: float = 0.1, ): # A common mistake is to pass just the name of a service, rather than a list of services, # so trap that so it is caught quickly. 
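A rough sketch of how the exec-simulation plumbing above (`handle_exec`, `_TestingExecProcess`) is typically exercised from a test; the `workload` container and the `echo` command are invented for illustration:

    # A rough sketch only: the 'workload' container and the 'echo' command
    # are invented for illustration.
    import ops
    import ops.testing


    def test_exec_is_simulated():
        harness = ops.testing.Harness(
            ops.CharmBase,
            meta='name: demo\ncontainers:\n  workload: {}',
        )
        harness.begin()
        harness.set_can_connect('workload', True)
        # Any exec whose command starts with ['echo'] gets this canned result.
        harness.handle_exec(
            'workload', ['echo'], result=ops.testing.ExecResult(stdout='hi\n')
        )
        container = harness.charm.unit.get_container('workload')
        process = container.exec(['echo', 'hi'])
        stdout, _ = process.wait_output()
        assert stdout == 'hi\n'
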
@@ -2989,7 +3096,10 @@ def start_services( self._service_status[name] = pebble.ServiceStatus.ACTIVE def stop_services( - self, services: List[str], timeout: float = 30.0, delay: float = 0.1, + self, + services: List[str], + timeout: float = 30.0, + delay: float = 0.1, ): # handle a common mistake of passing just a name rather than a list of names if isinstance(services, str): @@ -3009,7 +3119,10 @@ def stop_services( self._service_status[name] = pebble.ServiceStatus.INACTIVE def restart_services( - self, services: List[str], timeout: float = 30.0, delay: float = 0.1, + self, + services: List[str], + timeout: float = 30.0, + delay: float = 0.1, ): # handle a common mistake of passing just a name rather than a list of names if isinstance(services, str): @@ -3029,13 +3142,20 @@ def restart_services( self._service_status[name] = pebble.ServiceStatus.ACTIVE def wait_change( - self, change_id: pebble.ChangeID, timeout: float = 30.0, delay: float = 0.1, + self, + change_id: pebble.ChangeID, + timeout: float = 30.0, + delay: float = 0.1, ) -> pebble.Change: raise NotImplementedError(self.wait_change) def add_layer( - self, label: str, layer: Union[str, 'pebble.LayerDict', pebble.Layer], *, - combine: bool = False): + self, + label: str, + layer: Union[str, 'pebble.LayerDict', pebble.Layer], + *, + combine: bool = False, + ): # I wish we could combine some of this helpful object corralling with the actual backend, # rather than having to re-implement it. Maybe we could subclass if not isinstance(label, str): @@ -3047,7 +3167,8 @@ def add_layer( layer_obj = layer else: raise TypeError( - f'layer must be str, dict, or pebble.Layer, not {type(layer).__name__}') + f'layer must be str, dict, or pebble.Layer, not {type(layer).__name__}' + ) self._check_connection() @@ -3059,11 +3180,15 @@ def add_layer( # 'override' is actually single quoted in the real error, but # it shouldn't be, hopefully that gets cleaned up. 
if not service.override: - raise RuntimeError(f'500 Internal Server Error: layer "{label}" must define' - f'"override" for service "{name}"') + raise RuntimeError( + f'500 Internal Server Error: layer "{label}" must define' + f'"override" for service "{name}"' + ) if service.override not in ('merge', 'replace'): - raise RuntimeError(f'500 Internal Server Error: layer "{label}" has invalid ' - f'"override" value on service "{name}"') + raise RuntimeError( + f'500 Internal Server Error: layer "{label}" has invalid ' + f'"override" value on service "{name}"' + ) elif service.override == 'replace': layer.services[name] = service elif service.override == 'merge': @@ -3130,49 +3255,51 @@ def get_services(self, names: Optional[List[str]] = None) -> List[pebble.Service startup = pebble.ServiceStartup.DISABLED else: startup = pebble.ServiceStartup(service.startup) - info = pebble.ServiceInfo(name, - startup=startup, - current=pebble.ServiceStatus(status)) + info = pebble.ServiceInfo(name, startup=startup, current=pebble.ServiceStatus(status)) infos.append(info) return infos @staticmethod def _check_absolute_path(path: str): - if not path.startswith("/"): - raise pebble.PathError( - 'generic-file-error', - f'paths must be absolute, got {path!r}' - ) + if not path.startswith('/'): + raise pebble.PathError('generic-file-error', f'paths must be absolute, got {path!r}') - def pull(self, path: str, *, - encoding: Optional[str] = 'utf-8') -> Union[BinaryIO, TextIO]: + def pull(self, path: str, *, encoding: Optional[str] = 'utf-8') -> Union[BinaryIO, TextIO]: self._check_connection() self._check_absolute_path(path) file_path = self._root / path[1:] try: return cast( Union[BinaryIO, TextIO], - file_path.open("rb" if encoding is None else "r", encoding=encoding)) + file_path.open('rb' if encoding is None else 'r', encoding=encoding), + ) except FileNotFoundError: - raise pebble.PathError('not-found', - f'stat {path}: no such file or directory') from None + raise pebble.PathError( + 'not-found', f'stat {path}: no such file or directory' + ) from None except IsADirectoryError: - raise pebble.PathError('generic-file-error', - f'can only read a regular file: "{path}"') from None + raise pebble.PathError( + 'generic-file-error', f'can only read a regular file: "{path}"' + ) from None def push( - self, path: str, source: 'ReadableBuffer', *, - encoding: str = 'utf-8', make_dirs: bool = False, permissions: Optional[int] = None, - user_id: Optional[int] = None, - user: Optional[str] = None, - group_id: Optional[int] = None, - group: Optional[str] = None + self, + path: str, + source: 'ReadableBuffer', + *, + encoding: str = 'utf-8', + make_dirs: bool = False, + permissions: Optional[int] = None, + user_id: Optional[int] = None, + user: Optional[str] = None, + group_id: Optional[int] = None, + group: Optional[str] = None, ) -> None: self._check_connection() if permissions is not None and not (0 <= permissions <= 0o777): raise pebble.PathError( - 'generic-file-error', - f'permissions not within 0o000 to 0o777: {permissions:#o}') + 'generic-file-error', f'permissions not within 0o000 to 0o777: {permissions:#o}' + ) self._check_absolute_path(path) file_path = self._root / path[1:] if make_dirs and not file_path.parent.exists(): @@ -3183,7 +3310,8 @@ def push( user_id=user_id, user=user, group_id=group_id, - group=group) + group=group, + ) permissions = permissions if permissions is not None else 0o644 try: if isinstance(source, str): @@ -3200,18 +3328,21 @@ def push( os.chmod(file_path, permissions) except 
FileNotFoundError as e: raise pebble.PathError( - 'not-found', f'parent directory not found: {e.args[0]}') from None + 'not-found', f'parent directory not found: {e.args[0]}' + ) from None except NotADirectoryError: - raise pebble.PathError('generic-file-error', - f'open {path}.~: not a directory') from None + raise pebble.PathError( + 'generic-file-error', f'open {path}.~: not a directory' + ) from None - def list_files(self, path: str, *, pattern: Optional[str] = None, - itself: bool = False) -> List[pebble.FileInfo]: + def list_files( + self, path: str, *, pattern: Optional[str] = None, itself: bool = False + ) -> List[pebble.FileInfo]: self._check_connection() self._check_absolute_path(path) file_path = self._root / path[1:] if not file_path.exists(): - raise self._api_error(404, f"stat {path}: no such file or directory") + raise self._api_error(404, f'stat {path}: no such file or directory') files = [file_path] if not itself: try: @@ -3222,37 +3353,35 @@ def list_files(self, path: str, *, pattern: Optional[str] = None, if pattern is not None: files = [file for file in files if fnmatch.fnmatch(file.name, pattern)] - file_infos = [ - Container._build_fileinfo(file) - for file in files - ] + file_infos = [Container._build_fileinfo(file) for file in files] for file_info in file_infos: rel_path = os.path.relpath(file_info.path, start=self._root) rel_path = '/' if rel_path == '.' else '/' + rel_path file_info.path = rel_path - if rel_path == "/": - file_info.name = "/" + if rel_path == '/': + file_info.name = '/' return file_infos def make_dir( - self, path: str, *, - make_parents: bool = False, - permissions: Optional[int] = None, - user_id: Optional[int] = None, - user: Optional[str] = None, - group_id: Optional[int] = None, - group: Optional[str] = None + self, + path: str, + *, + make_parents: bool = False, + permissions: Optional[int] = None, + user_id: Optional[int] = None, + user: Optional[str] = None, + group_id: Optional[int] = None, + group: Optional[str] = None, ) -> None: self._check_connection() if permissions is not None and not (0 <= permissions <= 0o777): raise pebble.PathError( - 'generic-file-error', - f'permissions not within 0o000 to 0o777: {permissions:#o}') + 'generic-file-error', f'permissions not within 0o000 to 0o777: {permissions:#o}' + ) self._check_absolute_path(path) dir_path = self._root / path[1:] if not dir_path.parent.exists() and not make_parents: - raise pebble.PathError( - 'not-found', f'parent directory not found: {path}') + raise pebble.PathError('not-found', f'parent directory not found: {path}') if not dir_path.parent.exists() and make_parents: self.make_dir( os.path.dirname(path), @@ -3261,7 +3390,8 @@ def make_dir( user_id=user_id, user=user, group_id=group_id, - group=group) + group=group, + ) try: permissions = permissions if permissions else 0o755 dir_path.mkdir() @@ -3269,8 +3399,8 @@ def make_dir( except FileExistsError: if not make_parents: raise pebble.PathError( - 'generic-file-error', - f'mkdir {path}: file exists') from None + 'generic-file-error', f'mkdir {path}: file exists' + ) from None except NotADirectoryError as e: # Attempted to create a subdirectory of a file raise pebble.PathError('generic-file-error', f'not a directory: {e.args[0]}') from None @@ -3282,8 +3412,7 @@ def remove_path(self, path: str, *, recursive: bool = False): if not file_path.exists(): if recursive: return - raise pebble.PathError( - 'not-found', f'remove {path}: no such file or directory') + raise pebble.PathError('not-found', f'remove {path}: no such file or 
directory') if file_path.is_dir(): if recursive: shutil.rmtree(file_path) @@ -3293,7 +3422,8 @@ def remove_path(self, path: str, *, recursive: bool = False): except OSError as e: raise pebble.PathError( 'generic-file-error', - 'cannot remove non-empty directory without recursive=True') from e + 'cannot remove non-empty directory without recursive=True', + ) from e else: file_path.unlink() @@ -3304,9 +3434,9 @@ def _find_exec_handler(self, command: List[str]) -> Optional[ExecHandler]: return self._exec_handlers[command_prefix] return None - def _transform_exec_handler_output(self, - data: Union[str, bytes], - encoding: Optional[str]) -> Union[io.BytesIO, io.StringIO]: + def _transform_exec_handler_output( + self, data: Union[str, bytes], encoding: Optional[str] + ) -> Union[io.BytesIO, io.StringIO]: if isinstance(data, bytes): if encoding is None: return io.BytesIO(data) @@ -3315,8 +3445,9 @@ def _transform_exec_handler_output(self, else: if encoding is None: raise ValueError( - f"exec handler must return bytes if encoding is None," - f"not {data.__class__.__name__}") + f'exec handler must return bytes if encoding is None,' + f'not {data.__class__.__name__}' + ) else: return io.StringIO(typing.cast(str, data)) @@ -3336,12 +3467,12 @@ def exec( stdout: Optional[Union[TextIO, BinaryIO]] = None, stderr: Optional[Union[TextIO, BinaryIO]] = None, encoding: Optional[str] = 'utf-8', - combine_stderr: bool = False + combine_stderr: bool = False, ) -> ExecProcess[Any]: self._check_connection() handler = self._find_exec_handler(command) if handler is None: - message = "execution handler not found, please register one using Harness.handle_exec" + message = 'execution handler not found, please register one using Harness.handle_exec' raise self._api_error(500, message) environment = {} if environment is None else environment if service_context is not None: @@ -3357,7 +3488,7 @@ def exec( group = service.group if group is None else group group_id = service.group_id if group_id is None else group_id - if hasattr(stdin, "read"): + if hasattr(stdin, 'read'): stdin = stdin.read() # type: ignore exec_args = ExecArgs( @@ -3371,29 +3502,31 @@ def exec( group=group, stdin=cast(Union[str, bytes, None], stdin), encoding=encoding, - combine_stderr=combine_stderr + combine_stderr=combine_stderr, ) - proc_stdin = self._transform_exec_handler_output(b"", encoding) + proc_stdin = self._transform_exec_handler_output(b'', encoding) if stdin is not None: proc_stdin = None - proc_stdout = self._transform_exec_handler_output(b"", encoding) - proc_stderr = self._transform_exec_handler_output(b"", encoding) + proc_stdout = self._transform_exec_handler_output(b'', encoding) + proc_stderr = self._transform_exec_handler_output(b'', encoding) try: result = handler(exec_args) except TimeoutError: if timeout is not None: - exec_process = _TestingExecProcess(command=command, - timeout=timeout, - exit_code=None, - stdin=proc_stdin, - stdout=proc_stdout, - stderr=proc_stderr, - is_timeout=True) + exec_process = _TestingExecProcess( + command=command, + timeout=timeout, + exit_code=None, + stdin=proc_stdin, + stdout=proc_stdout, + stderr=proc_stderr, + is_timeout=True, + ) return cast(pebble.ExecProcess[Any], exec_process) else: raise RuntimeError( - "a TimeoutError occurred in the execution handler, " - "but no timeout value was provided in the execution arguments." + 'a TimeoutError occurred in the execution handler, ' + 'but no timeout value was provided in the execution arguments.' 
) from None if result is None: exit_code = 0 @@ -3404,23 +3537,27 @@ def exec( proc_stdout = self._transform_exec_handler_output(result.stdout, encoding) proc_stderr = self._transform_exec_handler_output(result.stderr, encoding) else: - raise TypeError(f"execution handler returned an unexpected type: {type(result)!r}.") + raise TypeError(f'execution handler returned an unexpected type: {type(result)!r}.') if combine_stderr and proc_stderr.getvalue(): - raise ValueError("execution handler returned a non-empty stderr " - "even though combine_stderr is enabled.") + raise ValueError( + 'execution handler returned a non-empty stderr ' + 'even though combine_stderr is enabled.' + ) if stdout is not None: shutil.copyfileobj(cast(io.IOBase, proc_stdout), cast(io.IOBase, stdout)) proc_stdout = None if stderr is not None: shutil.copyfileobj(cast(io.IOBase, proc_stderr), cast(io.IOBase, stderr)) proc_stderr = None - exec_process = _TestingExecProcess(command=command, - timeout=timeout, - exit_code=exit_code, - stdin=proc_stdin, - stdout=proc_stdout, - stderr=proc_stderr, - is_timeout=False) + exec_process = _TestingExecProcess( + command=command, + timeout=timeout, + exit_code=exit_code, + stdin=proc_stdin, + stdout=proc_stdout, + stderr=proc_stderr, + is_timeout=False, + ) return cast(pebble.ExecProcess[Any], exec_process) def send_signal(self, sig: Union[int, str], service_names: Iterable[str]): @@ -3453,15 +3590,25 @@ def send_signal(self, sig: Union[int, str], service_names: Iterable[str]): def get_checks(self, level=None, names=None): # type:ignore raise NotImplementedError(self.get_checks) # type:ignore - def notify(self, type: pebble.NoticeType, key: str, *, - data: Optional[Dict[str, str]] = None, - repeat_after: Optional[datetime.timedelta] = None) -> str: + def notify( + self, + type: pebble.NoticeType, + key: str, + *, + data: Optional[Dict[str, str]] = None, + repeat_after: Optional[datetime.timedelta] = None, + ) -> str: notice_id, _ = self._notify(type, key, data=data, repeat_after=repeat_after) return notice_id - def _notify(self, type: pebble.NoticeType, key: str, *, - data: Optional[Dict[str, str]] = None, - repeat_after: Optional[datetime.timedelta] = None) -> Tuple[str, bool]: + def _notify( + self, + type: pebble.NoticeType, + key: str, + *, + data: Optional[Dict[str, str]] = None, + repeat_after: Optional[datetime.timedelta] = None, + ) -> Tuple[str, bool]: """Record an occurrence of a notice with the specified details. Return a tuple of (notice_id, new_or_repeated). @@ -3552,16 +3699,21 @@ def get_notices( if keys is not None: keys = set(keys) - notices = [notice for notice in self._notices.values() if - self._notice_matches(notice, filter_user_id, types, keys)] + notices = [ + notice + for notice in self._notices.values() + if self._notice_matches(notice, filter_user_id, types, keys) + ] notices.sort(key=lambda notice: notice.last_repeated) return notices @staticmethod - def _notice_matches(notice: pebble.Notice, - user_id: Optional[int] = None, - types: Optional[Set[str]] = None, - keys: Optional[Set[str]] = None) -> bool: + def _notice_matches( + notice: pebble.Notice, + user_id: Optional[int] = None, + types: Optional[Set[str]] = None, + keys: Optional[Set[str]] = None, + ) -> bool: # Same logic as NoticeFilter.matches in Pebble. # For example: if user_id filter is set and it doesn't match, return False. 
if user_id is not None and not (notice.user_id is None or user_id == notice.user_id): diff --git a/pyproject.toml b/pyproject.toml index f873965c0..6a303b63f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,6 +73,11 @@ aggressive = 3 [tool.ruff] line-length = 99 target-version = "py38" +extend-exclude = ["docs"] + +# Ruff formatter configuration +[tool.ruff.format] +quote-style = "single" [tool.ruff.lint] select = [ diff --git a/test/charms/test_main/src/charm.py b/test/charms/test_main/src/charm.py index ccab5d62a..e987d344f 100755 --- a/test/charms/test_main/src/charm.py +++ b/test/charms/test_main/src/charm.py @@ -43,7 +43,6 @@ def __init__(self, *args: typing.Any): super().__init__(*args) self._stored.set_default( try_excepthook=False, - on_install=[], on_start=[], on_config_changed=[], @@ -59,20 +58,16 @@ def __init__(self, *args: typing.Any): on_collect_metrics=[], on_test_pebble_ready=[], on_test_pebble_custom_notice=[], - on_log_critical_action=[], on_log_error_action=[], on_log_warning_action=[], on_log_info_action=[], on_log_debug_action=[], - on_secret_changed=[], on_secret_remove=[], on_secret_rotate=[], on_secret_expired=[], - on_custom=[], - # Observed event type names per invocation. A list is used to preserve the # order in which charm handlers have observed the events. observed_event_types=[], @@ -90,8 +85,9 @@ def __init__(self, *args: typing.Any): self.framework.observe(self.on.mon_relation_departed, self._on_mon_relation_departed) self.framework.observe(self.on.ha_relation_broken, self._on_ha_relation_broken) self.framework.observe(self.on.test_pebble_ready, self._on_test_pebble_ready) - self.framework.observe(self.on.test_pebble_custom_notice, - self._on_test_pebble_custom_notice) + self.framework.observe( + self.on.test_pebble_custom_notice, self._on_test_pebble_custom_notice + ) self.framework.observe(self.on.secret_remove, self._on_secret_remove) self.framework.observe(self.on.secret_rotate, self._on_secret_rotate) @@ -116,7 +112,7 @@ def __init__(self, *args: typing.Any): self.framework.observe(self.on.custom, self._on_custom) if os.getenv('TRY_EXCEPTHOOK', False): - raise RuntimeError("failing as requested") + raise RuntimeError('failing as requested') def _on_install(self, event: ops.InstallEvent): self._stored.on_install.append(type(event).__name__) @@ -151,12 +147,14 @@ def _on_db_relation_joined(self, event: ops.RelationJoinedEvent): self._stored.db_relation_joined_data = event.snapshot() def _on_mon_relation_changed(self, event: ops.RelationChangedEvent): - assert event.app is not None, ( - 'application name cannot be None for a relation-changed event') + assert ( + event.app is not None + ), 'application name cannot be None for a relation-changed event' if os.environ.get('JUJU_REMOTE_UNIT'): assert event.unit is not None, ( 'a unit name cannot be None for a relation-changed event' - ' associated with a remote unit') + ' associated with a remote unit' + ) assert event.relation.active, 'a changed relation is always active' assert self.model.relations['mon'] self._stored.on_mon_relation_changed.append(type(event).__name__) @@ -164,8 +162,9 @@ def _on_mon_relation_changed(self, event: ops.RelationChangedEvent): self._stored.mon_relation_changed_data = event.snapshot() def _on_mon_relation_departed(self, event: ops.RelationDepartedEvent): - assert event.app is not None, ( - 'application name cannot be None for a relation-departed event') + assert ( + event.app is not None + ), 'application name cannot be None for a relation-departed event' assert 
event.relation.active, 'a departed relation is still active' assert self.model.relations['mon'] self._stored.on_mon_relation_departed.append(type(event).__name__) @@ -173,10 +172,12 @@ def _on_mon_relation_departed(self, event: ops.RelationDepartedEvent): self._stored.mon_relation_departed_data = event.snapshot() def _on_ha_relation_broken(self, event: ops.RelationBrokenEvent): - assert event.app is None, ( - 'relation-broken events cannot have a reference to a remote application') - assert event.unit is None, ( - 'relation broken events cannot have a reference to a remote unit') + assert ( + event.app is None + ), 'relation-broken events cannot have a reference to a remote application' + assert ( + event.unit is None + ), 'relation broken events cannot have a reference to a remote unit' assert not event.relation.active, 'relation broken events always have a broken relation' assert not self.model.relations['ha'] self._stored.on_ha_relation_broken.append(type(event).__name__) @@ -184,8 +185,7 @@ def _on_ha_relation_broken(self, event: ops.RelationBrokenEvent): self._stored.ha_relation_broken_data = event.snapshot() def _on_test_pebble_ready(self, event: ops.PebbleReadyEvent): - assert event.workload is not None, ( - 'workload events must have a reference to a container') + assert event.workload is not None, 'workload events must have a reference to a container' self._stored.on_test_pebble_ready.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) self._stored.test_pebble_ready_data = event.snapshot() @@ -198,15 +198,17 @@ def _on_test_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): self._stored.test_pebble_custom_notice_data = event.snapshot() def _on_start_action(self, event: ops.ActionEvent): - assert event.handle.kind == 'start_action', ( - 'event action name cannot be different from the one being handled') + assert ( + event.handle.kind == 'start_action' + ), 'event action name cannot be different from the one being handled' self._stored.on_start_action.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) def _on_secret_changed(self, event: ops.SecretChangedEvent): # subprocess and isinstance don't mix well - assert type(event.secret).__name__ == 'Secret', ( - f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}') + assert ( + type(event.secret).__name__ == 'Secret' + ), f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}' assert event.secret.id, 'secret must have an ID' self._stored.on_secret_changed.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) @@ -214,8 +216,9 @@ def _on_secret_changed(self, event: ops.SecretChangedEvent): def _on_secret_remove(self, event: ops.SecretRemoveEvent): # subprocess and isinstance don't mix well - assert type(event.secret).__name__ == 'Secret', ( - f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}') + assert ( + type(event.secret).__name__ == 'Secret' + ), f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}' assert event.secret.id, 'secret must have an ID' self._stored.on_secret_remove.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) @@ -223,8 +226,9 @@ def _on_secret_remove(self, event: ops.SecretRemoveEvent): def _on_secret_rotate(self, event: ops.SecretRotateEvent): # subprocess and isinstance don't mix well - assert type(event.secret).__name__ == 'Secret', ( - f'SecretEvent.secret must be 
a Secret instance, not {type(event.secret)}') + assert ( + type(event.secret).__name__ == 'Secret' + ), f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}' assert event.secret.id, 'secret must have an ID' self._stored.on_secret_rotate.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) @@ -232,16 +236,18 @@ def _on_secret_rotate(self, event: ops.SecretRotateEvent): def _on_secret_expired(self, event: ops.SecretExpiredEvent): # subprocess and isinstance don't mix well - assert type(event.secret).__name__ == 'Secret', ( - f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}') + assert ( + type(event.secret).__name__ == 'Secret' + ), f'SecretEvent.secret must be a Secret instance, not {type(event.secret)}' assert event.secret.id, 'secret must have an ID' self._stored.on_secret_expired.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) self._stored.secret_expired_data = event.snapshot() def _on_foo_bar_action(self, event: ops.ActionEvent): - assert event.handle.kind == 'foo_bar_action', ( - 'event action name cannot be different from the one being handled') + assert ( + event.handle.kind == 'foo_bar_action' + ), 'event action name cannot be different from the one being handled' self._stored.on_foo_bar_action.append(type(event).__name__) self._stored.observed_event_types.append(type(event).__name__) diff --git a/test/charms/test_smoke/src/charm.py b/test/charms/test_smoke/src/charm.py index eafc0a4b3..029480126 100755 --- a/test/charms/test_smoke/src/charm.py +++ b/test/charms/test_smoke/src/charm.py @@ -43,5 +43,5 @@ def _on_install(self, event: EventBase): self.unit.status = ActiveStatus() -if __name__ == "__main__": +if __name__ == '__main__': main(SmokeCharm) diff --git a/test/fake_pebble.py b/test/fake_pebble.py index 82a2b700c..69fe43eff 100644 --- a/test/fake_pebble.py +++ b/test/fake_pebble.py @@ -28,25 +28,28 @@ from typing_extensions import NotRequired _Response = typing.TypedDict( - "_Response", { - "result": typing.Optional[typing.Dict[str, str]], - "status": str, - "status-code": int, - "type": str, - "change": NotRequired[str]}) + '_Response', + { + 'result': typing.Optional[typing.Dict[str, str]], + 'status': str, + 'status-code': int, + 'type': str, + 'change': NotRequired[str], + }, +) class Handler(http.server.BaseHTTPRequestHandler): - _route = typing.List[typing.Tuple[ - typing.Literal['GET', 'POST'], - typing.Any, - typing.Callable[..., None] - ]] - - def __init__(self, - request: socket.socket, - client_address: typing.Tuple[str, int], - server: socketserver.BaseServer): + _route = typing.List[ + typing.Tuple[typing.Literal['GET', 'POST'], typing.Any, typing.Callable[..., None]] + ] + + def __init__( + self, + request: socket.socket, + client_address: typing.Tuple[str, int], + server: socketserver.BaseServer, + ): self.routes: Handler._route = [ ('GET', re.compile(r'^/system-info$'), self.get_system_info), ('POST', re.compile(r'^/services$'), self.services_action), @@ -67,45 +70,41 @@ def respond(self, d: _Response, status: int = 200): def bad_request(self, message: str): d: _Response = { - "result": { - "message": message, + 'result': { + 'message': message, }, - "status": "Bad Request", - "status-code": 400, - "type": "error" + 'status': 'Bad Request', + 'status-code': 400, + 'type': 'error', } self.respond(d, 400) def not_found(self): d: _Response = { - "result": { - "message": "invalid API endpoint requested" - }, - "status": "Not Found", - 
"status-code": 404, - "type": "error" + 'result': {'message': 'invalid API endpoint requested'}, + 'status': 'Not Found', + 'status-code': 404, + 'type': 'error', } self.respond(d, 404) def method_not_allowed(self): d: _Response = { - "result": { - "message": 'method "PUT" not allowed' - }, - "status": "Method Not Allowed", - "status-code": 405, - "type": "error" + 'result': {'message': 'method "PUT" not allowed'}, + 'status': 'Method Not Allowed', + 'status-code': 405, + 'type': 'error', } self.respond(d, 405) def internal_server_error(self, msg: Exception): d: _Response = { - "result": { - "message": f"internal server error: {msg}", + 'result': { + 'message': f'internal server error: {msg}', }, - "status": "Internal Server Error", - "status-code": 500, - "type": "error" + 'status': 'Internal Server Error', + 'status-code': 500, + 'type': 'error', } self.respond(d, 500) @@ -157,23 +156,19 @@ def read_body_json(self) -> typing.Dict[str, str]: body = body.decode('utf-8') return json.loads(body) - def get_system_info(self, - match: typing.Any, - query: typing.Dict[str, str], - data: typing.Dict[str, str]): + def get_system_info( + self, match: typing.Any, query: typing.Dict[str, str], data: typing.Dict[str, str] + ): self.respond({ - "result": { - "version": "3.14.159" - }, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': {'version': '3.14.159'}, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) - def services_action(self, - match: typing.Any, - query: typing.Dict[str, str], - data: typing.Dict[str, str]): + def services_action( + self, match: typing.Any, query: typing.Dict[str, str], data: typing.Dict[str, str] + ): action = data['action'] services = data['services'] if action == 'start': @@ -182,11 +177,11 @@ def services_action(self, self.bad_request(f'service "{service}" does not exist') return self.respond({ - "change": "1234", - "result": None, - "status": "Accepted", - "status-code": 202, - "type": "async" + 'change': '1234', + 'result': None, + 'status': 'Accepted', + 'status-code': 202, + 'type': 'async', }) else: self.bad_request(f'action "{action}" not implemented') diff --git a/test/pebble_cli.py b/test/pebble_cli.py index 573c842aa..5ae722df0 100644 --- a/test/pebble_cli.py +++ b/test/pebble_cli.py @@ -40,9 +40,11 @@ def main(): p.add_argument('change_id', help='ID of change to abort') p = subparsers.add_parser('ack', help='acknowledge warnings up to given time') - p.add_argument('--timestamp', help='time to acknowledge up to (YYYY-mm-ddTHH:MM:SS.f+ZZ:zz' - 'format), default current time', - type=timeconv.parse_rfc3339) + p.add_argument( + '--timestamp', + help='time to acknowledge up to (YYYY-mm-ddTHH:MM:SS.f+ZZ:zzformat), default current time', + type=timeconv.parse_rfc3339, + ) p = subparsers.add_parser('add', help='add a configuration layer dynamically') p.add_argument('--combine', action='store_true', help='combine layer instead of appending') @@ -55,39 +57,58 @@ def main(): p.add_argument('change_id', help='ID of change to fetch') p = subparsers.add_parser('changes', help='show (filtered) changes') - p.add_argument('--select', help='change state to filter on, default %(default)s', - choices=[s.value for s in pebble.ChangeState], default='all') + p.add_argument( + '--select', + help='change state to filter on, default %(default)s', + choices=[s.value for s in pebble.ChangeState], + default='all', + ) p.add_argument('--service', help='optional service name to filter on') p = subparsers.add_parser('checks', help='show (filtered) checks') - 
p.add_argument('--level', help='check level to filter on, default all levels', - choices=[c.value for c in pebble.CheckLevel], default='') + p.add_argument( + '--level', + help='check level to filter on, default all levels', + choices=[c.value for c in pebble.CheckLevel], + default='', + ) p.add_argument('name', help='check name(s) to filter on', nargs='*') p = subparsers.add_parser('exec', help='execute a command') p.add_argument('--context', help='service context') - p.add_argument('--env', help='environment variables to set', action='append', - metavar='KEY=VALUE') + p.add_argument( + '--env', help='environment variables to set', action='append', metavar='KEY=VALUE' + ) p.add_argument('--working-dir', help='working directory to run command in') - p.add_argument('--io-mode', help='input/output mode, default %(default)r', - choices=['passthrough', 'string'], default='passthrough') + p.add_argument( + '--io-mode', + help='input/output mode, default %(default)r', + choices=['passthrough', 'string'], + default='passthrough', + ) p.add_argument('-t', '--timeout', type=float, help='timeout in seconds') p.add_argument('-u', '--user', help='user to run as') p.add_argument('-g', '--group', help='group to run as') - p.add_argument('--encoding', help="input/output encoding or 'none', default %(default)r", - default='utf-8') + p.add_argument( + '--encoding', help="input/output encoding or 'none', default %(default)r", default='utf-8' + ) p.add_argument('--combine-stderr', help='combine stderr into stdout', action='store_true') p.add_argument('exec_command', help='command and arguments', nargs='+', metavar='command') p = subparsers.add_parser('ls', help='list files') - p.add_argument('-d', '--directory', action='store_true', - help='list directories themselves, not their contents') + p.add_argument( + '-d', + '--directory', + action='store_true', + help='list directories themselves, not their contents', + ) p.add_argument('-p', '--pattern', help='glob pattern to filter results') p.add_argument('path', help='name of directory or file') p = subparsers.add_parser('mkdir', help='create directory') - p.add_argument('-p', '--parents', action='store_true', - help='create parent directories if needed') + p.add_argument( + '-p', '--parents', action='store_true', help='create parent directories if needed' + ) p.add_argument('path', help='path to create') p = subparsers.add_parser('plan', help='show configuration plan (combined layers)') @@ -105,8 +126,9 @@ def main(): p.add_argument('remote_path', help='path of remote file to copy to') p = subparsers.add_parser('rm', help='remove path') - p.add_argument('-r', '--recursive', action='store_true', - help='recursively delete directory contents') + p.add_argument( + '-r', '--recursive', action='store_true', help='recursively delete directory contents' + ) p.add_argument('path', help='path to remove') p = subparsers.add_parser('services', help='show service status') @@ -125,8 +147,12 @@ def main(): p.add_argument('change_id', help='ID of change to wait for') p = subparsers.add_parser('warnings', help='show (filtered) warnings') - p.add_argument('--select', help='warning state to filter on, default %(default)s', - choices=[s.value for s in pebble.WarningState], default='all') + p.add_argument( + '--select', + help='warning state to filter on, default %(default)s', + choices=[s.value for s in pebble.WarningState], + default='all', + ) args = parser.parse_args() @@ -162,8 +188,9 @@ def main(): elif args.command == 'change': result = 
client.get_change(pebble.ChangeID(args.change_id)) elif args.command == 'changes': - result = client.get_changes(select=pebble.ChangeState(args.select), - service=args.service) + result = client.get_changes( + select=pebble.ChangeState(args.select), service=args.service + ) elif args.command == 'checks': result = client.get_checks(level=pebble.CheckLevel(args.level), names=args.name) elif args.command == 'exec': @@ -243,9 +270,13 @@ def main(): elif args.command == 'push': with open(args.local_path, 'rb') as f: client.push( - args.remote_path, f, make_dirs=args.dirs, + args.remote_path, + f, + make_dirs=args.dirs, permissions=int(args.mode, 8) if args.mode is not None else None, - user=args.user, group=args.group) + user=args.user, + group=args.group, + ) result = f'wrote {args.local_path} to remote file {args.remote_path}' elif args.command == 'rm': client.remove_path(args.path, recursive=bool(args.recursive)) @@ -268,8 +299,7 @@ def main(): print(f'APIError: {e.code} {e.status}: {e.message}', file=sys.stderr) sys.exit(1) except pebble.ConnectionError as e: - print(f'ConnectionError: cannot connect to socket {socket_path!r}: {e}', - file=sys.stderr) + print(f'ConnectionError: cannot connect to socket {socket_path!r}: {e}', file=sys.stderr) sys.exit(1) except pebble.ChangeError as e: print('ChangeError:', e, file=sys.stderr) diff --git a/test/smoke/test_smoke.py b/test/smoke/test_smoke.py index 1c2f28e40..a1ff79d56 100644 --- a/test/smoke/test_smoke.py +++ b/test/smoke/test_smoke.py @@ -26,11 +26,10 @@ async def test_smoke(ops_test: OpsTest): # Build the charm. (We just build it for focal -- it *should* work to deploy it on # older versions of Juju.) - charm = await ops_test.build_charm("./test/charms/test_smoke/") + charm = await ops_test.build_charm('./test/charms/test_smoke/') for series in ['focal', 'bionic', 'xenial']: - app = await ops_test.model.deploy( - charm, series=series, application_name=f"{series}-smoke") + app = await ops_test.model.deploy(charm, series=series, application_name=f'{series}-smoke') await ops_test.model.wait_for_idle(timeout=600) - assert app.status == "active", f"Series {series} failed with '{app.status}' status" + assert app.status == 'active', f"Series {series} failed with '{app.status}' status" diff --git a/test/test_charm.py b/test/test_charm.py index 5d72bc7d2..b70673cec 100644 --- a/test/test_charm.py +++ b/test/test_charm.py @@ -34,7 +34,6 @@ def fake_script(request: pytest.FixtureRequest) -> FakeScript: def test_basic(request: pytest.FixtureRequest): class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -75,6 +74,7 @@ def dec(fn: typing.Any) -> typing.Callable[..., None]: def wrapper(charm: 'MyCharm', evt: ops.EventBase): events.append(evt) fn(charm, evt) + return wrapper class MyCharm(ops.CharmBase): @@ -97,12 +97,11 @@ def _on_start(self, event: ops.EventBase): def test_observer_not_referenced_warning( - request: pytest.FixtureRequest, - caplog: pytest.LogCaptureFixture + request: pytest.FixtureRequest, caplog: pytest.LogCaptureFixture ): class MyObj(ops.Object): def __init__(self, charm: ops.CharmBase): - super().__init__(charm, "obj") + super().__init__(charm, 'obj') framework.observe(charm.on.start, self._on_start) def _on_start(self, _: ops.StartEvent): @@ -142,7 +141,6 @@ class MyCharm(ops.CharmBase): def test_relation_events(request: pytest.FixtureRequest): - class MyCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -161,7 +159,8 @@ def 
on_any_relation(self, event: ops.RelationEvent): self.seen.append(type(event).__name__) # language=YAML - meta = ops.CharmMeta.from_yaml(metadata=''' + meta = ops.CharmMeta.from_yaml( + metadata=""" name: my-charm requires: req1: @@ -178,7 +177,8 @@ def on_any_relation(self, event: ops.RelationEvent): interface: peer1 peer-2: interface: peer2 -''') +""" + ) framework = create_framework(request, meta=meta) charm = MyCharm(framework) @@ -220,7 +220,7 @@ def __init__(self, framework: ops.Framework): def _on_stor1_attach(self, event: ops.StorageAttachedEvent): self.seen.append(type(event).__name__) - assert event.storage.location == Path("/var/srv/stor1/0") + assert event.storage.location == Path('/var/srv/stor1/0') def _on_stor2_detach(self, event: ops.StorageDetachingEvent): self.seen.append(type(event).__name__) @@ -232,7 +232,7 @@ def _on_stor4_attach(self, event: ops.StorageAttachedEvent): self.seen.append(type(event).__name__) # language=YAML - meta = ops.CharmMeta.from_yaml(''' + meta = ops.CharmMeta.from_yaml(""" name: my-charm storage: stor-4: @@ -257,10 +257,10 @@ def _on_stor4_attach(self, event: ops.StorageAttachedEvent): multiple: range: 10+ type: filesystem -''') +""") fake_script.write( - "storage-get", + 'storage-get', """ if [ "$1" = "-s" ]; then id=${2#*/} @@ -292,7 +292,7 @@ def _on_stor4_attach(self, event: ops.StorageAttachedEvent): """, ) fake_script.write( - "storage-list", + 'storage-list', """ echo '["disks/0"]' """, @@ -307,12 +307,13 @@ def _on_stor4_attach(self, event: ops.StorageAttachedEvent): framework = create_framework(request, meta=meta) charm = MyCharm(framework) - charm.on['stor1'].storage_attached.emit(ops.Storage("stor1", 0, charm.model._backend)) - charm.on['stor2'].storage_detaching.emit(ops.Storage("stor2", 0, charm.model._backend)) - charm.on['stor3'].storage_attached.emit(ops.Storage("stor3", 0, charm.model._backend)) - charm.on['stor-4'].storage_attached.emit(ops.Storage("stor-4", 0, charm.model._backend)) + charm.on['stor1'].storage_attached.emit(ops.Storage('stor1', 0, charm.model._backend)) + charm.on['stor2'].storage_detaching.emit(ops.Storage('stor2', 0, charm.model._backend)) + charm.on['stor3'].storage_attached.emit(ops.Storage('stor3', 0, charm.model._backend)) + charm.on['stor-4'].storage_attached.emit(ops.Storage('stor-4', 0, charm.model._backend)) charm.on['stor-multiple-dashes'].storage_attached.emit( - ops.Storage("stor-multiple-dashes", 0, charm.model._backend)) + ops.Storage('stor-multiple-dashes', 0, charm.model._backend) + ) assert charm.seen == [ 'StorageAttachedEvent', @@ -323,16 +324,13 @@ def _on_stor4_attach(self, event: ops.StorageAttachedEvent): def test_workload_events(request: pytest.FixtureRequest): - class MyCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) self.seen: typing.List[str] = [] for workload in ('container-a', 'containerb'): # Hook up relation events to generic handler. 
- self.framework.observe( - self.on[workload].pebble_ready, - self.on_any_pebble_ready) + self.framework.observe(self.on[workload].pebble_ready, self.on_any_pebble_ready) self.framework.observe( self.on[workload].pebble_custom_notice, self.on_any_pebble_custom_notice, @@ -345,12 +343,14 @@ def on_any_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): self.seen.append(type(event).__name__) # language=YAML - meta = ops.CharmMeta.from_yaml(metadata=''' + meta = ops.CharmMeta.from_yaml( + metadata=""" name: my-charm containers: container-a: containerb: -''') +""" + ) framework = create_framework(request, meta=meta) charm = MyCharm(framework) @@ -358,14 +358,18 @@ def on_any_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): assert 'containerb_pebble_ready' in repr(charm.on) charm.on['container-a'].pebble_ready.emit( - charm.framework.model.unit.get_container('container-a')) + charm.framework.model.unit.get_container('container-a') + ) charm.on['containerb'].pebble_ready.emit( - charm.framework.model.unit.get_container('containerb')) + charm.framework.model.unit.get_container('containerb') + ) charm.on['container-a'].pebble_custom_notice.emit( - charm.framework.model.unit.get_container('container-a'), '1', 'custom', 'x') + charm.framework.model.unit.get_container('container-a'), '1', 'custom', 'x' + ) charm.on['containerb'].pebble_custom_notice.emit( - charm.framework.model.unit.get_container('containerb'), '2', 'custom', 'y') + charm.framework.model.unit.get_container('containerb'), '2', 'custom', 'y' + ) assert charm.seen == [ 'PebbleReadyEvent', @@ -377,7 +381,8 @@ def on_any_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): def test_relations_meta(): # language=YAML - meta = ops.CharmMeta.from_yaml(metadata=''' + meta = ops.CharmMeta.from_yaml( + metadata=""" name: my-charm requires: database: @@ -387,7 +392,8 @@ def test_relations_meta(): metrics: interface: prometheus-scraping optional: true -''') +""" + ) assert meta.requires['database'].interface_name == 'mongodb' assert meta.requires['database'].limit == 1 @@ -403,82 +409,70 @@ def test_relations_meta(): def test_relations_meta_limit_type_validation(): with pytest.raises(TypeError, match=r"limit should be an int, not "): # language=YAML - ops.CharmMeta.from_yaml(''' + ops.CharmMeta.from_yaml(""" name: my-charm requires: database: interface: mongodb limit: foobar -''') +""") def test_relations_meta_scope_type_validation(): with pytest.raises( - TypeError, - match="scope should be one of 'global', 'container'; not 'foobar'" + TypeError, match="scope should be one of 'global', 'container'; not 'foobar'" ): # language=YAML - ops.CharmMeta.from_yaml(''' + ops.CharmMeta.from_yaml(""" name: my-charm requires: database: interface: mongodb scope: foobar -''') +""") def test_meta_from_charm_root(): with tempfile.TemporaryDirectory() as d: td = pathlib.Path(d) (td / 'metadata.yaml').write_text( - yaml.safe_dump( - {"name": "bob", - "requires": { - "foo": - {"interface": "bar"} - }})) + yaml.safe_dump({'name': 'bob', 'requires': {'foo': {'interface': 'bar'}}}) + ) meta = ops.CharmMeta.from_charm_root(td) - assert meta.name == "bob" - assert meta.requires['foo'].interface_name == "bar" + assert meta.name == 'bob' + assert meta.requires['foo'].interface_name == 'bar' def test_actions_from_charm_root(): with tempfile.TemporaryDirectory() as d: td = pathlib.Path(d) (td / 'actions.yaml').write_text( - yaml.safe_dump( - {"foo": { - "description": "foos the bar", - "additionalProperties": False - }} - ) + 
yaml.safe_dump({'foo': {'description': 'foos the bar', 'additionalProperties': False}}) ) (td / 'metadata.yaml').write_text( - yaml.safe_dump( - {"name": "bob", - "requires": { - "foo": - {"interface": "bar"} - }})) + yaml.safe_dump({'name': 'bob', 'requires': {'foo': {'interface': 'bar'}}}) + ) meta = ops.CharmMeta.from_charm_root(td) - assert meta.name == "bob" - assert meta.requires['foo'].interface_name == "bar" + assert meta.name == 'bob' + assert meta.requires['foo'].interface_name == 'bar' assert not meta.actions['foo'].additional_properties - assert meta.actions['foo'].description == "foos the bar" + assert meta.actions['foo'].description == 'foos the bar' def _setup_test_action(fake_script: FakeScript): fake_script.write('action-get', """echo '{"foo-name": "name", "silent": true}'""") - fake_script.write('action-set', "") - fake_script.write('action-log', "") - fake_script.write('action-fail', "") + fake_script.write('action-set', '') + fake_script.write('action-log', '') + fake_script.write('action-fail', '') def _get_action_test_meta(): - return ops.CharmMeta.from_yaml(metadata=''' + return ops.CharmMeta.from_yaml( + metadata=""" name: my-charm -''', actions=''' +""", + actions=""" foo-bar: description: "Foos the bar." params: @@ -494,13 +488,12 @@ def _get_action_test_meta(): start: description: "Start the unit." additionalProperties: false -''') +""", + ) def test_action_events(request: pytest.FixtureRequest, fake_script: FakeScript): - class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework): super().__init__(framework) framework.observe(self.on.foo_bar_action, self._on_foo_bar_action) @@ -524,32 +517,31 @@ def _on_start_action(self, event: ops.ActionEvent): assert 'foo_bar_action' in events assert 'start_action' in events - action_id = "1234" + action_id = '1234' charm.on.foo_bar_action.emit(id=action_id) - assert charm.seen_action_params == {"foo-name": "name", "silent": True} + assert charm.seen_action_params == {'foo-name': 'name', 'silent': True} assert fake_script.calls() == [ ['action-get', '--format=json'], - ['action-log', "test-log"], - ['action-set', "res=val with spaces", f"id={action_id}"], - ['action-fail', "test-fail"], + ['action-log', 'test-log'], + ['action-set', 'res=val with spaces', f'id={action_id}'], + ['action-fail', 'test-fail'], ] -@pytest.mark.parametrize("bad_res", [ - {'a': {'b': 'c'}, 'a.b': 'c'}, - {'a': {'B': 'c'}}, - {'a': {(1, 2): 'c'}}, - {'a': {None: 'c'}}, - {'aBc': 'd'} -]) +@pytest.mark.parametrize( + 'bad_res', + [ + {'a': {'b': 'c'}, 'a.b': 'c'}, + {'a': {'B': 'c'}}, + {'a': {(1, 2): 'c'}}, + {'a': {None: 'c'}}, + {'aBc': 'd'}, + ], +) def test_invalid_action_results( - request: pytest.FixtureRequest, - fake_script: FakeScript, - bad_res: typing.Dict[str, typing.Any] + request: pytest.FixtureRequest, fake_script: FakeScript, bad_res: typing.Dict[str, typing.Any] ): - class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework): super().__init__(framework) self.res: typing.Dict[str, typing.Any] = {} @@ -569,15 +561,11 @@ def _on_foo_bar_action(self, event: ops.ActionEvent): def test_action_event_defer_fails( - request: pytest.FixtureRequest, - monkeypatch: pytest.MonkeyPatch, - fake_script: FakeScript + request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch, fake_script: FakeScript ): - cmd_type = 'action' class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework): super().__init__(framework) framework.observe(self.on.start_action, self._on_start_action) @@ -585,8 +573,7 @@ 
def __init__(self, framework: ops.Framework): def _on_start_action(self, event: ops.ActionEvent): event.defer() - fake_script.write(f"{cmd_type}-get", - """echo '{"foo-name": "name", "silent": true}'""") + fake_script.write(f'{cmd_type}-get', """echo '{"foo-name": "name", "silent": true}'""") monkeypatch.setenv(f'JUJU_{cmd_type.upper()}_NAME', 'start') meta = _get_action_test_meta() framework = create_framework(request, meta=meta) @@ -643,9 +630,9 @@ def test_containers_storage(): - arm """) assert isinstance(meta.containers['test1'], ops.ContainerMeta) - assert isinstance(meta.containers['test1'].mounts["data"], ops.ContainerStorageMeta) - assert meta.containers['test1'].mounts["data"].location == '/test/storagemount' - assert meta.containers['test1'].mounts["other"].location == '/test/otherdata' + assert isinstance(meta.containers['test1'].mounts['data'], ops.ContainerStorageMeta) + assert meta.containers['test1'].mounts['data'].location == '/test/storagemount' + assert meta.containers['test1'].mounts['other'].location == '/test/otherdata' assert meta.storages['other'].properties == ['transient'] assert meta.containers['test1'].resource == 'ubuntu-22.10' assert meta.containers['test2'].bases is not None @@ -686,13 +673,12 @@ def test_containers_storage_multiple_mounts(): location: /test/otherdata """) assert isinstance(meta.containers['test1'], ops.ContainerMeta) - assert isinstance(meta.containers['test1'].mounts["data"], ops.ContainerStorageMeta) - assert meta.containers['test1'].mounts["data"].locations[0] == \ - '/test/storagemount' - assert meta.containers['test1'].mounts["data"].locations[1] == '/test/otherdata' + assert isinstance(meta.containers['test1'].mounts['data'], ops.ContainerStorageMeta) + assert meta.containers['test1'].mounts['data'].locations[0] == '/test/storagemount' + assert meta.containers['test1'].mounts['data'].locations[1] == '/test/otherdata' with pytest.raises(RuntimeError): - meta.containers["test1"].mounts["data"].location + meta.containers['test1'].mounts['data'].location def test_secret_events(request: pytest.FixtureRequest): @@ -899,14 +885,17 @@ def _on_collect_status(self, event: ops.CollectStatusEvent): ops.charm._evaluate_status(charm) -@pytest.mark.parametrize("statuses,expected", [ - (['blocked', 'error'], 'error'), - (['waiting', 'blocked'], 'blocked'), - (['waiting', 'maintenance'], 'maintenance'), - (['active', 'waiting'], 'waiting'), - (['active', 'unknown'], 'active'), - (['unknown'], 'unknown') -]) +@pytest.mark.parametrize( + 'statuses,expected', + [ + (['blocked', 'error'], 'error'), + (['waiting', 'blocked'], 'blocked'), + (['waiting', 'maintenance'], 'maintenance'), + (['active', 'waiting'], 'waiting'), + (['active', 'unknown'], 'active'), + (['unknown'], 'unknown'), + ], +) def test_collect_status_priority( request: pytest.FixtureRequest, fake_script: FakeScript, @@ -930,11 +919,8 @@ def _on_collect_status(self, event: ops.CollectStatusEvent): charm = MyCharm(framework, statuses=statuses) ops.charm._evaluate_status(charm) - status_set_calls = [call for call in fake_script.calls(True) - if call[0] == 'status-set'] - assert status_set_calls == [ - ['status-set', '--application=True', expected, ''] - ] + status_set_calls = [call for call in fake_script.calls(True) if call[0] == 'status-set'] + assert status_set_calls == [['status-set', '--application=True', expected, '']] def test_meta_links(): @@ -964,10 +950,8 @@ def test_meta_links(): - https://features.example.com """) assert meta.links.websites == ['https://example.com', 
'https://example.org'] - assert meta.links.sources == [ - 'https://git.example.com', 'https://bzr.example.com'] - assert meta.links.issues == [ - 'https://bugs.example.com', 'https://features.example.com'] + assert meta.links.sources == ['https://git.example.com', 'https://bzr.example.com'] + assert meta.links.issues == ['https://bugs.example.com', 'https://features.example.com'] def test_meta_links_charmcraft_yaml(): @@ -1015,8 +999,10 @@ def test_meta_assumes(): assert meta.assumes.features == [ 'k8s-api', ops.JujuAssumes( - [ops.JujuAssumes(['juju >= 2.9.44', 'juju < 3']), - ops.JujuAssumes(['juju >= 3.1.5', 'juju < 4'])], - ops.JujuAssumesCondition.ANY + [ + ops.JujuAssumes(['juju >= 2.9.44', 'juju < 3']), + ops.JujuAssumes(['juju >= 3.1.5', 'juju < 4']), + ], + ops.JujuAssumesCondition.ANY, ), ] diff --git a/test/test_framework.py b/test/test_framework.py index 025fda0bf..7be174a09 100644 --- a/test/test_framework.py +++ b/test/test_framework.py @@ -24,7 +24,6 @@ import sys import typing from pathlib import Path -from test.test_helpers import FakeScript from unittest.mock import patch import pytest @@ -33,6 +32,7 @@ from ops.framework import _BREAKPOINT_WELCOME_MESSAGE, _event_regex from ops.model import _ModelBackend from ops.storage import JujuStorage, NoSnapshotError, SQLiteStorage +from test.test_helpers import FakeScript def create_model(): @@ -56,10 +56,10 @@ def create_framework( same dir (e.g. for storing state). """ if tmpdir is None: - data_fpath = ":memory:" + data_fpath = ':memory:' charm_dir = 'non-existant' else: - data_fpath = tmpdir / "framework.data" + data_fpath = tmpdir / 'framework.data' charm_dir = tmpdir patcher = patch('ops.storage.SQLiteStorage.DB_LOCK_TIMEOUT', datetime.timedelta(0)) @@ -68,7 +68,8 @@ def create_framework( SQLiteStorage(data_fpath), charm_dir, meta=model._cache._meta if model else ops.CharmMeta(), - model=model) # type: ignore + model=model, # type: ignore + ) request.addfinalizer(framework.close) request.addfinalizer(patcher.stop) return framework @@ -84,16 +85,17 @@ def test_deprecated_init(self, caplog: pytest.LogCaptureFixture): # For 0.7, this still works, but it is deprecated. 
with caplog.at_level(logging.WARNING): framework = ops.Framework(':memory:', None, None, None) # type: ignore - assert "WARNING:ops.framework:deprecated: Framework now takes a Storage not a path" in [ - f"{record.levelname}:{record.name}:{record.message}" for record in caplog.records] + assert 'WARNING:ops.framework:deprecated: Framework now takes a Storage not a path' in [ + f'{record.levelname}:{record.name}:{record.message}' for record in caplog.records + ] assert isinstance(framework._storage, SQLiteStorage) def test_handle_path(self): cases = [ - (ops.Handle(None, "root", None), "root"), - (ops.Handle(None, "root", "1"), "root[1]"), - (ops.Handle(ops.Handle(None, "root", None), "child", None), "root/child"), - (ops.Handle(ops.Handle(None, "root", "1"), "child", "2"), "root[1]/child[2]"), + (ops.Handle(None, 'root', None), 'root'), + (ops.Handle(None, 'root', '1'), 'root[1]'), + (ops.Handle(ops.Handle(None, 'root', None), 'child', None), 'root/child'), + (ops.Handle(ops.Handle(None, 'root', '1'), 'child', '2'), 'root[1]/child[2]'), ] for handle, path in cases: assert str(handle) == path @@ -116,7 +118,7 @@ def test_restore_unknown(self, request: pytest.FixtureRequest): class Foo(ops.Object): pass - handle = ops.Handle(None, "a_foo", "some_key") + handle = ops.Handle(None, 'a_foo', 'some_key') framework.register_type(Foo, None, handle.kind) # type: ignore @@ -124,9 +126,9 @@ class Foo(ops.Object): framework.load_snapshot(handle) except NoSnapshotError as e: assert e.handle_path == str(handle) - assert str(e) == "no snapshot data found for a_foo[some_key] object" + assert str(e) == 'no snapshot data found for a_foo[some_key] object' else: - pytest.fail("exception NoSnapshotError not raised") + pytest.fail('exception NoSnapshotError not raised') def test_snapshot_roundtrip(self, request: pytest.FixtureRequest, tmp_path: pathlib.Path): class Foo: @@ -137,12 +139,12 @@ def __init__(self, handle: ops.Handle, n: int): self.my_n = n def snapshot(self) -> typing.Dict[str, int]: - return {"My N!": self.my_n} + return {'My N!': self.my_n} def restore(self, snapshot: typing.Dict[str, int]): - self.my_n = snapshot["My N!"] + 1 + self.my_n = snapshot['My N!'] + 1 - handle = ops.Handle(None, "a_foo", "some_key") + handle = ops.Handle(None, 'a_foo', 'some_key') event = Foo(handle, 1) framework1 = create_framework(request, tmpdir=tmp_path) @@ -191,29 +193,29 @@ def __init__(self, parent: ops.Object, key: str): self.reprs: typing.List[str] = [] def on_any(self, event: ops.EventBase): - self.seen.append(f"on_any:{event.handle.kind}") + self.seen.append(f'on_any:{event.handle.kind}') self.reprs.append(repr(event)) def on_foo(self, event: ops.EventBase): - self.seen.append(f"on_foo:{event.handle.kind}") + self.seen.append(f'on_foo:{event.handle.kind}') self.reprs.append(repr(event)) - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') framework.observe(pub.foo, obs.on_any) framework.observe(pub.bar, obs.on_any) - with pytest.raises(TypeError, match="^Framework.observe requires a method"): + with pytest.raises(TypeError, match='^Framework.observe requires a method'): framework.observe(pub.baz, obs) # type: ignore pub.foo.emit() pub.bar.emit() - assert obs.seen == ["on_any:foo", "on_any:bar"] + assert obs.seen == ['on_any:foo', 'on_any:bar'] assert obs.reprs == [ - "", - "", + '', + '', ] def test_event_observer_more_args(self, request: pytest.FixtureRequest): @@ -235,23 +237,23 @@ def __init__(self, parent: ops.Object, 
key: str): self.reprs: typing.List[str] = [] def on_foo(self, event: ops.EventBase): - self.seen.append(f"on_foo:{event.handle.kind}") + self.seen.append(f'on_foo:{event.handle.kind}') self.reprs.append(repr(event)) def on_bar(self, event: ops.EventBase, _: int = 1): - self.seen.append(f"on_bar:{event.handle.kind}") + self.seen.append(f'on_bar:{event.handle.kind}') self.reprs.append(repr(event)) def on_baz(self, event: ops.EventBase, *, _: int = 1): - self.seen.append(f"on_baz:{event.handle.kind}") + self.seen.append(f'on_baz:{event.handle.kind}') self.reprs.append(repr(event)) def on_qux(self, event: ops.EventBase, *args, **kwargs): # type: ignore - self.seen.append(f"on_qux:{event.handle.kind}") + self.seen.append(f'on_qux:{event.handle.kind}') self.reprs.append(repr(event)) - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') framework.observe(pub.foo, obs.on_foo) framework.observe(pub.bar, obs.on_bar) @@ -265,14 +267,13 @@ def on_qux(self, event: ops.EventBase, *args, **kwargs): # type: ignore assert obs.seen == ['on_foo:foo', 'on_bar:bar', 'on_baz:baz', 'on_qux:qux'] assert obs.reprs == [ - "", - "", - "", - "", + '', + '', + '', + '', ] def test_bad_sig_observer(self, request: pytest.FixtureRequest): - class MyEvent(ops.EventBase): pass @@ -289,19 +290,21 @@ def _on_foo(self): def _on_bar(self, event: ops.EventBase, extra: typing.Any): assert False, 'should not be reached' - def _on_baz(self, - event: ops.EventBase, - extra: typing.Optional[typing.Any] = None, - *, - k: typing.Any): + def _on_baz( + self, + event: ops.EventBase, + extra: typing.Optional[typing.Any] = None, + *, + k: typing.Any, + ): assert False, 'should not be reached' def _on_qux(self, event: ops.EventBase, extra: typing.Optional[typing.Any] = None): assert False, 'should not be reached' framework = create_framework(request) - pub = MyNotifier(framework, "pub") - obs = MyObserver(framework, "obs") + pub = MyNotifier(framework, 'pub') + obs = MyObserver(framework, 'obs') with pytest.raises(TypeError, match="only 'self' and the 'event'"): framework.observe(pub.foo, obs._on_foo) # type: ignore @@ -315,7 +318,6 @@ def test_on_pre_commit_emitted(self, request: pytest.FixtureRequest, tmp_path: p framework = create_framework(request, tmpdir=tmp_path) class PreCommitObserver(ops.Object): - _stored = ops.StoredState() def __init__(self, parent: ops.Object, key: typing.Optional[str]): @@ -380,10 +382,10 @@ def on_any(self, event: ops.EventBase): if not self.done.get(event.handle.kind): event.defer() - pub1 = MyNotifier1(framework, "1") - pub2 = MyNotifier2(framework, "1") - obs1 = MyObserver(framework, "1") - obs2 = MyObserver(framework, "2") + pub1 = MyNotifier1(framework, '1') + pub2 = MyNotifier2(framework, '1') + obs1 = MyObserver(framework, '1') + obs2 = MyObserver(framework, '2') framework.observe(pub1.a, obs1.on_any) framework.observe(pub1.b, obs1.on_any) @@ -396,30 +398,30 @@ def on_any(self, event: ops.EventBase): pub2.c.emit() # Events remain stored because they were deferred. 
- ev_a_handle = ops.Handle(pub1, "a", "1") + ev_a_handle = ops.Handle(pub1, 'a', '1') framework.load_snapshot(ev_a_handle) - ev_b_handle = ops.Handle(pub1, "b", "2") + ev_b_handle = ops.Handle(pub1, 'b', '2') framework.load_snapshot(ev_b_handle) - ev_c_handle = ops.Handle(pub2, "c", "3") + ev_c_handle = ops.Handle(pub2, 'c', '3') framework.load_snapshot(ev_c_handle) # make sure the objects are gone before we reemit them gc.collect() framework.reemit() - obs1.done["a"] = True - obs2.done["b"] = True + obs1.done['a'] = True + obs2.done['b'] = True framework.reemit() framework.reemit() - obs1.done["b"] = True - obs2.done["a"] = True + obs1.done['b'] = True + obs2.done['a'] = True framework.reemit() - obs2.done["c"] = True + obs2.done['c'] = True framework.reemit() framework.reemit() framework.reemit() - assert " ".join(obs1.seen) == "a b a b a b b b" - assert " ".join(obs2.seen) == "a b c a b c a b c a c a c c" + assert ' '.join(obs1.seen) == 'a b a b a b b b' + assert ' '.join(obs2.seen) == 'a b c a b c a b c a c a c c' # Now the event objects must all be gone from storage. pytest.raises(NoSnapshotError, framework.load_snapshot, ev_a_handle) @@ -435,11 +437,11 @@ def __init__(self, handle: ops.Handle, n: int): self.my_n = n def snapshot(self): - return {"My N!": self.my_n} + return {'My N!': self.my_n} def restore(self, snapshot: typing.Dict[str, typing.Any]): super().restore(snapshot) - self.my_n = snapshot["My N!"] + 1 + self.my_n = snapshot['My N!'] + 1 class MyNotifier(ops.Object): foo = ops.EventSource(MyEvent) @@ -450,11 +452,11 @@ def __init__(self, parent: ops.Object, key: str): self.seen: typing.List[str] = [] def _on_foo(self, event: MyEvent): - self.seen.append(f"on_foo:{event.handle.kind}={event.my_n}") + self.seen.append(f'on_foo:{event.handle.kind}={event.my_n}') event.defer() - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') framework.observe(pub.foo, obs._on_foo) pub.foo.emit(1) @@ -470,7 +472,7 @@ def _on_foo(self, event: MyEvent): # from the one modified during the first restore (otherwise # we'd get a foo=3). 
# - assert obs.seen == ["on_foo:foo=2", "on_foo:foo=2"] + assert obs.seen == ['on_foo:foo=2', 'on_foo:foo=2'] def test_weak_observer(self, request: pytest.FixtureRequest): framework = create_framework(request) @@ -488,20 +490,20 @@ class MyNotifier(ops.Object): class MyObserver(ops.Object): def _on_foo(self, event: ops.EventBase): - observed_events.append("foo") + observed_events.append('foo') - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "2") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '2') framework.observe(pub.on.foo, obs._on_foo) pub.on.foo.emit() - assert observed_events == ["foo"] + assert observed_events == ['foo'] # Now delete the observer, and note that when we emit the event, it # doesn't update the local slice again del obs gc.collect() pub.on.foo.emit() - assert observed_events == ["foo"] + assert observed_events == ['foo'] def test_forget_and_multiple_objects(self, request: pytest.FixtureRequest): framework = create_framework(request) @@ -513,23 +515,23 @@ def snapshot(self) -> typing.Dict[str, typing.Any]: def restore(self, snapshot: typing.Dict[str, typing.Any]) -> None: raise NotImplementedError() - o1 = MyObject(framework, "path") + o1 = MyObject(framework, 'path') # Creating a second object at the same path should fail with RuntimeError with pytest.raises(RuntimeError): - o2 = MyObject(framework, "path") + o2 = MyObject(framework, 'path') # Unless we _forget the object first framework._forget(o1) - o2 = MyObject(framework, "path") + o2 = MyObject(framework, 'path') assert o1.handle.path == o2.handle.path # Deleting the tracked object should also work del o2 gc.collect() - o3 = MyObject(framework, "path") + o3 = MyObject(framework, 'path') assert o1.handle.path == o3.handle.path framework.close() # Or using a second framework framework_copy = create_framework(request) - o_copy = MyObject(framework_copy, "path") + o_copy = MyObject(framework_copy, 'path') assert o1.handle.path == o_copy.handle.path def test_forget_and_multiple_objects_with_load_snapshot( @@ -545,13 +547,13 @@ def __init__(self, parent: ops.Object, name: str): self.value = name def snapshot(self): - return {"value": self.value} + return {'value': self.value} def restore(self, snapshot: typing.Dict[str, typing.Any]): - self.value = snapshot["value"] + self.value = snapshot['value'] framework.register_type(MyObject, None, MyObject.handle_kind) - o1 = MyObject(framework, "path") + o1 = MyObject(framework, 'path') framework.save_snapshot(o1) # type: ignore framework.commit() o_handle = o1.handle @@ -569,18 +571,18 @@ def restore(self, snapshot: typing.Dict[str, typing.Any]): assert o2.value == o3.value # A loaded object also prevents direct creation of an object with pytest.raises(RuntimeError): - MyObject(framework, "path") + MyObject(framework, 'path') framework.close() # But we can create an object, or load a snapshot in a copy of the framework framework_copy1 = create_framework(request, tmpdir=tmp_path) - o_copy1 = MyObject(framework_copy1, "path") - assert o_copy1.value == "path" + o_copy1 = MyObject(framework_copy1, 'path') + assert o_copy1.value == 'path' framework_copy1.close() framework_copy2 = create_framework(request, tmpdir=tmp_path) framework_copy2.register_type(MyObject, None, MyObject.handle_kind) o_copy2 = framework_copy2.load_snapshot(o_handle) o_copy2 = typing.cast(MyObject, o_copy2) - assert o_copy2.value == "path" + assert o_copy2.value == 'path' def test_events_base(self, request: pytest.FixtureRequest): framework = create_framework(request) @@ -601,14 
+603,14 @@ def __init__(self, parent: ops.Object, key: str): self.seen: typing.List[str] = [] def _on_foo(self, event: ops.EventBase): - self.seen.append(f"on_foo:{event.handle.kind}") + self.seen.append(f'on_foo:{event.handle.kind}') event.defer() def _on_bar(self, event: ops.EventBase): - self.seen.append(f"on_bar:{event.handle.kind}") + self.seen.append(f'on_bar:{event.handle.kind}') - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') # Confirm that temporary persistence of BoundEvents doesn't cause errors, # and that events can be observed. @@ -618,9 +620,9 @@ def _on_bar(self, event: ops.EventBase): # Confirm that events can be emitted and seen. pub.on.foo.emit() - assert obs.seen == ["on_foo:foo"] - fqn = f"{pub.on.__class__.__module__}.{pub.on.__class__.__qualname__}" - assert repr(pub.on) == f"<{fqn}: bar, foo>" + assert obs.seen == ['on_foo:foo'] + fqn = f'{pub.on.__class__.__module__}.{pub.on.__class__.__qualname__}' + assert repr(pub.on) == f'<{fqn}: bar, foo>' def test_conflicting_event_attributes(self): class MyEvent(ops.EventBase): @@ -632,27 +634,29 @@ class MyEvents(ops.ObjectEvents): foo = event with pytest.raises(RuntimeError) as excinfo: + class OtherEvents(ops.ObjectEvents): # type: ignore foo = event + # Python 3.12+ raises the original exception with a note, but earlier # Python chains the exceptions. - if hasattr(excinfo.value, "__notes__"): + if hasattr(excinfo.value, '__notes__'): cause = str(excinfo.value) else: cause = str(excinfo.value.__cause__) - assert cause == \ - "EventSource(MyEvent) reused as MyEvents.foo and OtherEvents.foo" + assert cause == 'EventSource(MyEvent) reused as MyEvents.foo and OtherEvents.foo' with pytest.raises(RuntimeError) as excinfo: + class MyNotifier(ops.Object): # type: ignore on = MyEvents() # type: ignore bar = event - if hasattr(excinfo.value, "__notes__"): + + if hasattr(excinfo.value, '__notes__'): cause = str(excinfo.value) else: cause = str(excinfo.value.__cause__) - assert cause == \ - "EventSource(MyEvent) reused as MyEvents.foo and MyNotifier.bar" + assert cause == 'EventSource(MyEvent) reused as MyEvents.foo and MyNotifier.bar' def test_reemit_ignores_unknown_event_type(self, request: pytest.FixtureRequest): # The event type may have been gone for good, and nobody cares, @@ -661,7 +665,7 @@ def test_reemit_ignores_unknown_event_type(self, request: pytest.FixtureRequest) framework = create_framework(request) class MyEvent(ops.EventBase): - handle_kind = "test" + handle_kind = 'test' class MyNotifier(ops.Object): foo = ops.EventSource(MyEvent) @@ -675,14 +679,14 @@ def _on_foo(self, event: ops.EventBase): self.seen.append(event.handle) event.defer() - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') framework.observe(pub.foo, obs._on_foo) pub.foo.emit() event_handle = obs.seen[0] - assert event_handle.kind == "foo" + assert event_handle.kind == 'foo' framework.commit() framework.close() @@ -718,15 +722,15 @@ def __init__(self, parent: ops.Object, key: str): self.seen: typing.List[str] = [] def _on_foo(self, event: ops.EventBase): - self.seen.append(f"on_foo:{type(event).__name__}:{event.handle.kind}") + self.seen.append(f'on_foo:{type(event).__name__}:{event.handle.kind}') event.defer() def _on_bar(self, event: ops.EventBase): - self.seen.append(f"on_bar:{type(event).__name__}:{event.handle.kind}") + 
self.seen.append(f'on_bar:{type(event).__name__}:{event.handle.kind}') event.defer() - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') pub.on.foo.emit() pub.bar.emit() @@ -737,7 +741,7 @@ def _on_bar(self, event: ops.EventBase): pub.on.foo.emit() pub.bar.emit() - assert obs.seen == ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"] + assert obs.seen == ['on_foo:MyFoo:foo', 'on_bar:MyBar:bar'] def test_dynamic_event_types(self, request: pytest.FixtureRequest): framework = create_framework(request) @@ -758,15 +762,15 @@ def __init__(self, parent: ops.Object, key: str): self.seen: typing.List[str] = [] def _on_foo(self, event: ops.EventBase): - self.seen.append(f"on_foo:{type(event).__name__}:{event.handle.kind}") + self.seen.append(f'on_foo:{type(event).__name__}:{event.handle.kind}') event.defer() def _on_bar(self, event: ops.EventBase): - self.seen.append(f"on_bar:{type(event).__name__}:{event.handle.kind}") + self.seen.append(f'on_bar:{type(event).__name__}:{event.handle.kind}') event.defer() - pub = MyNotifier(framework, "1") - obs = MyObserver(framework, "1") + pub = MyNotifier(framework, '1') + obs = MyObserver(framework, '1') class MyFoo(ops.EventBase): pass @@ -780,8 +784,8 @@ class DeadBeefEvent(ops.EventBase): class NoneEvent(ops.EventBase): pass - pub.on_a.define_event("foo", MyFoo) - pub.on_b.define_event("bar", MyBar) + pub.on_a.define_event('foo', MyFoo) + pub.on_b.define_event('bar', MyBar) framework.observe(pub.on_a.foo, obs._on_foo) framework.observe(pub.on_b.bar, obs._on_bar) @@ -789,7 +793,7 @@ class NoneEvent(ops.EventBase): pub.on_a.foo.emit() pub.on_b.bar.emit() - assert obs.seen == ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"] + assert obs.seen == ['on_foo:MyFoo:foo', 'on_bar:MyBar:bar'] # Definitions remained local to the specific type. pytest.raises(AttributeError, lambda: pub.on_a.bar) @@ -797,15 +801,15 @@ class NoneEvent(ops.EventBase): # Try to use an event name which is not a valid python identifier. with pytest.raises(RuntimeError): - pub.on_a.define_event("dead-beef", DeadBeefEvent) + pub.on_a.define_event('dead-beef', DeadBeefEvent) # Try to use a python keyword for an event name. with pytest.raises(RuntimeError): - pub.on_a.define_event("None", NoneEvent) + pub.on_a.define_event('None', NoneEvent) # Try to override an existing attribute. 
with pytest.raises(RuntimeError): - pub.on_a.define_event("foo", MyFoo) + pub.on_a.define_event('foo', MyFoo) def test_event_key_roundtrip(self, request: pytest.FixtureRequest, tmp_path: pathlib.Path): class MyEvent(ops.EventBase): @@ -837,8 +841,8 @@ def _on_foo(self, event: MyEvent): MyObserver.has_deferred = True framework1 = create_framework(request, tmpdir=tmp_path) - pub1 = MyNotifier(framework1, "pub") - obs1 = MyObserver(framework1, "obs") + pub1 = MyNotifier(framework1, 'pub') + obs1 = MyObserver(framework1, 'obs') framework1.observe(pub1.foo, obs1._on_foo) pub1.foo.emit('first') assert obs1.seen == [('1', 'first')] @@ -848,8 +852,8 @@ def _on_foo(self, event: MyEvent): del framework1 framework2 = create_framework(request, tmpdir=tmp_path) - pub2 = MyNotifier(framework2, "pub") - obs2 = MyObserver(framework2, "obs") + pub2 = MyNotifier(framework2, 'pub') + obs2 = MyObserver(framework2, 'obs') framework2.observe(pub2.foo, obs2._on_foo) pub2.foo.emit('second') framework2.reemit() @@ -881,15 +885,15 @@ def test_ban_concurrent_frameworks( def test_snapshot_saving_restricted_to_simple_types(self, request: pytest.FixtureRequest): # this can not be saved, as it has not simple types! - to_be_saved = {"bar": TestFramework} + to_be_saved = {'bar': TestFramework} class FooEvent(ops.EventBase): - handle_kind = "test" + handle_kind = 'test' def snapshot(self): return to_be_saved - handle = ops.Handle(None, "a_foo", "some_key") + handle = ops.Handle(None, 'a_foo', 'some_key') event = FooEvent(handle) framework = create_framework(request) @@ -897,8 +901,9 @@ def snapshot(self): with pytest.raises(ValueError) as excinfo: framework.save_snapshot(event) expected = ( - "unable to save the data for FooEvent, it must contain only simple types: " - "{'bar': }") + 'unable to save the data for FooEvent, it must contain only simple types: ' + "{'bar': }" + ) assert str(excinfo.value) == expected def test_unobserved_events_dont_leave_cruft(self, request: pytest.FixtureRequest): @@ -915,7 +920,7 @@ class Emitter(ops.Object): framework = create_framework(request) e = Emitter(framework, 'key') e.on.foo.emit() - ev_1_handle = ops.Handle(e.on, "foo", "1") + ev_1_handle = ops.Handle(e.on, 'foo', '1') with pytest.raises(NoSnapshotError): framework.load_snapshot(ev_1_handle) # Committing will save the framework's state, but no other snapshots should be saved @@ -967,20 +972,22 @@ def _on_event(self, event: ops.EventBase): framework.save_snapshot(event) assert list(framework._storage.list_snapshots()) == [handle.path] o.on.event.emit() - assert list(framework._storage.notices('')) == \ - [('ObjectWithStorage[obj]/on/event[1]', 'ObjectWithStorage[obj]', '_on_event')] + assert list(framework._storage.notices('')) == [ + ('ObjectWithStorage[obj]/on/event[1]', 'ObjectWithStorage[obj]', '_on_event') + ] framework.commit() - assert sorted(framework._storage.list_snapshots()) == \ - sorted(['ObjectWithStorage[obj]/on/event[100]', - 'StoredStateData[_stored]', - 'ObjectWithStorage[obj]/StoredStateData[_stored]', - 'ObjectWithStorage[obj]/on/event[1]']) + assert sorted(framework._storage.list_snapshots()) == sorted([ + 'ObjectWithStorage[obj]/on/event[100]', + 'StoredStateData[_stored]', + 'ObjectWithStorage[obj]/StoredStateData[_stored]', + 'ObjectWithStorage[obj]/on/event[1]', + ]) framework.remove_unreferenced_events() - assert sorted(framework._storage.list_snapshots()) == \ - sorted([ - 'StoredStateData[_stored]', - 'ObjectWithStorage[obj]/StoredStateData[_stored]', - 'ObjectWithStorage[obj]/on/event[1]']) + assert 
sorted(framework._storage.list_snapshots()) == sorted([ + 'StoredStateData[_stored]', + 'ObjectWithStorage[obj]/StoredStateData[_stored]', + 'ObjectWithStorage[obj]/on/event[1]', + ]) def test_wrapped_handler(self, request: pytest.FixtureRequest): # It's fine to wrap the event handler, as long as the framework can @@ -988,14 +995,14 @@ def test_wrapped_handler(self, request: pytest.FixtureRequest): def add_arg(func: typing.Callable[..., None]) -> typing.Callable[..., None]: @functools.wraps(func) def wrapper(charm: ops.CharmBase, event: ops.EventBase): - return func(charm, event, "extra-arg") + return func(charm, event, 'extra-arg') return wrapper class MyCharm(ops.CharmBase): @add_arg def _on_event(self, _, another_arg: str): - assert another_arg == "extra-arg" + assert another_arg == 'extra-arg' framework = create_framework(request) charm = MyCharm(framework) @@ -1050,14 +1057,22 @@ def _on_event(self, _: ops.EventBase): class TestStoredState: def test_stored_dict_repr(self): - assert repr(ops.StoredDict(None, {})) == "ops.framework.StoredDict()" # type: ignore - assert repr(ops.StoredDict(None, {"a": 1}) # type: ignore - ) == "ops.framework.StoredDict({'a': 1})" + assert repr(ops.StoredDict(None, {})) == 'ops.framework.StoredDict()' # type: ignore + assert ( + repr( + ops.StoredDict(None, {'a': 1}) # type: ignore + ) + == "ops.framework.StoredDict({'a': 1})" + ) def test_stored_list_repr(self): - assert repr(ops.StoredList(None, [])) == "ops.framework.StoredList()" # type: ignore - assert repr(ops.StoredList(None, [1, 2, 3]) # type: ignore - ) == 'ops.framework.StoredList([1, 2, 3])' # type: ignore + assert repr(ops.StoredList(None, [])) == 'ops.framework.StoredList()' # type: ignore + assert ( + repr( + ops.StoredList(None, [1, 2, 3]) # type: ignore + ) + == 'ops.framework.StoredList([1, 2, 3])' + ) # type: ignore def test_stored_set_repr(self): assert repr(ops.StoredSet(None, set())) == 'ops.framework.StoredSet()' # type: ignore @@ -1129,7 +1144,7 @@ class _StoredProtocol(typing.Protocol): _stored: ops.StoredState framework = create_framework(request, tmpdir=tmp_path) - obj = cls(framework, "1") + obj = cls(framework, '1') assert isinstance(obj, _StoredProtocol) try: @@ -1137,18 +1152,18 @@ class _StoredProtocol(typing.Protocol): except AttributeError as e: assert str(e) == "attribute 'foo' is not stored" else: - pytest.fail("AttributeError not raised") + pytest.fail('AttributeError not raised') try: - obj._stored.on = "nonono" # type: ignore + obj._stored.on = 'nonono' # type: ignore except AttributeError as e: assert str(e) == "attribute 'on' is reserved and cannot be set" else: - pytest.fail("AttributeError not raised") + pytest.fail('AttributeError not raised') obj._stored.foo = 41 obj._stored.foo = 42 - obj._stored.bar = "s" + obj._stored.bar = 's' obj._stored.baz = 4.2 obj._stored.bing = True @@ -1163,10 +1178,10 @@ class _StoredProtocol(typing.Protocol): # Since this has the same absolute object handle, it will get its state back. 
framework_copy = create_framework(request, tmpdir=tmp_path) - obj_copy = cls(framework_copy, "1") + obj_copy = cls(framework_copy, '1') assert isinstance(obj_copy, _StoredProtocol) assert obj_copy._stored.foo == 42 - assert obj_copy._stored.bar == "s" + assert obj_copy._stored.bar == 's' assert obj_copy._stored.baz == 4.2 assert obj_copy._stored.bing @@ -1192,7 +1207,7 @@ class SubB(Base): z = Base(framework, None) a._stored.foo = 42 - b._stored.foo = "hello" + b._stored.foo = 'hello' z._stored.foo = {1} framework.commit() @@ -1204,7 +1219,7 @@ class SubB(Base): z2 = Base(framework2, None) assert a2._stored.foo == 42 - assert b2._stored.foo == "hello" + assert b2._stored.foo == 'hello' assert z2._stored.foo == {1} def test_two_names_one_state(self, request: pytest.FixtureRequest): @@ -1224,8 +1239,8 @@ class Mine(ops.Object): framework.close() # make sure we're not changing the object on failure - assert "_stored" not in obj.__dict__ - assert "_stored2" not in obj.__dict__ + assert '_stored' not in obj.__dict__ + assert '_stored2' not in obj.__dict__ def test_same_name_two_classes(self, request: pytest.FixtureRequest): class Base(ops.Object): @@ -1246,12 +1261,12 @@ class B(Base): a._stored.foo = 42 with pytest.raises(RuntimeError): - b._stored.foo = "xyzzy" + b._stored.foo = 'xyzzy' framework.close() # make sure we're not changing the object on failure - assert "_stored" not in b.__dict__ + assert '_stored' not in b.__dict__ def test_mutable_types_invalid(self, request: pytest.FixtureRequest): framework = create_framework(request) @@ -1261,12 +1276,16 @@ class SomeObject(ops.Object): obj = SomeObject(framework, '1') try: + class CustomObject: pass + obj._stored.foo = CustomObject() except AttributeError as e: - assert str(e) == \ - "attribute 'foo' cannot be a CustomObject: must be int/float/dict/list/etc" + assert ( + str(e) + == "attribute 'foo' cannot be a CustomObject: must be int/float/dict/list/etc" + ) else: pytest.fail('AttributeError not raised') @@ -1283,164 +1302,193 @@ def _assert_raises_type_error(a: typing.Any, b: typing.Any): with pytest.raises(TypeError): a.add(b) - test_operations: typing.List[MutableTypesTestCase] = [( - lambda: {}, - None, - {}, - lambda a, b: None, - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {}, - {'a': {}}, - {'a': {}}, - lambda a, b: a.update(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {'a': {}}, - {'b': 'c'}, - {'a': {'b': 'c'}}, - lambda a, b: a['a'].update(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {'a': {'b': 'c'}}, - {'d': 'e'}, - {'a': {'b': 'c', 'd': 'e'}}, - lambda a, b: a['a'].update(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {'a': {'b': 'c', 'd': 'e'}}, - 'd', - {'a': {'b': 'c'}}, - lambda a, b: a['a'].pop(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {'s': set()}, # type: ignore - 'a', - {'s': {'a'}}, - lambda a, b: a['s'].add(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {'s': {'a'}}, - 'a', - {'s': set()}, - lambda a, b: a['s'].discard(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: [], - None, - [], - lambda a, b: None, - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: [], - 'a', - ['a'], - lambda a, b: a.append(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: ['a'], - ['c'], - ['a', ['c']], - lambda a, b: a.append(b), - 
lambda res, expected_res: ( - _assert_equal(res, expected_res) and _assert_is_instance(res[1], ops.StoredList), - ) - ), ( - lambda: ['a', ['c']], - 'b', - ['b', 'a', ['c']], - lambda a, b: a.insert(0, b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: ['b', 'a', ['c']], - ['d'], - ['b', ['d'], 'a', ['c']], - lambda a, b: a.insert(1, b), - lambda res, expected_res: ( - _assert_equal(res, expected_res) and _assert_is_instance(res[1], ops.StoredList) + test_operations: typing.List[MutableTypesTestCase] = [ + ( + lambda: {}, + None, + {}, + lambda a, b: None, + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {}, + {'a': {}}, + {'a': {}}, + lambda a, b: a.update(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {'a': {}}, + {'b': 'c'}, + {'a': {'b': 'c'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {'a': {'b': 'c'}}, + {'d': 'e'}, + {'a': {'b': 'c', 'd': 'e'}}, + lambda a, b: a['a'].update(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {'a': {'b': 'c', 'd': 'e'}}, + 'd', + {'a': {'b': 'c'}}, + lambda a, b: a['a'].pop(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {'s': set()}, # type: ignore + 'a', + {'s': {'a'}}, + lambda a, b: a['s'].add(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {'s': {'a'}}, + 'a', + {'s': set()}, + lambda a, b: a['s'].discard(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: [], + None, + [], + lambda a, b: None, + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: [], + 'a', + ['a'], + lambda a, b: a.append(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: ['a'], + ['c'], + ['a', ['c']], + lambda a, b: a.append(b), + lambda res, expected_res: ( + _assert_equal(res, expected_res) + and _assert_is_instance(res[1], ops.StoredList), + ), ), - ), ( - lambda: ['b', 'a', ['c']], - ['d'], - ['b', ['d'], ['c']], - # a[1] = b - lambda a, b: a.__setitem__(1, b), - lambda res, expected_res: ( - _assert_equal(res, expected_res) and _assert_is_instance(res[1], ops.StoredList) + ( + lambda: ['a', ['c']], + 'b', + ['b', 'a', ['c']], + lambda a, b: a.insert(0, b), + lambda res, expected_res: _assert_equal(res, expected_res), ), - ), ( - lambda: ['b', ['d'], 'a', ['c']], - 0, - [['d'], 'a', ['c']], - lambda a, b: a.pop(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: [['d'], 'a', ['c']], - ['d'], - ['a', ['c']], - lambda a, b: a.remove(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: ['a', ['c']], - 'd', - ['a', ['c', 'd']], - lambda a, b: a[1].append(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: ['a', ['c', 'd']], - 1, - ['a', ['c']], - lambda a, b: a[1].pop(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: ['a', ['c']], - 'd', - ['a', ['c', 'd']], - lambda a, b: a[1].insert(1, b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: ['a', ['c', 'd']], - 'd', - ['a', ['c']], - lambda a, b: a[1].remove(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: set(), - None, - set(), - lambda a, b: None, - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: set(), - 'a', - {'a'}, - lambda a, b: a.add(b), - 
lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: {'a'}, - 'a', - set(), - lambda a, b: a.discard(b), - lambda res, expected_res: _assert_equal(res, expected_res) - ), ( - lambda: set(), - {'a'}, - set(), - # Nested sets are not allowed as sets themselves are not hashable. - lambda a, b: _assert_raises_type_error(a, b), - lambda res, expected_res: _assert_equal(res, expected_res) - )] + ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], 'a', ['c']], + lambda a, b: a.insert(1, b), + lambda res, expected_res: ( + _assert_equal(res, expected_res) + and _assert_is_instance(res[1], ops.StoredList) + ), + ), + ( + lambda: ['b', 'a', ['c']], + ['d'], + ['b', ['d'], ['c']], + # a[1] = b + lambda a, b: a.__setitem__(1, b), + lambda res, expected_res: ( + _assert_equal(res, expected_res) + and _assert_is_instance(res[1], ops.StoredList) + ), + ), + ( + lambda: ['b', ['d'], 'a', ['c']], + 0, + [['d'], 'a', ['c']], + lambda a, b: a.pop(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: [['d'], 'a', ['c']], + ['d'], + ['a', ['c']], + lambda a, b: a.remove(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].append(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: ['a', ['c', 'd']], + 1, + ['a', ['c']], + lambda a, b: a[1].pop(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: ['a', ['c']], + 'd', + ['a', ['c', 'd']], + lambda a, b: a[1].insert(1, b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: ['a', ['c', 'd']], + 'd', + ['a', ['c']], + lambda a, b: a[1].remove(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: set(), + None, + set(), + lambda a, b: None, + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: set(), + 'a', + {'a'}, + lambda a, b: a.add(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: {'a'}, + 'a', + set(), + lambda a, b: a.discard(b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ( + lambda: set(), + {'a'}, + set(), + # Nested sets are not allowed as sets themselves are not hashable. + lambda a, b: _assert_raises_type_error(a, b), + lambda res, expected_res: _assert_equal(res, expected_res), + ), + ] class SomeObject(ops.Object): _stored = ops.StoredState() class WrappedFramework(ops.Framework): - def __init__(self, - store: typing.Union[SQLiteStorage, JujuStorage], - charm_dir: typing.Union[str, Path], - meta: ops.CharmMeta, - model: ops.Model, - event_name: str): + def __init__( + self, + store: typing.Union[SQLiteStorage, JujuStorage], + charm_dir: typing.Union[str, Path], + meta: ops.CharmMeta, + model: ops.Model, + event_name: str, + ): super().__init__(store, charm_dir, meta, model, event_name) self.snapshots: typing.List[typing.Any] = [] @@ -1451,8 +1499,8 @@ def save_snapshot(self, value: typing.Union[ops.StoredStateData, ops.EventBase]) # Validate correctness of modification operations. 
for get_a, b, expected_res, op, validate_op in test_operations: - storage = SQLiteStorage(tmp_path / "framework.data") - framework = WrappedFramework(storage, tmp_path, None, None, "foo") # type: ignore + storage = SQLiteStorage(tmp_path / 'framework.data') + framework = WrappedFramework(storage, tmp_path, None, None, 'foo') # type: ignore obj = SomeObject(framework, '1') obj._stored.a = get_a() @@ -1477,9 +1525,8 @@ def save_snapshot(self, value: typing.Union[ops.StoredStateData, ops.EventBase]) framework.commit() framework.close() - storage_copy = SQLiteStorage(tmp_path / "framework.data") - framework_copy = WrappedFramework( - storage_copy, tmp_path, None, None, "foo") # type: ignore + storage_copy = SQLiteStorage(tmp_path / 'framework.data') + framework_copy = WrappedFramework(storage_copy, tmp_path, None, None, 'foo') # type: ignore obj_copy2 = SomeObject(framework_copy, '1') @@ -1493,86 +1540,34 @@ def save_snapshot(self, value: typing.Union[ops.StoredStateData, ops.EventBase]) framework_copy.close() def test_comparison_operations(self, request: pytest.FixtureRequest): - test_operations: typing.List[ComparisonOperationsTestCase] = [( - {"1"}, - {"1", "2"}, - lambda a, b: a < b, - True, - False, - ), ( - {"1"}, - {"1", "2"}, - lambda a, b: a > b, - False, - True - ), ( - # Empty set comparison. - set(), - set(), - lambda a, b: a == b, - True, - True - ), ( - {"a", "c"}, - {"c", "a"}, - lambda a, b: a == b, - True, - True - ), ( - dict(), - dict(), - lambda a, b: a == b, - True, - True - ), ( - {"1": "2"}, - {"1": "2"}, - lambda a, b: a == b, - True, - True - ), ( - {"1": "2"}, - {"1": "3"}, - lambda a, b: a == b, - False, - False - ), ( - [], - [], - lambda a, b: a == b, - True, - True - ), ( - [1, 2], - [1, 2], - lambda a, b: a == b, - True, - True - ), ( - [1, 2, 5, 6], - [1, 2, 5, 8, 10], - lambda a, b: a <= b, - True, - False - ), ( - [1, 2, 5, 6], - [1, 2, 5, 8, 10], - lambda a, b: a < b, - True, - False - ), ( - [1, 2, 5, 8], - [1, 2, 5, 6, 10], - lambda a, b: a > b, - True, - False - ), ( - [1, 2, 5, 8], - [1, 2, 5, 6, 10], - lambda a, b: a >= b, - True, - False - )] + test_operations: typing.List[ComparisonOperationsTestCase] = [ + ( + {'1'}, + {'1', '2'}, + lambda a, b: a < b, + True, + False, + ), + ({'1'}, {'1', '2'}, lambda a, b: a > b, False, True), + ( + # Empty set comparison. 
+ set(), + set(), + lambda a, b: a == b, + True, + True, + ), + ({'a', 'c'}, {'c', 'a'}, lambda a, b: a == b, True, True), + (dict(), dict(), lambda a, b: a == b, True, True), + ({'1': '2'}, {'1': '2'}, lambda a, b: a == b, True, True), + ({'1': '2'}, {'1': '3'}, lambda a, b: a == b, False, False), + ([], [], lambda a, b: a == b, True, True), + ([1, 2], [1, 2], lambda a, b: a == b, True, True), + ([1, 2, 5, 6], [1, 2, 5, 8, 10], lambda a, b: a <= b, True, False), + ([1, 2, 5, 6], [1, 2, 5, 8, 10], lambda a, b: a < b, True, False), + ([1, 2, 5, 8], [1, 2, 5, 6, 10], lambda a, b: a > b, True, False), + ([1, 2, 5, 8], [1, 2, 5, 6, 10], lambda a, b: a >= b, True, False), + ] class SomeObject(ops.Object): _stored = ops.StoredState() @@ -1586,32 +1581,13 @@ class SomeObject(ops.Object): assert op(b, obj._stored.a) == op_ba def test_set_operations(self, request: pytest.FixtureRequest): - test_operations: typing.List[SetOperationsTestCase] = [( - {"1"}, - lambda a, b: a | b, - {"1", "a", "b"}, - {"1", "a", "b"} - ), ( - {"a", "c"}, - lambda a, b: a - b, - {"b"}, - {"c"} - ), ( - {"a", "c"}, - lambda a, b: a & b, - {"a"}, - {"a"} - ), ( - {"a", "c", "d"}, - lambda a, b: a ^ b, - {"b", "c", "d"}, - {"b", "c", "d"} - ), ( - set(), - lambda a, b: set(a), - {"a", "b"}, - set() - )] + test_operations: typing.List[SetOperationsTestCase] = [ + ({'1'}, lambda a, b: a | b, {'1', 'a', 'b'}, {'1', 'a', 'b'}), + ({'a', 'c'}, lambda a, b: a - b, {'b'}, {'c'}), + ({'a', 'c'}, lambda a, b: a & b, {'a'}, {'a'}), + ({'a', 'c', 'd'}, lambda a, b: a ^ b, {'b', 'c', 'd'}, {'b', 'c', 'd'}), + (set(), lambda a, b: set(a), {'a', 'b'}, set()), + ] class SomeObject(ops.Object): _stored = ops.StoredState() @@ -1624,12 +1600,13 @@ class SomeObject(ops.Object): # original sets are not changed or used as a result. 
for i, (variable_operand, operation, ab_res, ba_res) in enumerate(test_operations): obj = SomeObject(framework, str(i)) - obj._stored.set = {"a", "b"} + obj._stored.set = {'a', 'b'} assert isinstance(obj._stored.set, ops.StoredSet) for a, b, expected in [ - (obj._stored.set, variable_operand, ab_res), - (variable_operand, obj._stored.set, ba_res)]: + (obj._stored.set, variable_operand, ab_res), + (variable_operand, obj._stored.set, ba_res), + ]: old_a = set(a) old_b = set(b) @@ -1648,6 +1625,7 @@ def test_set_default(self, request: pytest.FixtureRequest): class StatefulObject(ops.Object): _stored = ops.StoredState() + parent = StatefulObject(framework, 'key') parent._stored.set_default(foo=1) assert parent._stored.foo == 1 @@ -1681,7 +1659,6 @@ def callback_method(self, event: ops.EventBase): class TestBreakpoint: - def test_ignored( self, request: pytest.FixtureRequest, @@ -1699,10 +1676,11 @@ def test_ignored( framework.breakpoint() warning_logs = [ - record for record in caplog.records if record.levelno == logging.WARNING] + record for record in caplog.records if record.levelno == logging.WARNING + ] assert len(warning_logs) == 0 assert mock.call_count == 0 - assert fake_stderr.getvalue() == "" + assert fake_stderr.getvalue() == '' def test_pdb_properly_called(self, request: pytest.FixtureRequest): # The debugger needs to leave the user in the frame where the breakpoint is executed, @@ -1785,17 +1763,20 @@ def test_breakpoint_builtin_unset(self, request: pytest.FixtureRequest): assert mock.call_count == 0 - @pytest.mark.parametrize("name", [ - 'foobar', - 'foo-bar-baz', - 'foo-------bar', - 'foo123', - '778', - '77-xx', - 'a-b', - 'ab', - 'x', - ]) + @pytest.mark.parametrize( + 'name', + [ + 'foobar', + 'foo-bar-baz', + 'foo-------bar', + 'foo123', + '778', + '77-xx', + 'a-b', + 'ab', + 'x', + ], + ) def test_breakpoint_good_names(self, request: pytest.FixtureRequest, name: str): framework = create_framework(request) # Name rules: @@ -1803,20 +1784,22 @@ def test_breakpoint_good_names(self, request: pytest.FixtureRequest, name: str): # - only contain lowercase alphanumeric characters, or the hyphen "-" framework.breakpoint(name) - @pytest.mark.parametrize("name", [ - '', - '.', - '-', - '...foo', - 'foo.bar', - 'bar--' - 'FOO', - 'FooBar', - 'foo bar', - 'foo_bar', - '/foobar', - 'break-here-☚', - ]) + @pytest.mark.parametrize( + 'name', + [ + '', + '.', + '-', + '...foo', + 'foo.bar', + 'bar--' 'FOO', + 'FooBar', + 'foo bar', + 'foo_bar', + '/foobar', + 'break-here-☚', + ], + ) def test_breakpoint_bad_names(self, request: pytest.FixtureRequest, name: str): framework = create_framework(request) msg = 'breakpoint names must look like "foo" or "foo-bar"' @@ -1824,10 +1807,13 @@ def test_breakpoint_bad_names(self, request: pytest.FixtureRequest, name: str): framework.breakpoint(name) assert str(excinfo.value) == msg - @pytest.mark.parametrize("name", [ - 'all', - 'hook', - ]) + @pytest.mark.parametrize( + 'name', + [ + 'all', + 'hook', + ], + ) def test_breakpoint_reserved_names(self, request: pytest.FixtureRequest, name: str): framework = create_framework(request) msg = 'breakpoint names "all" and "hook" are reserved' @@ -1835,11 +1821,14 @@ def test_breakpoint_reserved_names(self, request: pytest.FixtureRequest, name: s framework.breakpoint(name) assert str(excinfo.value) == msg - @pytest.mark.parametrize("name", [ - 123, - 1.1, - False, - ]) + @pytest.mark.parametrize( + 'name', + [ + 123, + 1.1, + False, + ], + ) def test_breakpoint_not_really_names(self, request: 
pytest.FixtureRequest, name: typing.Any): framework = create_framework(request) with pytest.raises(TypeError) as excinfo: @@ -1882,12 +1871,13 @@ def test_named_indicated_unnamed( self.check_trace_set(request, 'some-breakpoint', None, 0) expected_log = [ - "WARNING:ops.framework:Breakpoint None skipped " + 'WARNING:ops.framework:Breakpoint None skipped ' "(not found in the requested breakpoints: {'some-breakpoint'})" ] assert expected_log == [ - f"{record.levelname}:{record.name}:{record.message}" for record in caplog.records] + f'{record.levelname}:{record.name}:{record.message}' for record in caplog.records + ] def test_named_indicated_somethingelse( self, @@ -1902,7 +1892,8 @@ def test_named_indicated_somethingelse( "(not found in the requested breakpoints: {'some-breakpoint'})" ] assert expected_log == [ - f"{record.levelname}:{record.name}:{record.message}" for record in caplog.records] + f'{record.levelname}:{record.name}:{record.message}' for record in caplog.records + ] def test_named_indicated_ingroup(self, request: pytest.FixtureRequest): # A multiple breakpoint was indicated, and the framework call used a name among those. @@ -1918,7 +1909,6 @@ def test_named_indicated_hook(self, request: pytest.FixtureRequest): class TestDebugHook: - def test_envvar_parsing_missing(self, request: pytest.FixtureRequest): with patch.dict(os.environ): os.environ.pop('JUJU_DEBUG_AT', None) @@ -1944,8 +1934,8 @@ def test_basic_interruption_enabled(self, request: pytest.FixtureRequest): framework = create_framework(request) framework._juju_debug_at = {'hook'} - publisher = ops.CharmEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = ops.CharmEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.install, observer.callback_method) with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr: @@ -1977,10 +1967,10 @@ def test_interruption_enabled_with_all( class CustomEvents(ops.ObjectEvents): foobar_action = ops.EventSource(ops.ActionEvent) - publisher = CustomEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = CustomEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.foobar_action, observer.callback_method) - fake_script.write('action-get', "echo {}") + fake_script.write('action-get', 'echo {}') with patch('sys.stderr', new_callable=io.StringIO): with patch('pdb.runcall') as mock: @@ -2001,10 +1991,10 @@ def test_actions_are_interrupted( class CustomEvents(ops.ObjectEvents): foobar_action = ops.EventSource(ops.ActionEvent) - publisher = CustomEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = CustomEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.foobar_action, observer.callback_method) - fake_script.write('action-get', "echo {}") + fake_script.write('action-get', 'echo {}') with patch('sys.stderr', new_callable=io.StringIO): with patch('pdb.runcall') as mock: @@ -2016,13 +2006,14 @@ class CustomEvents(ops.ObjectEvents): def test_internal_events_not_interrupted(self, request: pytest.FixtureRequest): class MyNotifier(ops.Object): """Generic notifier for the tests.""" + bar = ops.EventSource(ops.EventBase) framework = create_framework(request) framework._juju_debug_at = {'hook'} - publisher = MyNotifier(framework, "1") - observer = GenericObserver(framework, "1") + publisher = MyNotifier(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.bar, 
observer.callback_method) with patch('pdb.runcall') as mock: @@ -2035,8 +2026,8 @@ def test_envvar_mixed(self, request: pytest.FixtureRequest): framework = create_framework(request) framework._juju_debug_at = {'foo', 'hook', 'all', 'whatever'} - publisher = ops.CharmEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = ops.CharmEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.install, observer.callback_method) with patch('sys.stderr', new_callable=io.StringIO): @@ -2050,8 +2041,8 @@ def test_no_registered_method(self, request: pytest.FixtureRequest): framework = create_framework(request) framework._juju_debug_at = {'hook'} - publisher = ops.CharmEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = ops.CharmEvents(framework, '1') + observer = GenericObserver(framework, '1') with patch('pdb.runcall') as mock: publisher.install.emit() @@ -2063,8 +2054,8 @@ def test_envvar_nohook(self, request: pytest.FixtureRequest): framework = create_framework(request) framework._juju_debug_at = {'something-else'} - publisher = ops.CharmEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = ops.CharmEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.install, observer.callback_method) with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'something-else'}): @@ -2078,8 +2069,8 @@ def test_envvar_missing(self, request: pytest.FixtureRequest): framework = create_framework(request) framework._juju_debug_at = set() - publisher = ops.CharmEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = ops.CharmEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.install, observer.callback_method) with patch('pdb.runcall') as mock: @@ -2092,8 +2083,8 @@ def test_welcome_message_not_multiple(self, request: pytest.FixtureRequest): framework = create_framework(request) framework._juju_debug_at = {'hook'} - publisher = ops.CharmEvents(framework, "1") - observer = GenericObserver(framework, "1") + publisher = ops.CharmEvents(framework, '1') + observer = GenericObserver(framework, '1') framework.observe(publisher.install, observer.callback_method) with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr: diff --git a/test/test_helpers.py b/test/test_helpers.py index ee75268f9..393fd7551 100644 --- a/test/test_helpers.py +++ b/test/test_helpers.py @@ -30,8 +30,8 @@ def fake_script(test_case: unittest.TestCase, name: str, content: str): if not hasattr(test_case, 'fake_script_path'): fake_script_path = tempfile.mkdtemp('-fake_script') - old_path = os.environ["PATH"] - os.environ['PATH'] = os.pathsep.join([fake_script_path, os.environ["PATH"]]) + old_path = os.environ['PATH'] + os.environ['PATH'] = os.pathsep.join([fake_script_path, os.environ['PATH']]) def cleanup(): shutil.rmtree(fake_script_path) @@ -51,18 +51,21 @@ def cleanup(): # Before executing the provided script, dump the provided arguments in calls.txt. # ASCII 1E is RS 'record separator', and 1C is FS 'file separator', which seem appropriate. 
f.write( # type: ignore - '''#!/bin/sh + """#!/bin/sh {{ printf {name}; printf "\\036%s" "$@"; printf "\\034"; }} >> {path}/calls.txt -{content}'''.format_map(template_args)) +{content}""".format_map(template_args) + ) os.chmod(str(path), 0o755) # type: ignore # noqa: S103 # TODO: this hardcodes the path to bash.exe, which works for now but might # need to be set via environ or something like that. - path.with_suffix(".bat").write_text( # type: ignore - f'@"C:\\Program Files\\git\\bin\\bash.exe" {path} %*\n') + path.with_suffix('.bat').write_text( # type: ignore + f'@"C:\\Program Files\\git\\bin\\bash.exe" {path} %*\n' + ) -def fake_script_calls(test_case: unittest.TestCase, - clear: bool = False) -> typing.List[typing.List[str]]: +def fake_script_calls( + test_case: unittest.TestCase, clear: bool = False +) -> typing.List[typing.List[str]]: calls_file: pathlib.Path = test_case.fake_script_path / 'calls.txt' # type: ignore if not calls_file.exists(): # type: ignore return [] @@ -77,13 +80,13 @@ def fake_script_calls(test_case: unittest.TestCase, def create_framework( - request: pytest.FixtureRequest, - *, - meta: typing.Optional[ops.CharmMeta] = None): + request: pytest.FixtureRequest, *, meta: typing.Optional[ops.CharmMeta] = None +): env_backup = os.environ.copy() os.environ['PATH'] = os.pathsep.join([ str(pathlib.Path(__file__).parent / 'bin'), - os.environ['PATH']]) + os.environ['PATH'], + ]) os.environ['JUJU_UNIT_NAME'] = 'local/0' tmpdir = pathlib.Path(tempfile.mkdtemp()) @@ -148,14 +151,16 @@ def write(self, name: str, content: str): # Before executing the provided script, dump the provided arguments in calls.txt. # RS 'record separator' (octal 036 in ASCII), FS 'file separator' (octal 034 in ASCII). f.write( - '''#!/bin/sh + """#!/bin/sh {{ printf {name}; printf "\\036%s" "$@"; printf "\\034"; }} >> {path}/calls.txt -{content}'''.format_map(template_args)) +{content}""".format_map(template_args) + ) path.chmod(0o755) # TODO: this hardcodes the path to bash.exe, which works for now but might # need to be set via environ or something like that. 
- path.with_suffix(".bat").write_text( # type: ignore - f'@"C:\\Program Files\\git\\bin\\bash.exe" {path} %*\n') + path.with_suffix('.bat').write_text( # type: ignore + f'@"C:\\Program Files\\git\\bin\\bash.exe" {path} %*\n' + ) def calls(self, clear: bool = False) -> typing.List[typing.List[str]]: calls_file: pathlib.Path = self.path / 'calls.txt' @@ -172,7 +177,6 @@ def calls(self, clear: bool = False) -> typing.List[typing.List[str]]: class FakeScriptTest(unittest.TestCase): - def test_fake_script_works(self): fake_script(self, 'foo', 'echo foo runs') fake_script(self, 'bar', 'echo bar runs') diff --git a/test/test_infra.py b/test/test_infra.py index 1e8639f30..6efab1806 100644 --- a/test/test_infra.py +++ b/test/test_infra.py @@ -20,17 +20,20 @@ import pytest -@pytest.mark.parametrize("mod_name", [ - 'charm', - 'framework', - 'main', - 'model', - 'testing', -]) +@pytest.mark.parametrize( + 'mod_name', + [ + 'charm', + 'framework', + 'main', + 'model', + 'testing', + ], +) def test_import(mod_name: str, tmp_path: pathlib.Path): - template = "from ops import {module_name}" + template = 'from ops import {module_name}' - testfile = tmp_path / "foo.py" + testfile = tmp_path / 'foo.py' with open(testfile, 'w', encoding='utf8') as fh: fh.write(template.format(module_name=mod_name)) diff --git a/test/test_jujuversion.py b/test/test_jujuversion.py index a2399f74f..07d42079b 100644 --- a/test/test_jujuversion.py +++ b/test/test_jujuversion.py @@ -20,17 +20,20 @@ import ops -@pytest.mark.parametrize("vs,major,minor,tag,patch,build", [ - ("0.0.0", 0, 0, '', 0, 0), - ("0.0.2", 0, 0, '', 2, 0), - ("0.1.0", 0, 1, '', 0, 0), - ("0.2.3", 0, 2, '', 3, 0), - ("10.234.3456", 10, 234, '', 3456, 0), - ("10.234.3456.1", 10, 234, '', 3456, 1), - ("1.21-alpha12", 1, 21, 'alpha', 12, 0), - ("1.21-alpha1.34", 1, 21, 'alpha', 1, 34), - ("2.7", 2, 7, '', 0, 0) -]) +@pytest.mark.parametrize( + 'vs,major,minor,tag,patch,build', + [ + ('0.0.0', 0, 0, '', 0, 0), + ('0.0.2', 0, 0, '', 2, 0), + ('0.1.0', 0, 1, '', 0, 0), + ('0.2.3', 0, 2, '', 3, 0), + ('10.234.3456', 10, 234, '', 3456, 0), + ('10.234.3456.1', 10, 234, '', 3456, 1), + ('1.21-alpha12', 1, 21, 'alpha', 12, 0), + ('1.21-alpha1.34', 1, 21, 'alpha', 1, 34), + ('2.7', 2, 7, '', 0, 0), + ], +) def test_parsing(vs: str, major: int, minor: int, tag: str, patch: int, build: int): v = ops.JujuVersion(vs) assert v.major == major @@ -97,77 +100,94 @@ def test_supports_exec_service_context(): assert ops.JujuVersion('3.4.0').supports_exec_service_context -@pytest.mark.parametrize("invalid_version", [ - "xyz", - "foo.bar", - "foo.bar.baz", - "dead.beef.ca.fe", - "1234567890.2.1", # The major version is too long. - "0.2..1", # Two periods next to each other. - "1.21.alpha1", # Tag comes after period. - "1.21-alpha", # No patch number but a tag is present. - "1.21-alpha1beta", # Non-numeric string after the patch number. - "1.21-alpha-dev", # Tag duplication. - "1.21-alpha_dev3", # Underscore in a tag. - "1.21-alpha123dev3", # Non-numeric string after the patch number. -]) +@pytest.mark.parametrize( + 'invalid_version', + [ + 'xyz', + 'foo.bar', + 'foo.bar.baz', + 'dead.beef.ca.fe', + # The major version is too long. + '1234567890.2.1', + # Two periods next to each other. + '0.2..1', + # Tag comes after period. + '1.21.alpha1', + # No patch number but a tag is present. + '1.21-alpha', + # Non-numeric string after the patch number. + '1.21-alpha1beta', + # Tag duplication. + '1.21-alpha-dev', + # Underscore in a tag. 
+ '1.21-alpha_dev3', + # Non-numeric string after the patch number. + '1.21-alpha123dev3', + ], +) def test_parsing_errors(invalid_version: str): with pytest.raises(RuntimeError): ops.JujuVersion(invalid_version) -@pytest.mark.parametrize("a,b,expected", [ - ("1.0.0", "1.0.0", True), - ("01.0.0", "1.0.0", True), - ("10.0.0", "9.0.0", False), - ("1.0.0", "1.0.1", False), - ("1.0.1", "1.0.0", False), - ("1.0.0", "1.1.0", False), - ("1.1.0", "1.0.0", False), - ("1.0.0", "2.0.0", False), - ("1.2-alpha1", "1.2.0", False), - ("1.2-alpha2", "1.2-alpha1", False), - ("1.2-alpha2.1", "1.2-alpha2", False), - ("1.2-alpha2.2", "1.2-alpha2.1", False), - ("1.2-beta1", "1.2-alpha1", False), - ("1.2-beta1", "1.2-alpha2.1", False), - ("1.2-beta1", "1.2.0", False), - ("1.2.1", "1.2.0", False), - ("2.0.0", "1.0.0", False), - ("2.0.0.0", "2.0.0", True), - ("2.0.0.0", "2.0.0.0", True), - ("2.0.0.1", "2.0.0.0", False), - ("2.0.1.10", "2.0.0.0", False), -]) +@pytest.mark.parametrize( + 'a,b,expected', + [ + ('1.0.0', '1.0.0', True), + ('01.0.0', '1.0.0', True), + ('10.0.0', '9.0.0', False), + ('1.0.0', '1.0.1', False), + ('1.0.1', '1.0.0', False), + ('1.0.0', '1.1.0', False), + ('1.1.0', '1.0.0', False), + ('1.0.0', '2.0.0', False), + ('1.2-alpha1', '1.2.0', False), + ('1.2-alpha2', '1.2-alpha1', False), + ('1.2-alpha2.1', '1.2-alpha2', False), + ('1.2-alpha2.2', '1.2-alpha2.1', False), + ('1.2-beta1', '1.2-alpha1', False), + ('1.2-beta1', '1.2-alpha2.1', False), + ('1.2-beta1', '1.2.0', False), + ('1.2.1', '1.2.0', False), + ('2.0.0', '1.0.0', False), + ('2.0.0.0', '2.0.0', True), + ('2.0.0.0', '2.0.0.0', True), + ('2.0.0.1', '2.0.0.0', False), + ('2.0.1.10', '2.0.0.0', False), + ], +) def test_equality(a: str, b: str, expected: bool): assert (ops.JujuVersion(a) == ops.JujuVersion(b)) == expected assert (ops.JujuVersion(a) == b) == expected -@pytest.mark.parametrize("a,b,expected_strict,expected_weak", [ - ("1.0.0", "1.0.0", False, True), - ("01.0.0", "1.0.0", False, True), - ("10.0.0", "9.0.0", False, False), - ("1.0.0", "1.0.1", True, True), - ("1.0.1", "1.0.0", False, False), - ("1.0.0", "1.1.0", True, True), - ("1.1.0", "1.0.0", False, False), - ("1.0.0", "2.0.0", True, True), - ("1.2-alpha1", "1.2.0", True, True), - ("1.2-alpha2", "1.2-alpha1", False, False), - ("1.2-alpha2.1", "1.2-alpha2", False, False), - ("1.2-alpha2.2", "1.2-alpha2.1", False, False), - ("1.2-beta1", "1.2-alpha1", False, False), - ("1.2-beta1", "1.2-alpha2.1", False, False), - ("1.2-beta1", "1.2.0", True, True), - ("1.2.1", "1.2.0", False, False), - ("2.0.0", "1.0.0", False, False), - ("2.0.0.0", "2.0.0", False, True), - ("2.0.0.0", "2.0.0.0", False, True), - ("2.0.0.1", "2.0.0.0", False, False), - ("2.0.1.10", "2.0.0.0", False, False), - ("2.10.0", "2.8.0", False, False), -]) +@pytest.mark.parametrize( + 'a,b,expected_strict,expected_weak', + [ + ('1.0.0', '1.0.0', False, True), + ('01.0.0', '1.0.0', False, True), + ('10.0.0', '9.0.0', False, False), + ('1.0.0', '1.0.1', True, True), + ('1.0.1', '1.0.0', False, False), + ('1.0.0', '1.1.0', True, True), + ('1.1.0', '1.0.0', False, False), + ('1.0.0', '2.0.0', True, True), + ('1.2-alpha1', '1.2.0', True, True), + ('1.2-alpha2', '1.2-alpha1', False, False), + ('1.2-alpha2.1', '1.2-alpha2', False, False), + ('1.2-alpha2.2', '1.2-alpha2.1', False, False), + ('1.2-beta1', '1.2-alpha1', False, False), + ('1.2-beta1', '1.2-alpha2.1', False, False), + ('1.2-beta1', '1.2.0', True, True), + ('1.2.1', '1.2.0', False, False), + ('2.0.0', '1.0.0', False, False), + ('2.0.0.0', '2.0.0', False, True), 
+ ('2.0.0.0', '2.0.0.0', False, True), + ('2.0.0.1', '2.0.0.0', False, False), + ('2.0.1.10', '2.0.0.0', False, False), + ('2.10.0', '2.8.0', False, False), + ], +) def test_comparison(a: str, b: str, expected_strict: bool, expected_weak: bool): assert (ops.JujuVersion(a) < ops.JujuVersion(b)) == expected_strict assert (ops.JujuVersion(a) <= ops.JujuVersion(b)) == expected_weak diff --git a/test/test_lib.py b/test/test_lib.py index 7ac6a7995..4459f0022 100644 --- a/test/test_lib.py +++ b/test/test_lib.py @@ -32,7 +32,7 @@ # ModuleSpec to pass when we know it will not be used but we want the # type to match. -_dummy_spec = ModuleSpec("", loader=None) +_dummy_spec = ModuleSpec('', loader=None) def _mklib(topdir: str, pkgname: str, libname: str) -> Path: @@ -64,8 +64,9 @@ def _mklib(topdir: str, pkgname: str, libname: str) -> Path: def _flatten(specgen: typing.Iterable[ModuleSpec]) -> typing.List[str]: - return sorted([os.path.dirname(spec.origin if spec.origin is not None else "") - for spec in specgen]) + return sorted([ + os.path.dirname(spec.origin if spec.origin is not None else '') for spec in specgen + ]) class TestLibFinder: @@ -73,16 +74,17 @@ def test_single(self, tmp_path: pathlib.Path): tmpdir = str(tmp_path) assert list(ops.lib._find_all_specs([tmpdir])) == [] - _mklib(tmpdir, "foo", "bar").write_text("") + _mklib(tmpdir, 'foo', 'bar').write_text('') - assert _flatten(ops.lib._find_all_specs([tmpdir])) == \ - [os.path.join(tmpdir, 'foo', 'opslib', 'bar')] + assert _flatten(ops.lib._find_all_specs([tmpdir])) == [ + os.path.join(tmpdir, 'foo', 'opslib', 'bar') + ] def test_multi(self, tmp_path: pathlib.Path): - tmp_dir_a = tmp_path / "temp_dir1" + tmp_dir_a = tmp_path / 'temp_dir1' tmp_dir_a.mkdir() - tmp_dir_b = tmp_path / "temp_dir2" + tmp_dir_b = tmp_path / 'temp_dir2' tmp_dir_b.mkdir() if tmp_dir_a > tmp_dir_b: @@ -92,19 +94,19 @@ def test_multi(self, tmp_path: pathlib.Path): dirs = [str(tmp_dir_a), str(tmp_dir_b)] for top in dirs: - for pkg in ["bar", "baz"]: - for lib in ["meep", "quux"]: - _mklib(top, pkg, lib).write_text("") + for pkg in ['bar', 'baz']: + for lib in ['meep', 'quux']: + _mklib(top, pkg, lib).write_text('') expected = [ - os.path.join(tmp_dir_a, "bar", "opslib", "meep"), - os.path.join(tmp_dir_a, "bar", "opslib", "quux"), - os.path.join(tmp_dir_a, "baz", "opslib", "meep"), - os.path.join(tmp_dir_a, "baz", "opslib", "quux"), - os.path.join(tmp_dir_b, "bar", "opslib", "meep"), - os.path.join(tmp_dir_b, "bar", "opslib", "quux"), - os.path.join(tmp_dir_b, "baz", "opslib", "meep"), - os.path.join(tmp_dir_b, "baz", "opslib", "quux"), + os.path.join(tmp_dir_a, 'bar', 'opslib', 'meep'), + os.path.join(tmp_dir_a, 'bar', 'opslib', 'quux'), + os.path.join(tmp_dir_a, 'baz', 'opslib', 'meep'), + os.path.join(tmp_dir_a, 'baz', 'opslib', 'quux'), + os.path.join(tmp_dir_b, 'bar', 'opslib', 'meep'), + os.path.join(tmp_dir_b, 'bar', 'opslib', 'quux'), + os.path.join(tmp_dir_b, 'baz', 'opslib', 'meep'), + os.path.join(tmp_dir_b, 'baz', 'opslib', 'quux'), ] assert _flatten(ops.lib._find_all_specs(dirs)) == expected @@ -113,28 +115,28 @@ def test_cwd(self, tmp_path: pathlib.Path): tmpcwd = str(tmp_path) os.chdir(tmpcwd) - dirs = [""] + dirs = [''] assert list(ops.lib._find_all_specs(dirs)) == [] - _mklib(tmpcwd, "foo", "bar").write_text("") + _mklib(tmpcwd, 'foo', 'bar').write_text('') paths = _flatten(ops.lib._find_all_specs(dirs)) - assert [os.path.relpath(p) for p in paths] == \ - [os.path.join('foo', 'opslib', 'bar')] + assert [os.path.relpath(p) for p in paths] == 
[os.path.join('foo', 'opslib', 'bar')] def test_bogus_topdir(self, tmp_path: pathlib.Path): """Check that having one bogus dir in sys.path doesn't cause the finder to abort.""" tmpdir = str(tmp_path) - dirs = [tmpdir, "/bogus"] + dirs = [tmpdir, '/bogus'] assert list(ops.lib._find_all_specs(dirs)) == [] - _mklib(tmpdir, "foo", "bar").write_text("") + _mklib(tmpdir, 'foo', 'bar').write_text('') - assert _flatten(ops.lib._find_all_specs(dirs)) == \ - [os.path.join(tmpdir, 'foo', 'opslib', 'bar')] + assert _flatten(ops.lib._find_all_specs(dirs)) == [ + os.path.join(tmpdir, 'foo', 'opslib', 'bar') + ] def test_bogus_opsdir(self, tmp_path: pathlib.Path): """Check that having one bogus opslib doesn't cause the finder to abort.""" @@ -142,14 +144,15 @@ def test_bogus_opsdir(self, tmp_path: pathlib.Path): assert list(ops.lib._find_all_specs([tmpdir])) == [] - _mklib(tmpdir, "foo", "bar").write_text('') + _mklib(tmpdir, 'foo', 'bar').write_text('') path = Path(tmpdir) / 'baz' path.mkdir() (path / 'opslib').write_text('') - assert _flatten(ops.lib._find_all_specs([tmpdir])) == \ - [os.path.join(tmpdir, 'foo', 'opslib', 'bar')] + assert _flatten(ops.lib._find_all_specs([tmpdir])) == [ + os.path.join(tmpdir, 'foo', 'opslib', 'bar') + ] def test_namespace(self, tmp_path: pathlib.Path): """Check that namespace packages are ignored.""" @@ -157,7 +160,7 @@ def test_namespace(self, tmp_path: pathlib.Path): assert list(ops.lib._find_all_specs([tmpdir])) == [] - _mklib(tmpdir, "foo", "bar") # no __init__.py => a namespace package + _mklib(tmpdir, 'foo', 'bar') # no __init__.py => a namespace package assert list(ops.lib._find_all_specs([tmpdir])) == [] @@ -176,75 +179,100 @@ def _mkmod( def test_simple(self, tmp_path: pathlib.Path): """Check that we can load a reasonably straightforward lib.""" - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = "foo" LIBEACH = float('-inf') LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" LIBANANA = True - ''') + """, + ) lib = ops.lib._parse_lib(m) - assert lib == ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 2, 42) + assert lib == ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 2, 42) # also check the repr while we're at it assert repr(lib) == '<_Lib foo by alice@example.com, API 2, patch 42>' def test_libauthor_has_dashes(self, tmp_path: pathlib.Path): - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = "foo" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice-someone@example.com" LIBANANA = True - ''') + """, + ) lib = ops.lib._parse_lib(m) - assert lib == ops.lib._Lib(_dummy_spec, "foo", "alice-someone@example.com", 2, 42) + assert lib == ops.lib._Lib(_dummy_spec, 'foo', 'alice-someone@example.com', 2, 42) # also check the repr while we're at it assert repr(lib) == '<_Lib foo by alice-someone@example.com, API 2, patch 42>' def test_lib_definitions_without_spaces(self, tmp_path: pathlib.Path): - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME="foo" LIBAPI=2 LIBPATCH=42 LIBAUTHOR="alice@example.com" LIBANANA=True - ''') + """, + ) lib = ops.lib._parse_lib(m) - assert lib == ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 2, 42) + assert lib == ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 2, 42) # also check the repr while we're at it assert repr(lib) == '<_Lib foo by alice@example.com, API 2, patch 42>' def test_lib_definitions_trailing_comments(self, tmp_path: pathlib.Path): - m = self._mkmod(tmp_path, 'foo', ''' + 
m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = "foo" # comment style 1 LIBAPI = 2 = comment style 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com"anything after the quote is a comment LIBANANA = True - ''') + """, + ) lib = ops.lib._parse_lib(m) - assert lib == ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 2, 42) + assert lib == ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 2, 42) # also check the repr while we're at it assert repr(lib) == '<_Lib foo by alice@example.com, API 2, patch 42>' def test_incomplete(self, tmp_path: pathlib.Path): """Check that if anything is missing, nothing is returned.""" - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = "foo" LIBAPI = 2 LIBPATCH = 42 - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_too_long(self, tmp_path: pathlib.Path): """Check that if the file is too long, nothing is returned.""" - m = self._mkmod(tmp_path, 'foo', '\n' * ops.lib._MAX_LIB_LINES + ''' + m = self._mkmod( + tmp_path, + 'foo', + '\n' * ops.lib._MAX_LIB_LINES + + """ LIBNAME = "foo" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_no_origin(self): @@ -262,52 +290,72 @@ def test_bogus_origin(self): def test_bogus_lib(self, tmp_path: pathlib.Path): """Check our behaviour when the lib is messed up.""" # note the syntax error (that is carefully chosen to pass the initial regexp) - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = "1' LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_name_is_number(self, tmp_path: pathlib.Path): """Check our behaviour when the name in the lib is a number.""" - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = 1 LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_api_is_string(self, tmp_path: pathlib.Path): """Check our behaviour when the api in the lib is a string.""" - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = 'foo' LIBAPI = '2' LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_patch_is_string(self, tmp_path: pathlib.Path): """Check our behaviour when the patch in the lib is a string.""" - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = 'foo' LIBAPI = 2 LIBPATCH = '42' LIBAUTHOR = "alice@example.com" - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_author_is_number(self, tmp_path: pathlib.Path): """Check our behaviour when the author in the lib is a number.""" - m = self._mkmod(tmp_path, 'foo', ''' + m = self._mkmod( + tmp_path, + 'foo', + """ LIBNAME = 'foo' LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = 43 - ''') + """, + ) assert ops.lib._parse_lib(m) is None def test_other_encoding(self, tmp_path: pathlib.Path): @@ -319,50 +367,58 @@ def test_other_encoding(self, tmp_path: pathlib.Path): assert m.origin is not None return with open(m.origin, 'w', encoding='latin-1') as f: - f.write(dedent(''' + f.write( + dedent(""" LIBNAME = "foo" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" LIBANANA = "Ñoño" - ''')) + """) + ) assert ops.lib._parse_lib(m) is None class TestLib: def test_lib_comparison(self): - assert ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 0) != \ - ops.lib._Lib(_dummy_spec, "bar", 
"bob@example.com", 0, 1) - assert ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) == \ - ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) - - assert ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 0) < \ - ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) - assert ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 0, 1) < \ - ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) - assert ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) < \ - ops.lib._Lib(_dummy_spec, "foo", "bob@example.com", 1, 1) - assert ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 1) < \ - ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) + assert ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 1, 0) != ops.lib._Lib( + _dummy_spec, 'bar', 'bob@example.com', 0, 1 + ) + assert ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 1, 1) == ops.lib._Lib( + _dummy_spec, 'foo', 'alice@example.com', 1, 1 + ) + + assert ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 1, 0) < ops.lib._Lib( + _dummy_spec, 'foo', 'alice@example.com', 1, 1 + ) + assert ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 0, 1) < ops.lib._Lib( + _dummy_spec, 'foo', 'alice@example.com', 1, 1 + ) + assert ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 1, 1) < ops.lib._Lib( + _dummy_spec, 'foo', 'bob@example.com', 1, 1 + ) + assert ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 1) < ops.lib._Lib( + _dummy_spec, 'foo', 'alice@example.com', 1, 1 + ) with pytest.raises(TypeError): - 42 < ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 1) # type:ignore # noqa: B015, SIM300 + 42 < ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 1) # type:ignore # noqa: B015, SIM300 with pytest.raises(TypeError): - ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 1) < 42 # type: ignore # noqa: B015 + ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 1) < 42 # type: ignore # noqa: B015 # these two might be surprising in that they don't raise an exception, # but they are correct: our __eq__ bailing means Python falls back to # its default of checking object identity. 
- assert ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 1) != 42 - assert ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 1) != 42 + assert ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 1) != 42 + assert ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 1) != 42 - @pytest.mark.parametrize("execution_number", range(20)) + @pytest.mark.parametrize('execution_number', range(20)) def test_lib_order(self, execution_number: range): - a = ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 0) - b = ops.lib._Lib(_dummy_spec, "bar", "alice@example.com", 1, 1) - c = ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 0) - d = ops.lib._Lib(_dummy_spec, "foo", "alice@example.com", 1, 1) - e = ops.lib._Lib(_dummy_spec, "foo", "bob@example.com", 1, 1) + a = ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 0) + b = ops.lib._Lib(_dummy_spec, 'bar', 'alice@example.com', 1, 1) + c = ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 1, 0) + d = ops.lib._Lib(_dummy_spec, 'foo', 'alice@example.com', 1, 1) + e = ops.lib._Lib(_dummy_spec, 'foo', 'bob@example.com', 1, 1) libs = [a, b, c, d, e] shuffle(libs) @@ -392,12 +448,14 @@ def test_use_finds_subs(self, tmp_path: pathlib.Path): tmpdir = str(tmp_path) sys.path = [tmpdir] - _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + _mklib(tmpdir, 'foo', 'bar').write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - """)) + """) + ) # autoimport to reset things ops.lib.autoimport() @@ -409,9 +467,9 @@ def test_use_finds_subs(self, tmp_path: pathlib.Path): assert baz.LIBPATCH == 42 assert baz.LIBAUTHOR == 'alice@example.com' - @pytest.mark.parametrize("pkg_a", ["foo", "fooA"]) - @pytest.mark.parametrize("lib_a", ["bar", "barA"]) - @pytest.mark.parametrize("patch_a", [38, 42]) + @pytest.mark.parametrize('pkg_a', ['foo', 'fooA']) + @pytest.mark.parametrize('lib_a', ['bar', 'barA']) + @pytest.mark.parametrize('patch_a', [38, 42]) def test_use_finds_best_same_toplevel( self, tmp_path: pathlib.Path, @@ -420,8 +478,8 @@ def test_use_finds_best_same_toplevel( patch_a: int, ): """Test that ops.lib.use("baz") works when there are two baz in the same toplevel.""" - pkg_b = "foo" - lib_b = "bar" + pkg_b = 'foo' + lib_b = 'bar' patch_b = 40 if (pkg_a, lib_a) == (pkg_b, lib_b): @@ -430,19 +488,23 @@ def test_use_finds_best_same_toplevel( tmpdir = str(tmp_path) sys.path = [tmpdir] - _mklib(tmpdir, pkg_a, lib_a).write_text(dedent(""" + _mklib(tmpdir, pkg_a, lib_a).write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = {} LIBAUTHOR = "alice@example.com" - """).format(patch_a)) + """).format(patch_a) + ) - _mklib(tmpdir, pkg_b, lib_b).write_text(dedent(""" + _mklib(tmpdir, pkg_b, lib_b).write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = {} LIBAUTHOR = "alice@example.com" - """).format(patch_b)) + """).format(patch_b) + ) # autoimport to reset things ops.lib.autoimport() @@ -454,9 +516,9 @@ def test_use_finds_best_same_toplevel( assert max(patch_a, patch_b) == baz.LIBPATCH assert baz.LIBAUTHOR == 'alice@example.com' - @pytest.mark.parametrize("pkg_a", ["foo", "fooA"]) - @pytest.mark.parametrize("lib_a", ["bar", "barA"]) - @pytest.mark.parametrize("patch_a", [38, 42]) + @pytest.mark.parametrize('pkg_a', ['foo', 'fooA']) + @pytest.mark.parametrize('lib_a', ['bar', 'barA']) + @pytest.mark.parametrize('patch_a', [38, 42]) def test_use_finds_best_diff_toplevel( self, tmp_path: pathlib.Path, @@ -465,31 +527,35 @@ def test_use_finds_best_diff_toplevel( patch_a: int, ): 
"""Test that ops.lib.use("baz") works when there are two baz in the different toplevels.""" - pkg_b = "foo" - lib_b = "bar" + pkg_b = 'foo' + lib_b = 'bar' patch_b = 40 - tmp_dir_a = tmp_path / "temp_dir1" + tmp_dir_a = tmp_path / 'temp_dir1' tmp_dir_a.mkdir() - tmp_dir_b = tmp_path / "temp_dir2" + tmp_dir_b = tmp_path / 'temp_dir2' tmp_dir_b.mkdir() sys.path = [tmp_dir_a, tmp_dir_b] - _mklib(str(tmp_dir_a), pkg_a, lib_a).write_text(dedent(""" + _mklib(str(tmp_dir_a), pkg_a, lib_a).write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = {} LIBAUTHOR = "alice@example.com" - """).format(patch_a)) + """).format(patch_a) + ) - _mklib(str(tmp_dir_b), pkg_b, lib_b).write_text(dedent(""" + _mklib(str(tmp_dir_b), pkg_b, lib_b).write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = {} LIBAUTHOR = "alice@example.com" - """).format(patch_b)) + """).format(patch_b) + ) # autoimport to reset things ops.lib.autoimport() @@ -509,12 +575,14 @@ def test_from_scratch(self, tmp_path: pathlib.Path): tmpdir = str(tmp_path) sys.path = [tmpdir] - _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + _mklib(tmpdir, 'foo', 'bar').write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - """)) + """) + ) # hard reset ops.lib._libraries = None @@ -532,18 +600,22 @@ def _test_submodule( tmpdir = str(tmp_path) sys.path = [tmpdir] - path = _mklib(tmpdir, "foo", "bar") - path.write_text(dedent(""" + path = _mklib(tmpdir, 'foo', 'bar') + path.write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" from {} import quux - """).format("." if relative else "foo.opslib.bar")) - (path.parent / 'quux.py').write_text(dedent(""" + """).format('.' if relative else 'foo.opslib.bar') + ) + (path.parent / 'quux.py').write_text( + dedent(""" this = 42 - """)) + """) + ) # reset ops.lib.autoimport() @@ -563,12 +635,14 @@ def test_others_found(self, tmp_path: pathlib.Path): tmpdir = str(tmp_path) sys.path = [tmpdir] - _mklib(tmpdir, "foo", "bar").write_text(dedent(""" + _mklib(tmpdir, 'foo', 'bar').write_text( + dedent(""" LIBNAME = "baz" LIBAPI = 2 LIBPATCH = 42 LIBAUTHOR = "alice@example.com" - """)) + """) + ) # reload ops.lib.autoimport() diff --git a/test/test_log.py b/test/test_log.py index 11f052a09..5682703d0 100644 --- a/test/test_log.py +++ b/test/test_log.py @@ -26,7 +26,6 @@ class FakeModelBackend(_ModelBackend): - def __init__(self): self._calls: typing.List[typing.Tuple[str, str]] = [] @@ -54,18 +53,23 @@ def logger(): class TestLogging: - @pytest.mark.parametrize("message,result", [ - ('critical', ('CRITICAL', 'critical')), - ('error', ('ERROR', 'error')), - ('warning', ('WARNING', 'warning')), - ('info', ('INFO', 'info')), - ('debug', ('DEBUG', 'debug')), - ]) - def test_default_logging(self, - backend: FakeModelBackend, - logger: logging.Logger, - message: str, - result: typing.Tuple[str, str]): + @pytest.mark.parametrize( + 'message,result', + [ + ('critical', ('CRITICAL', 'critical')), + ('error', ('ERROR', 'error')), + ('warning', ('WARNING', 'warning')), + ('info', ('INFO', 'info')), + ('debug', ('DEBUG', 'debug')), + ], + ) + def test_default_logging( + self, + backend: FakeModelBackend, + logger: logging.Logger, + message: str, + result: typing.Tuple[str, str], + ): ops.log.setup_root_logging(backend) assert logger.level == logging.DEBUG assert isinstance(logger.handlers[-1], ops.log.JujuLogHandler) @@ -97,7 +101,7 @@ def test_no_stderr_without_debug(self, backend: FakeModelBackend, logger: loggin ('WARNING', 'warning 
message'), ('CRITICAL', 'critical message'), ] - assert buffer.getvalue() == "" + assert buffer.getvalue() == '' def test_debug_logging(self, backend: FakeModelBackend, logger: logging.Logger): buffer = io.StringIO() @@ -114,11 +118,11 @@ def test_debug_logging(self, backend: FakeModelBackend, logger: logging.Logger): ('CRITICAL', 'critical message'), ] assert re.search( - r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG debug message\n" - r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO info message\n" - r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING warning message\n" - r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL critical message\n", - buffer.getvalue() + r'\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG debug message\n' + r'\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO info message\n' + r'\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING warning message\n' + r'\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL critical message\n', + buffer.getvalue(), ) def test_reduced_logging(self, backend: FakeModelBackend, logger: logging.Logger): @@ -146,7 +150,7 @@ def test_long_string_logging(self, backend: FakeModelBackend, logger: logging.Lo calls = backend.calls() assert len(calls) == 3 # Verify that we note that we are splitting the log message. - assert "Splitting into multiple chunks" in calls[0][1] + assert 'Splitting into multiple chunks' in calls[0][1] # Verify that it got split into the expected chunks. assert len(calls[1][1]) == MAX_LOG_LINE_LEN diff --git a/test/test_main.py b/test/test_main.py index b5bee2f72..48cfbbc46 100644 --- a/test/test_main.py +++ b/test/test_main.py @@ -38,10 +38,13 @@ # This relies on the expected repository structure to find a path to # source of the charm under test. -TEST_CHARM_DIR = Path(f"{__file__}/../charms/test_main").resolve() +TEST_CHARM_DIR = Path(f'{__file__}/../charms/test_main').resolve() VERSION_LOGLINE = [ - 'juju-log', '--log-level', 'DEBUG', '--', + 'juju-log', + '--log-level', + 'DEBUG', + '--', f'ops {ops.__version__} up and running.', ] @@ -94,11 +97,11 @@ def __init__( @patch('ops.main._emit_charm_event', new=lambda *a, **kw: None) # type: ignore @patch('ops.charm._evaluate_status', new=lambda *a, **kw: None) # type: ignore class TestCharmInit: - @patch('sys.stderr', new_callable=io.StringIO) def test_breakpoint(self, fake_stderr: io.StringIO): class MyCharm(ops.CharmBase): pass + self._check(MyCharm, extra_environ={'JUJU_DEBUG_AT': 'all'}) with patch('pdb.Pdb.set_trace') as mock: @@ -110,6 +113,7 @@ class MyCharm(ops.CharmBase): def test_no_debug_breakpoint(self): class MyCharm(ops.CharmBase): pass + self._check(MyCharm, extra_environ={'JUJU_DEBUG_AT': ''}) with patch('pdb.Pdb.set_trace') as mock: @@ -145,7 +149,6 @@ def _check( def test_init_signature_passthrough(self): class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -155,7 +158,6 @@ def __init__(self, framework: ops.Framework): def test_init_signature_old_key_argument(self): class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework, somekey: typing.Any): super().__init__(framework, somekey) # type: ignore @@ -165,7 +167,6 @@ def __init__(self, framework: ops.Framework, somekey: typing.Any): def test_init_signature_only_framework(self): class MyCharm(ops.CharmBase): - def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -178,8 +179,9 @@ def test_storage_no_storage(self): with patch('ops.storage.juju_backend_available') as juju_backend_available: 
juju_backend_available.return_value = False with pytest.raises( - RuntimeError, - match='charm set use_juju_for_storage=True, but Juju .* does not support it'): + RuntimeError, + match='charm set use_juju_for_storage=True, but Juju .* does not support it', + ): self._check(ops.CharmBase, use_juju_for_storage=True) def test_storage_with_storage(self): @@ -197,12 +199,13 @@ def test_controller_storage_deprecated(self): self._check(ops.CharmBase, use_juju_for_storage=True) -@patch('sys.argv', new=("hooks/config-changed",)) +@patch('sys.argv', new=('hooks/config-changed',)) @patch('ops.main._Manager._setup_root_logging', new=lambda *a, **kw: None) # type: ignore @patch('ops.charm._evaluate_status', new=lambda *a, **kw: None) # type: ignore class TestDispatch: def _check(self, *, with_dispatch: bool = False, dispatch_path: str = ''): """Helper for below tests.""" + class MyCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -252,9 +255,7 @@ def test_with_dispatch_path_but_no_dispatch(self): assert event == 'foo' -_event_test = typing.List[typing.Tuple[ - EventSpec, - typing.Dict[str, typing.Union[str, int, None]]]] +_event_test = typing.List[typing.Tuple[EventSpec, typing.Dict[str, typing.Union[str, int, None]]]] @pytest.fixture @@ -263,7 +264,6 @@ def fake_script(request: pytest.FixtureRequest): class _TestMain(abc.ABC): - @abc.abstractmethod def _setup_entry_point(self, directory: Path, entry_point: str): """Set up the given entry point in the given directory. @@ -285,7 +285,7 @@ def _call_event( return NotImplemented @abc.abstractmethod - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_setup_event_links(self): """Test auto-creation of symlinks caused by initial events. @@ -302,10 +302,12 @@ def setup_charm(self, request: pytest.FixtureRequest, fake_script: FakeScript): # We use a subclass temporarily to prevent these side effects from leaking. 
class TestCharmEvents(ops.CharmEvents): pass + ops.CharmBase.on = TestCharmEvents() # type: ignore def cleanup(): ops.CharmBase.on = ops.CharmEvents() # type: ignore + request.addfinalizer(cleanup) fake_script.write('is-leader', 'echo true') @@ -330,7 +332,7 @@ def cleanup(): self.charm_exec_path = os.path.relpath(charm_path, str(self.hooks_dir)) shutil.copytree(str(TEST_CHARM_DIR), str(self.JUJU_CHARM_DIR)) - charm_spec = importlib.util.spec_from_file_location("charm", charm_path) + charm_spec = importlib.util.spec_from_file_location('charm', charm_path) assert charm_spec is not None self.charm_module = importlib.util.module_from_spec(charm_spec) assert charm_spec.loader is not None @@ -360,14 +362,13 @@ def _read_and_clear_state( if self._charm_state_file.stat().st_size: storage = SQLiteStorage(self._charm_state_file) with (self.JUJU_CHARM_DIR / 'metadata.yaml').open() as m: - af = (self.JUJU_CHARM_DIR / 'actions.yaml') + af = self.JUJU_CHARM_DIR / 'actions.yaml' if af.exists(): with af.open() as a: meta = ops.CharmMeta.from_yaml(m, a) else: meta = ops.CharmMeta.from_yaml(m) - framework = ops.Framework(storage, self.JUJU_CHARM_DIR, meta, - None, event_name) # type: ignore + framework = ops.Framework(storage, self.JUJU_CHARM_DIR, meta, None, event_name) # type: ignore class ThisCharmEvents(MyCharmEvents): pass @@ -452,7 +453,7 @@ def _simulate_event(self, fake_script: FakeScript, event_spec: EventSpec): 'JUJU_NOTICE_KEY': event_spec.notice_key, }) if issubclass(event_spec.event_type, ops.ActionEvent): - event_filename = event_spec.event_name[:-len('_action')].replace('_', '-') + event_filename = event_spec.event_name[: -len('_action')].replace('_', '-') assert event_spec.env_var is not None env.update({ event_spec.env_var: event_filename, @@ -470,7 +471,7 @@ def _simulate_event(self, fake_script: FakeScript, event_spec: EventSpec): self._call_event(fake_script, Path(event_dir, event_filename), env) return self._read_and_clear_state(event_spec.event_name) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_event_reemitted(self, fake_script: FakeScript): # First run "install" to make sure all hooks are set up. state = self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) @@ -478,22 +479,19 @@ def test_event_reemitted(self, fake_script: FakeScript): assert list(state.observed_event_types) == ['InstallEvent'] state = self._simulate_event( - fake_script, - EventSpec(ops.ConfigChangedEvent, 'config-changed') + fake_script, EventSpec(ops.ConfigChangedEvent, 'config-changed') ) assert isinstance(state, ops.BoundStoredState) assert list(state.observed_event_types) == ['ConfigChangedEvent'] # Re-emit should pick the deferred config-changed. 
state = self._simulate_event( - fake_script, - EventSpec(ops.UpdateStatusEvent, 'update-status') + fake_script, EventSpec(ops.UpdateStatusEvent, 'update-status') ) assert isinstance(state, ops.BoundStoredState) - assert list(state.observed_event_types) == \ - ['ConfigChangedEvent', 'UpdateStatusEvent'] + assert list(state.observed_event_types) == ['ConfigChangedEvent', 'UpdateStatusEvent'] - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_no_reemission_on_collect_metrics(self, fake_script: FakeScript): fake_script.write('add-metric', 'exit 0') @@ -503,8 +501,7 @@ def test_no_reemission_on_collect_metrics(self, fake_script: FakeScript): assert list(state.observed_event_types) == ['InstallEvent'] state = self._simulate_event( - fake_script, - EventSpec(ops.ConfigChangedEvent, 'config-changed') + fake_script, EventSpec(ops.ConfigChangedEvent, 'config-changed') ) assert isinstance(state, ops.BoundStoredState) assert list(state.observed_event_types) == ['ConfigChangedEvent'] @@ -512,13 +509,12 @@ def test_no_reemission_on_collect_metrics(self, fake_script: FakeScript): # Re-emit should not pick the deferred config-changed because # collect-metrics runs in a restricted context. state = self._simulate_event( - fake_script, - EventSpec(ops.CollectMetricsEvent, 'collect-metrics') + fake_script, EventSpec(ops.CollectMetricsEvent, 'collect-metrics') ) assert isinstance(state, ops.BoundStoredState) assert list(state.observed_event_types) == ['CollectMetricsEvent'] - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_multiple_events_handled(self, fake_script: FakeScript): self._prepare_actions() @@ -526,141 +522,207 @@ def test_multiple_events_handled(self, fake_script: FakeScript): # Sample events with a different amount of dashes used # and with endpoints from different sections of metadata.yaml - events_under_test: _event_test = [( - EventSpec(ops.InstallEvent, 'install'), - {}, - ), ( - EventSpec(ops.StartEvent, 'start'), - {}, - ), ( - EventSpec(ops.UpdateStatusEvent, 'update_status'), - {}, - ), ( - EventSpec(ops.LeaderSettingsChangedEvent, 'leader_settings_changed'), - {}, - ), ( - EventSpec(ops.RelationJoinedEvent, 'db_relation_joined', - relation_id=1, - remote_app='remote', - remote_unit='remote/0'), - {'relation_name': 'db', - 'relation_id': 1, - 'app_name': 'remote', - 'unit_name': 'remote/0'}, - ), ( - EventSpec(ops.RelationChangedEvent, 'mon_relation_changed', - relation_id=2, - remote_app='remote', - remote_unit='remote/0'), - {'relation_name': 'mon', - 'relation_id': 2, - 'app_name': 'remote', - 'unit_name': 'remote/0'}, - ), ( - EventSpec(ops.RelationChangedEvent, 'mon_relation_changed', - relation_id=2, - remote_app='remote', - remote_unit=None), - {'relation_name': 'mon', - 'relation_id': 2, - 'app_name': 'remote'}, - ), ( - EventSpec(ops.RelationDepartedEvent, 'mon_relation_departed', - relation_id=2, - remote_app='remote', - remote_unit='remote/0', - departing_unit_name='remote/42'), - {'relation_name': 'mon', - 'relation_id': 2, - 'app_name': 'remote', - 'unit_name': 'remote/0', - 'departing_unit': 'remote/42'}, - ), ( - EventSpec(ops.RelationBrokenEvent, 'ha_relation_broken', - relation_id=3), - {'relation_name': 'ha', - 'relation_id': 3}, - ), ( - # Events without a remote app specified (for Juju < 2.7). 
- EventSpec(ops.RelationJoinedEvent, 'db_relation_joined', - relation_id=1, - remote_unit='remote/0'), - {'relation_name': 'db', - 'relation_id': 1, - 'app_name': 'remote', - 'unit_name': 'remote/0'}, - ), ( - EventSpec(ops.RelationChangedEvent, 'mon_relation_changed', - relation_id=2, - remote_unit='remote/0'), - {'relation_name': 'mon', - 'relation_id': 2, - 'app_name': 'remote', - 'unit_name': 'remote/0'}, - ), ( - EventSpec(ops.RelationDepartedEvent, 'mon_relation_departed', - relation_id=2, - remote_unit='remote/0', - departing_unit_name='remote/42'), - {'relation_name': 'mon', - 'relation_id': 2, - 'app_name': 'remote', - 'unit_name': 'remote/0', - 'departing_unit': 'remote/42'}, - ), ( - EventSpec(ops.ActionEvent, 'start_action', - env_var='JUJU_ACTION_NAME', - set_in_env={'JUJU_ACTION_UUID': '1'}), - {}, - ), ( - EventSpec(ops.ActionEvent, 'foo_bar_action', - env_var='JUJU_ACTION_NAME', - set_in_env={'JUJU_ACTION_UUID': '2'}), - {}, - ), ( - EventSpec(ops.PebbleReadyEvent, 'test_pebble_ready', - workload_name='test'), - {'container_name': 'test'}, - ), ( - EventSpec(ops.PebbleCustomNoticeEvent, 'test_pebble_custom_notice', - workload_name='test', - notice_id='123', - notice_type='custom', - notice_key='example.com/a'), - {'container_name': 'test', - 'notice_id': '123', - 'notice_type': 'custom', - 'notice_key': 'example.com/a'}, - ), ( - EventSpec(ops.SecretChangedEvent, 'secret_changed', - secret_id='secret:12345', - secret_label='foo'), - {'id': 'secret:12345', - 'label': 'foo'} - ), ( - EventSpec(ops.SecretRotateEvent, 'secret_rotate', - secret_id='secret:12345', - secret_label='foo', - secret_revision='42'), - {'id': 'secret:12345', - 'label': 'foo'} - ), ( - EventSpec(ops.SecretRemoveEvent, 'secret_remove', - secret_id='secret:12345', - secret_label='foo', - secret_revision='42'), - {'id': 'secret:12345', - 'label': 'foo', - 'revision': 42} - ), ( - EventSpec(ops.SecretExpiredEvent, 'secret_expired', - secret_id='secret:12345', - secret_label='foo', - secret_revision='42'), - {'id': 'secret:12345', - 'label': 'foo', - 'revision': 42} - )] + events_under_test: _event_test = [ + ( + EventSpec(ops.InstallEvent, 'install'), + {}, + ), + ( + EventSpec(ops.StartEvent, 'start'), + {}, + ), + ( + EventSpec(ops.UpdateStatusEvent, 'update_status'), + {}, + ), + ( + EventSpec(ops.LeaderSettingsChangedEvent, 'leader_settings_changed'), + {}, + ), + ( + EventSpec( + ops.RelationJoinedEvent, + 'db_relation_joined', + relation_id=1, + remote_app='remote', + remote_unit='remote/0', + ), + { + 'relation_name': 'db', + 'relation_id': 1, + 'app_name': 'remote', + 'unit_name': 'remote/0', + }, + ), + ( + EventSpec( + ops.RelationChangedEvent, + 'mon_relation_changed', + relation_id=2, + remote_app='remote', + remote_unit='remote/0', + ), + { + 'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0', + }, + ), + ( + EventSpec( + ops.RelationChangedEvent, + 'mon_relation_changed', + relation_id=2, + remote_app='remote', + remote_unit=None, + ), + {'relation_name': 'mon', 'relation_id': 2, 'app_name': 'remote'}, + ), + ( + EventSpec( + ops.RelationDepartedEvent, + 'mon_relation_departed', + relation_id=2, + remote_app='remote', + remote_unit='remote/0', + departing_unit_name='remote/42', + ), + { + 'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0', + 'departing_unit': 'remote/42', + }, + ), + ( + EventSpec(ops.RelationBrokenEvent, 'ha_relation_broken', relation_id=3), + {'relation_name': 'ha', 'relation_id': 3}, + ), + 
( + # Events without a remote app specified (for Juju < 2.7). + EventSpec( + ops.RelationJoinedEvent, + 'db_relation_joined', + relation_id=1, + remote_unit='remote/0', + ), + { + 'relation_name': 'db', + 'relation_id': 1, + 'app_name': 'remote', + 'unit_name': 'remote/0', + }, + ), + ( + EventSpec( + ops.RelationChangedEvent, + 'mon_relation_changed', + relation_id=2, + remote_unit='remote/0', + ), + { + 'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0', + }, + ), + ( + EventSpec( + ops.RelationDepartedEvent, + 'mon_relation_departed', + relation_id=2, + remote_unit='remote/0', + departing_unit_name='remote/42', + ), + { + 'relation_name': 'mon', + 'relation_id': 2, + 'app_name': 'remote', + 'unit_name': 'remote/0', + 'departing_unit': 'remote/42', + }, + ), + ( + EventSpec( + ops.ActionEvent, + 'start_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '1'}, + ), + {}, + ), + ( + EventSpec( + ops.ActionEvent, + 'foo_bar_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '2'}, + ), + {}, + ), + ( + EventSpec(ops.PebbleReadyEvent, 'test_pebble_ready', workload_name='test'), + {'container_name': 'test'}, + ), + ( + EventSpec( + ops.PebbleCustomNoticeEvent, + 'test_pebble_custom_notice', + workload_name='test', + notice_id='123', + notice_type='custom', + notice_key='example.com/a', + ), + { + 'container_name': 'test', + 'notice_id': '123', + 'notice_type': 'custom', + 'notice_key': 'example.com/a', + }, + ), + ( + EventSpec( + ops.SecretChangedEvent, + 'secret_changed', + secret_id='secret:12345', + secret_label='foo', + ), + {'id': 'secret:12345', 'label': 'foo'}, + ), + ( + EventSpec( + ops.SecretRotateEvent, + 'secret_rotate', + secret_id='secret:12345', + secret_label='foo', + secret_revision='42', + ), + {'id': 'secret:12345', 'label': 'foo'}, + ), + ( + EventSpec( + ops.SecretRemoveEvent, + 'secret_remove', + secret_id='secret:12345', + secret_label='foo', + secret_revision='42', + ), + {'id': 'secret:12345', 'label': 'foo', 'revision': 42}, + ), + ( + EventSpec( + ops.SecretExpiredEvent, + 'secret_expired', + secret_id='secret:12345', + secret_label='foo', + secret_revision='42', + ), + {'id': 'secret:12345', 'label': 'foo', 'revision': 42}, + ), + ] logger.debug('Expected events %s', events_under_test) @@ -672,7 +734,7 @@ def test_multiple_events_handled(self, fake_script: FakeScript): state = self._simulate_event(fake_script, event_spec) assert isinstance(state, ops.BoundStoredState) - state_key = f"on_{event_spec.event_name}" + state_key = f'on_{event_spec.event_name}' handled_events = getattr(state, state_key, []) # Make sure that a handler for that event was called once. 
@@ -684,10 +746,9 @@ def test_multiple_events_handled(self, fake_script: FakeScript): assert list(state.observed_event_types) == [event_spec.event_type.__name__] if expected_event_data: - assert getattr(state, f"{event_spec.event_name}_data") == \ - expected_event_data + assert getattr(state, f'{event_spec.event_name}_data') == expected_event_data - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_event_not_implemented(self, fake_script: FakeScript): """Make sure events without implementation do not cause non-zero exit.""" # Simulate a scenario where there is a symlink for an event that @@ -697,34 +758,30 @@ def test_event_not_implemented(self, fake_script: FakeScript): hook_path.symlink_to('install') try: - self._simulate_event( - fake_script, - EventSpec(ops.HookEvent, 'not-implemented-event') - ) + self._simulate_event(fake_script, EventSpec(ops.HookEvent, 'not-implemented-event')) except subprocess.CalledProcessError: - pytest.fail('Event simulation for an unsupported event' - ' results in a non-zero exit code returned') + pytest.fail( + 'Event simulation for an unsupported event' + ' results in a non-zero exit code returned' + ) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_no_actions(self, fake_script: FakeScript): (self.JUJU_CHARM_DIR / 'actions.yaml').unlink() self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_empty_actions(self, fake_script: FakeScript): (self.JUJU_CHARM_DIR / 'actions.yaml').write_text('') self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_collect_metrics(self, fake_script: FakeScript): fake_script.write('add-metric', 'exit 0') self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) # Clear the calls during 'install' fake_script.calls(clear=True) - self._simulate_event( - fake_script, - EventSpec(ops.CollectMetricsEvent, 'collect_metrics') - ) + self._simulate_event(fake_script, EventSpec(ops.CollectMetricsEvent, 'collect_metrics')) expected = [ VERSION_LOGLINE, @@ -736,7 +793,7 @@ def test_collect_metrics(self, fake_script: FakeScript): assert calls == expected - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_custom_event(self, fake_script: FakeScript): self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) # Clear the calls during 'install' @@ -744,10 +801,8 @@ def test_custom_event(self, fake_script: FakeScript): self._simulate_event( fake_script, EventSpec( - ops.UpdateStatusEvent, - 'update-status', - set_in_env={'EMIT_CUSTOM_EVENT': "1"} - ) + ops.UpdateStatusEvent, 'update-status', set_in_env={'EMIT_CUSTOM_EVENT': '1'} + ), ) calls = fake_script.calls() @@ -764,27 +819,48 @@ def test_custom_event(self, fake_script: FakeScript): calls[2][-1] = custom_event_prefix assert calls == expected - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_logger(self, fake_script: FakeScript): fake_script.write('action-get', "echo '{}'") - test_cases = [( - EventSpec(ops.ActionEvent, 'log_critical_action', env_var='JUJU_ACTION_NAME', - set_in_env={'JUJU_ACTION_UUID': '1'}), - ['juju-log', '--log-level', 'CRITICAL', '--', 'super critical'], - ), ( - EventSpec(ops.ActionEvent, 'log_error_action', - env_var='JUJU_ACTION_NAME', 
set_in_env={'JUJU_ACTION_UUID': '2'}), - ['juju-log', '--log-level', 'ERROR', '--', 'grave error'], - ), ( - EventSpec(ops.ActionEvent, 'log_warning_action', - env_var='JUJU_ACTION_NAME', set_in_env={'JUJU_ACTION_UUID': '3'}), - ['juju-log', '--log-level', 'WARNING', '--', 'wise warning'], - ), ( - EventSpec(ops.ActionEvent, 'log_info_action', - env_var='JUJU_ACTION_NAME', set_in_env={'JUJU_ACTION_UUID': '4'}), - ['juju-log', '--log-level', 'INFO', '--', 'useful info'], - )] + test_cases = [ + ( + EventSpec( + ops.ActionEvent, + 'log_critical_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '1'}, + ), + ['juju-log', '--log-level', 'CRITICAL', '--', 'super critical'], + ), + ( + EventSpec( + ops.ActionEvent, + 'log_error_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '2'}, + ), + ['juju-log', '--log-level', 'ERROR', '--', 'grave error'], + ), + ( + EventSpec( + ops.ActionEvent, + 'log_warning_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '3'}, + ), + ['juju-log', '--log-level', 'WARNING', '--', 'wise warning'], + ), + ( + EventSpec( + ops.ActionEvent, + 'log_info_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '4'}, + ), + ['juju-log', '--log-level', 'INFO', '--', 'useful info'], + ), + ] # Set up action symlinks. self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) @@ -793,12 +869,12 @@ def test_logger(self, fake_script: FakeScript): self._simulate_event(fake_script, event_spec) assert calls in fake_script.calls(clear=True) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_excepthook(self, fake_script: FakeScript): with pytest.raises(subprocess.CalledProcessError): self._simulate_event( fake_script, - EventSpec(ops.InstallEvent, 'install', set_in_env={'TRY_EXCEPTHOOK': '1'}) + EventSpec(ops.InstallEvent, 'install', set_in_env={'TRY_EXCEPTHOOK': '1'}), ) calls = [' '.join(i) for i in fake_script.calls()] @@ -806,16 +882,17 @@ def test_excepthook(self, fake_script: FakeScript): assert calls.pop(0) == ' '.join(VERSION_LOGLINE) assert re.search('Using local storage: not a Kubernetes podspec charm', calls.pop(0)) assert re.search('Initializing SQLite local storage: ', calls.pop(0)) - assert re.search( '(?ms)juju-log --log-level ERROR -- Uncaught exception while in charm code:\n' 'Traceback .most recent call last.:\n' ' .*' - ' raise RuntimeError."failing as requested".\n' - 'RuntimeError: failing as requested', calls[0]) - assert len(calls) == 1, f"expected 1 call, but got extra: {calls[1:]}" + " raise RuntimeError.'failing as requested'.\n" + 'RuntimeError: failing as requested', + calls[0], + ) + assert len(calls) == 1, f'expected 1 call, but got extra: {calls[1:]}' - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_sets_model_name(self, fake_script: FakeScript): self._prepare_actions() @@ -827,33 +904,42 @@ def test_sets_model_name(self, fake_script: FakeScript): 'get_model_name_action', env_var='JUJU_ACTION_NAME', model_name='test-model-name', - set_in_env={'JUJU_ACTION_UUID': '1'} - ) + set_in_env={'JUJU_ACTION_UUID': '1'}, + ), ) assert isinstance(state, ops.BoundStoredState) assert state._on_get_model_name_action == ['test-model-name'] - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_has_valid_status(self, fake_script: FakeScript): self._prepare_actions() fake_script.write('action-get', "echo '{}'") - fake_script.write('status-get', - 
"""echo '{"status": "unknown", "message": ""}'""") - state = self._simulate_event(fake_script, EventSpec( - ops.ActionEvent, 'get_status_action', - env_var='JUJU_ACTION_NAME', - set_in_env={'JUJU_ACTION_UUID': '1'})) + fake_script.write('status-get', """echo '{"status": "unknown", "message": ""}'""") + state = self._simulate_event( + fake_script, + EventSpec( + ops.ActionEvent, + 'get_status_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '1'}, + ), + ) assert isinstance(state, ops.BoundStoredState) assert state.status_name == 'unknown' assert state.status_message == '' fake_script.write( - 'status-get', - """echo '{"status": "blocked", "message": "help meeee"}'""") - state = self._simulate_event(fake_script, EventSpec( - ops.ActionEvent, 'get_status_action', - env_var='JUJU_ACTION_NAME', - set_in_env={'JUJU_ACTION_UUID': '1'})) + 'status-get', """echo '{"status": "blocked", "message": "help meeee"}'""" + ) + state = self._simulate_event( + fake_script, + EventSpec( + ops.ActionEvent, + 'get_status_action', + env_var='JUJU_ACTION_NAME', + set_in_env={'JUJU_ACTION_UUID': '1'}, + ), + ) assert isinstance(state, ops.BoundStoredState) assert state.status_name == 'blocked' assert state.status_message == 'help meeee' @@ -878,7 +964,7 @@ def _call_event( # Note that sys.executable is used to make sure we are using the same # interpreter for the child process to support virtual environments. fake_script.write( - "storage-get", + 'storage-get', """ if [ "$1" = "-s" ]; then id=${2#*/} @@ -910,16 +996,16 @@ def _call_event( """, ) fake_script.write( - "storage-list", + 'storage-list', """ echo '["disks/0"]' """, ) subprocess.run( - [sys.executable, str(event_file)], - check=True, env=env, cwd=str(self.JUJU_CHARM_DIR)) + [sys.executable, str(event_file)], check=True, env=env, cwd=str(self.JUJU_CHARM_DIR) + ) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_setup_event_links( self, request: pytest.FixtureRequest, @@ -938,13 +1024,13 @@ def test_setup_event_links( EventSpec(ops.StartEvent, 'start'), EventSpec(ops.UpgradeCharmEvent, 'upgrade-charm'), } - initial_hooks = {f"hooks/{ev.event_name}" for ev in initial_events} + initial_hooks = {f'hooks/{ev.event_name}' for ev in initial_events} def _assess_event_links(event_spec: EventSpec): assert self.hooks_dir / event_spec.event_name in self.hooks_dir.iterdir() for event_hook in all_event_hooks: hook_path = self.JUJU_CHARM_DIR / event_hook - assert hook_path.exists(), f"Missing hook: {event_hook}" + assert hook_path.exists(), f'Missing hook: {event_hook}' if self.hooks_are_symlinks: assert hook_path.is_symlink() assert os.readlink(str(hook_path)) == self.charm_exec_path @@ -965,7 +1051,7 @@ def _assess_event_links(event_spec: EventSpec): self._simulate_event(fake_script, initial_event) _assess_event_links(initial_event) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_setup_action_links(self, fake_script: FakeScript): self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) # foo-bar is one of the actions defined in actions.yaml @@ -1008,7 +1094,7 @@ def _setup_entry_point(self, directory: Path, entry_point: str): class _TestMainWithDispatch(_TestMain): has_dispatch = True - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_setup_event_links( self, request: pytest.FixtureRequest, @@ -1018,8 +1104,9 @@ def test_setup_event_links( Symlink creation caused by initial events should _not_ 
happen when using dispatch. """ - all_event_hooks = [f"hooks/{e.replace('_', '-')}" - for e in self.charm_module.Charm.on.events()] + all_event_hooks = [ + f"hooks/{e.replace('_', '-')}" for e in self.charm_module.Charm.on.events() + ] initial_events = { EventSpec(ops.InstallEvent, 'install'), EventSpec(ops.StorageAttachedEvent, 'disks-storage-attached'), @@ -1030,8 +1117,9 @@ def test_setup_event_links( def _assess_event_links(event_spec: EventSpec): assert self.hooks_dir / event_spec.event_name not in self.hooks_dir.iterdir() for event_hook in all_event_hooks: - assert not (self.JUJU_CHARM_DIR / event_hook).exists(), \ - f"Spurious hook: {event_hook}" + assert not ( + self.JUJU_CHARM_DIR / event_hook + ).exists(), f'Spurious hook: {event_hook}' for initial_event in initial_events: self._setup_charm_dir(request) @@ -1039,7 +1127,7 @@ def _assess_event_links(event_spec: EventSpec): self._simulate_event(fake_script, initial_event) _assess_event_links(initial_event) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_hook_and_dispatch( self, request: pytest.FixtureRequest, @@ -1057,23 +1145,25 @@ def test_hook_and_dispatch( hook = Path('hooks/install') expected = [ VERSION_LOGLINE, - ['juju-log', '--log-level', 'INFO', '--', - f'Running legacy {hook}.'], - ['juju-log', '--log-level', 'DEBUG', '--', - f'Legacy {hook} exited with status 0.'], - ['juju-log', '--log-level', 'DEBUG', '--', - 'Using local storage: not a Kubernetes podspec charm'], - ['juju-log', '--log-level', 'DEBUG', '--', - 'Emitting Juju event install.'], + ['juju-log', '--log-level', 'INFO', '--', f'Running legacy {hook}.'], + ['juju-log', '--log-level', 'DEBUG', '--', f'Legacy {hook} exited with status 0.'], + [ + 'juju-log', + '--log-level', + 'DEBUG', + '--', + 'Using local storage: not a Kubernetes podspec charm', + ], + ['juju-log', '--log-level', 'DEBUG', '--', 'Emitting Juju event install.'], ['is-leader', '--format=json'], ] calls = fake_script.calls() assert re.search('Initializing SQLite local storage: ', ' '.join(calls.pop(-3))) assert calls == expected - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_non_executable_hook_and_dispatch(self, fake_script: FakeScript): - (self.hooks_dir / "install").write_text("") + (self.hooks_dir / 'install').write_text('') state = self._simulate_event(fake_script, EventSpec(ops.InstallEvent, 'install')) assert isinstance(state, ops.BoundStoredState) @@ -1081,19 +1171,28 @@ def test_non_executable_hook_and_dispatch(self, fake_script: FakeScript): expected = [ VERSION_LOGLINE, - ['juju-log', '--log-level', 'WARNING', '--', - 'Legacy hooks/install exists but is not executable.'], - ['juju-log', '--log-level', 'DEBUG', '--', - 'Using local storage: not a Kubernetes podspec charm'], - ['juju-log', '--log-level', 'DEBUG', '--', - 'Emitting Juju event install.'], + [ + 'juju-log', + '--log-level', + 'WARNING', + '--', + 'Legacy hooks/install exists but is not executable.', + ], + [ + 'juju-log', + '--log-level', + 'DEBUG', + '--', + 'Using local storage: not a Kubernetes podspec charm', + ], + ['juju-log', '--log-level', 'DEBUG', '--', 'Emitting Juju event install.'], ['is-leader', '--format=json'], ] calls = fake_script.calls() assert re.search('Initializing SQLite local storage: ', ' '.join(calls.pop(-3))) assert calls == expected - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_hook_and_dispatch_with_failing_hook( self, request: pytest.FixtureRequest, @@ 
-1117,16 +1216,15 @@ def test_hook_and_dispatch_with_failing_hook( expected = [ VERSION_LOGLINE, ['juju-log', '--log-level', 'INFO', '--', f'Running legacy {hook}.'], - ['juju-log', '--log-level', 'WARNING', '--', - f'Legacy {hook} exited with status 42.'], + ['juju-log', '--log-level', 'WARNING', '--', f'Legacy {hook} exited with status 42.'], ] assert calls == expected - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_hook_and_dispatch_but_hook_is_dispatch(self, fake_script: FakeScript): event = EventSpec(ops.InstallEvent, 'install') hook_path = self.hooks_dir / 'install' - for ((rel, ind), path) in { + for (rel, ind), path in { # relative and indirect (True, True): Path('../dispatch'), # relative and direct @@ -1151,7 +1249,7 @@ def test_hook_and_dispatch_but_hook_is_dispatch(self, fake_script: FakeScript): finally: hook_path.unlink() - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_hook_and_dispatch_but_hook_is_dispatch_copy(self, fake_script: FakeScript): hook_path = self.hooks_dir / 'install' path = (self.hooks_dir / self.charm_exec_path).resolve() @@ -1167,17 +1265,18 @@ def test_hook_and_dispatch_but_hook_is_dispatch_copy(self, fake_script: FakeScri hook = Path('hooks/install') expected = [ VERSION_LOGLINE, - ['juju-log', '--log-level', 'INFO', '--', - f'Running legacy {hook}.'], - VERSION_LOGLINE, # because it called itself - ['juju-log', '--log-level', 'DEBUG', '--', - f'Charm called itself via {hook}.'], - ['juju-log', '--log-level', 'DEBUG', '--', - f'Legacy {hook} exited with status 0.'], - ['juju-log', '--log-level', 'DEBUG', '--', - 'Using local storage: not a Kubernetes podspec charm'], - ['juju-log', '--log-level', 'DEBUG', '--', - 'Emitting Juju event install.'], + ['juju-log', '--log-level', 'INFO', '--', f'Running legacy {hook}.'], + VERSION_LOGLINE, # because it called itself + ['juju-log', '--log-level', 'DEBUG', '--', f'Charm called itself via {hook}.'], + ['juju-log', '--log-level', 'DEBUG', '--', f'Legacy {hook} exited with status 0.'], + [ + 'juju-log', + '--log-level', + 'DEBUG', + '--', + 'Using local storage: not a Kubernetes podspec charm', + ], + ['juju-log', '--log-level', 'DEBUG', '--', 'Emitting Juju event install.'], ['is-leader', '--format=json'], ] calls = fake_script.calls() @@ -1202,7 +1301,7 @@ def _call_event( env['JUJU_VERSION'] = '2.8.0' dispatch = self.JUJU_CHARM_DIR / 'dispatch' fake_script.write( - "storage-get", + 'storage-get', """ if [ "$1" = "-s" ]; then id=${2#*/} @@ -1234,7 +1333,7 @@ def _call_event( """, ) fake_script.write( - "storage-list", + 'storage-list', """ echo '["disks/0"]' """, @@ -1243,19 +1342,27 @@ def _call_event( [sys.executable, str(dispatch)], stdout=self.stdout, stderr=self.stderr, - check=True, env=env, cwd=str(self.JUJU_CHARM_DIR)) + check=True, + env=env, + cwd=str(self.JUJU_CHARM_DIR), + ) - @pytest.mark.usefixtures("setup_charm") + @pytest.mark.usefixtures('setup_charm') def test_crash_action(self, request: pytest.FixtureRequest, fake_script: FakeScript): self._prepare_actions() self.stderr = tempfile.TemporaryFile('w+t') request.addfinalizer(self.stderr.close) fake_script.write('action-get', "echo '{}'") with pytest.raises(subprocess.CalledProcessError): - self._simulate_event(fake_script, EventSpec( - ops.ActionEvent, 'keyerror_action', - env_var='JUJU_ACTION_NAME', - set_in_env={'JUJU_ACTION_UUID': '1'})) + self._simulate_event( + fake_script, + EventSpec( + ops.ActionEvent, + 'keyerror_action', + env_var='JUJU_ACTION_NAME', 
+ set_in_env={'JUJU_ACTION_UUID': '1'}, + ), + ) self.stderr.seek(0) stderr = self.stderr.read() assert 'KeyError' in stderr @@ -1268,11 +1375,13 @@ class TestMainWithDispatchAsScript(_TestMainWithDispatch): has_dispatch = True def _setup_entry_point(self, directory: Path, entry_point: str): - path = (self.JUJU_CHARM_DIR / 'dispatch') + path = self.JUJU_CHARM_DIR / 'dispatch' if not path.exists(): - path.write_text('#!/bin/sh\nexec "{}" "{}"\n'.format( - sys.executable, - self.JUJU_CHARM_DIR / 'src/charm.py')) + path.write_text( + '#!/bin/sh\nexec "{}" "{}"\n'.format( + sys.executable, self.JUJU_CHARM_DIR / 'src/charm.py' + ) + ) path.chmod(0o755) def _call_event( @@ -1284,7 +1393,7 @@ def _call_event( env['JUJU_DISPATCH_PATH'] = str(rel_path) env['JUJU_VERSION'] = '2.8.0' fake_script.write( - "storage-get", + 'storage-get', """ if [ "$1" = "-s" ]; then id=${2#*/} @@ -1316,32 +1425,32 @@ def _call_event( """, ) fake_script.write( - "storage-list", + 'storage-list', """ echo '["disks/0"]' """, ) - dispatch = (self.JUJU_CHARM_DIR / 'dispatch') + dispatch = self.JUJU_CHARM_DIR / 'dispatch' subprocess.check_call([str(dispatch)], env=env, cwd=str(self.JUJU_CHARM_DIR)) class TestStorageHeuristics: def test_fallback_to_current_juju_version__too_old(self): - meta = ops.CharmMeta.from_yaml("series: [kubernetes]") - with patch.dict(os.environ, {"JUJU_VERSION": "1.0"}): - assert not _should_use_controller_storage(Path("/xyzzy"), meta) + meta = ops.CharmMeta.from_yaml('series: [kubernetes]') + with patch.dict(os.environ, {'JUJU_VERSION': '1.0'}): + assert not _should_use_controller_storage(Path('/xyzzy'), meta) def test_fallback_to_current_juju_version__new_enough(self): - meta = ops.CharmMeta.from_yaml("series: [kubernetes]") - with patch.dict(os.environ, {"JUJU_VERSION": "2.8"}): - assert _should_use_controller_storage(Path("/xyzzy"), meta) + meta = ops.CharmMeta.from_yaml('series: [kubernetes]') + with patch.dict(os.environ, {'JUJU_VERSION': '2.8'}): + assert _should_use_controller_storage(Path('/xyzzy'), meta) def test_not_if_not_in_k8s(self): - meta = ops.CharmMeta.from_yaml("series: [ecs]") - with patch.dict(os.environ, {"JUJU_VERSION": "2.8"}): - assert not _should_use_controller_storage(Path("/xyzzy"), meta) + meta = ops.CharmMeta.from_yaml('series: [ecs]') + with patch.dict(os.environ, {'JUJU_VERSION': '2.8'}): + assert not _should_use_controller_storage(Path('/xyzzy'), meta) def test_not_if_already_local(self): - meta = ops.CharmMeta.from_yaml("series: [kubernetes]") - with patch.dict(os.environ, {"JUJU_VERSION": "2.8"}), tempfile.NamedTemporaryFile() as fd: + meta = ops.CharmMeta.from_yaml('series: [kubernetes]') + with patch.dict(os.environ, {'JUJU_VERSION': '2.8'}), tempfile.NamedTemporaryFile() as fd: assert not _should_use_controller_storage(Path(fd.name), meta) diff --git a/test/test_model.py b/test/test_model.py index 0373a44b2..5c9435b12 100644 --- a/test/test_model.py +++ b/test/test_model.py @@ -23,7 +23,6 @@ import typing import unittest from collections import OrderedDict -from test.test_helpers import FakeScript from textwrap import dedent from unittest.mock import MagicMock, patch @@ -34,6 +33,7 @@ from ops import pebble from ops._private import yaml from ops.model import _ModelBackend +from test.test_helpers import FakeScript @pytest.fixture @@ -44,7 +44,9 @@ def fake_script(request: pytest.FixtureRequest) -> FakeScript: class TestModel: @pytest.fixture def harness(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, 
+ meta=""" name: myapp provides: db0: @@ -58,7 +60,8 @@ def harness(self): resources: foo: {type: file, filename: foo.txt} bar: {type: file, filename: bar.txt} - ''', config=''' + """, + config=""" options: foo: type: string @@ -70,15 +73,17 @@ def harness(self): type: float secretfoo: type: secret - ''') + """, + ) yield harness harness.cleanup() def ensure_relation( - self, - harness: ops.testing.Harness[ops.CharmBase], - name: str = 'db1', - relation_id: typing.Optional[int] = None) -> ops.Relation: + self, + harness: ops.testing.Harness[ops.CharmBase], + name: str = 'db1', + relation_id: typing.Optional[int] = None, + ) -> ops.Relation: """Wrapper around harness.model.get_relation that enforces that None is not returned.""" rel_db1 = harness.model.get_relation(name, relation_id) assert rel_db1 is not None @@ -102,7 +107,7 @@ def test_model_name_from_backend(self, harness: ops.testing.Harness[ops.CharmBas m = ops.Model(ops.CharmMeta(), harness._backend) assert m.name == 'default' with pytest.raises(AttributeError): - m.name = "changes-disallowed" # type: ignore + m.name = 'changes-disallowed' # type: ignore def test_relations_keys(self, harness: ops.testing.Harness[ops.CharmBase]): rel_app1 = harness.add_relation('db1', 'remoteapp1') @@ -119,11 +124,14 @@ def test_relations_keys(self, harness: ops.testing.Harness[ops.CharmBase]): unit_from_rel = next(filter(lambda u: u.name == 'myapp/0', relation.data.keys())) assert harness.model.unit is unit_from_rel - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', rel_app1), - ('relation_list', rel_app2), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', rel_app1), + ('relation_list', rel_app2), + ], + ) def test_relations_immutable(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(AttributeError): @@ -146,35 +154,47 @@ def test_get_relation(self, harness: ops.testing.Harness[ops.CharmBase]): harness.model.get_relation('db1', f'db1:{relation_id_db1}') # type: ignore rel_db1 = harness.model.get_relation('db1', relation_id_db1) assert isinstance(rel_db1, ops.Relation) - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id_db1), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id_db1), + ], + ) dead_rel = self.ensure_relation(harness, 'db1', 7) assert isinstance(dead_rel, ops.Relation) assert set(dead_rel.data.keys()) == {harness.model.unit, harness.model.unit.app} assert dead_rel.data[harness.model.unit] == {} - self.assertBackendCalls(harness, [ - ('relation_list', 7), - ('relation_remote_app_name', 7), - ('relation_get', 7, 'myapp/0', False), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_list', 7), + ('relation_remote_app_name', 7), + ('relation_get', 7, 'myapp/0', False), + ], + ) assert harness.model.get_relation('db2') is None - self.assertBackendCalls(harness, [ - ('relation_ids', 'db2'), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db2'), + ], + ) assert harness.model.get_relation('db1') is rel_db1 with pytest.raises(ops.TooManyRelatedAppsError): harness.model.get_relation('db0') - self.assertBackendCalls(harness, [ - ('relation_ids', 'db0'), - ('relation_list', relation_id_db0), - ('relation_remote_app_name', 0), - ('relation_list', relation_id_db0_b), - ('relation_remote_app_name', 2), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db0'), + ('relation_list', relation_id_db0), + 
('relation_remote_app_name', 0), + ('relation_list', relation_id_db0_b), + ('relation_remote_app_name', 2), + ], + ) def test_peer_relation_app(self, harness: ops.testing.Harness[ops.CharmBase]): harness.add_relation('db2', 'myapp') @@ -191,10 +211,13 @@ def test_remote_units_is_our(self, harness: ops.testing.Harness[ops.CharmBase]): assert not u._is_our_unit assert not u.app._is_our_app - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ], + ) def test_our_unit_is_our(self, harness: ops.testing.Harness[ops.CharmBase]): assert harness.model.unit._is_our_unit @@ -206,17 +229,11 @@ def test_invalid_type_relation_data(self, harness: ops.testing.Harness[ops.Charm with pytest.raises(ops.RelationDataError): with harness._event_context('foo_event'): - harness.update_relation_data( - relation_id, - 'remoteapp1/0', - {42: 'remoteapp1-0'}) # type: ignore + harness.update_relation_data(relation_id, 'remoteapp1/0', {42: 'remoteapp1-0'}) # type: ignore with pytest.raises(ops.RelationDataError): with harness._event_context('foo_event'): - harness.update_relation_data( - relation_id, - 'remoteapp1/0', - {'foo': 42}) # type: ignore + harness.update_relation_data(relation_id, 'remoteapp1/0', {'foo': 42}) # type: ignore def test_get_app_relation_data(self, harness: ops.testing.Harness[ops.CharmBase]): harness.begin() @@ -224,19 +241,16 @@ def test_get_app_relation_data(self, harness: ops.testing.Harness[ops.CharmBase] harness.add_relation_unit(relation_id, 'remote/0') local_app = harness.model.app.name with harness._event_context('foo_event'): - harness.update_relation_data( - relation_id, - local_app, - {'foo': 'bar'}) - assert harness.get_relation_data( - relation_id, harness.model.app) == harness.get_relation_data( - relation_id, local_app) == {'foo': 'bar'} - - @pytest.mark.parametrize('args,kwargs', [ - (({'foo': 'baz'}, ), {}), - (([('foo', 'baz')], ), {}), - ((), {'foo': 'baz'}) - ]) + harness.update_relation_data(relation_id, local_app, {'foo': 'bar'}) + assert ( + harness.get_relation_data(relation_id, harness.model.app) + == harness.get_relation_data(relation_id, local_app) + == {'foo': 'bar'} + ) + + @pytest.mark.parametrize( + 'args,kwargs', [(({'foo': 'baz'},), {}), (([('foo', 'baz')],), {}), ((), {'foo': 'baz'})] + ) def test_update_app_relation_data( self, args: typing.Tuple[typing.Any, ...], @@ -248,46 +262,41 @@ def test_update_app_relation_data( relation_id = harness.add_relation('db1', 'remote') harness.add_relation_unit(relation_id, 'remote/0') with harness._event_context('foo_event'): - harness.update_relation_data( - relation_id, - harness.model.app.name, - {'foo': 'bar'}) + harness.update_relation_data(relation_id, harness.model.app.name, {'foo': 'bar'}) rel = harness.model.get_relation('db1', relation_id) assert rel is not None rel.data[harness.model.app].update(*args, **kwargs) - assert harness.get_relation_data( - relation_id, harness.model.app) == {'foo': 'baz'} + assert harness.get_relation_data(relation_id, harness.model.app) == {'foo': 'baz'} def test_unit_relation_data(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') harness.add_relation_unit(relation_id, 'remoteapp1/0') with harness._event_context('foo_event'): - harness.update_relation_data( - relation_id, - 'remoteapp1/0', - {'host': 'remoteapp1-0'}) + harness.update_relation_data(relation_id, 'remoteapp1/0', 
{'host': 'remoteapp1-0'}) harness.model.relations._invalidate('db1') self.resetBackendCalls(harness) random_unit = harness.model.get_unit('randomunit/0') with pytest.raises(KeyError): self.ensure_relation(harness, 'db1').data[random_unit] - remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', - self.ensure_relation(harness, 'db1').units)) - assert self.ensure_relation(harness, 'db1').data[remoteapp1_0] == \ - {'host': 'remoteapp1-0'} - - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'remoteapp1/0', False), - ]) + remoteapp1_0 = next( + filter(lambda u: u.name == 'remoteapp1/0', self.ensure_relation(harness, 'db1').units) + ) + assert self.ensure_relation(harness, 'db1').data[remoteapp1_0] == {'host': 'remoteapp1-0'} + + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1/0', False), + ], + ) def test_remote_app_relation_data(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') with harness._event_context('foo_event'): - harness.update_relation_data(relation_id, 'remoteapp1', - {'secret': 'cafedeadbeef'}) + harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'}) harness.add_relation_unit(relation_id, 'remoteapp1/0') harness.add_relation_unit(relation_id, 'remoteapp1/1') self.resetBackendCalls(harness) @@ -301,29 +310,30 @@ def test_remote_app_relation_data(self, harness: ops.testing.Harness[ops.CharmBa remoteapp1 = rel_db1.app assert remoteapp1 is not None assert remoteapp1.name == 'remoteapp1' - assert rel_db1.data[remoteapp1] == \ - {'secret': 'cafedeadbeef'} - - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'remoteapp1', True), - ]) + assert rel_db1.data[remoteapp1] == {'secret': 'cafedeadbeef'} + + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1', True), + ], + ) def test_relation_data_modify_remote(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') with harness._event_context('foo_event'): - harness.update_relation_data(relation_id, 'remoteapp1', - {'secret': 'cafedeadbeef'}) + harness.update_relation_data(relation_id, 'remoteapp1', {'secret': 'cafedeadbeef'}) harness.add_relation_unit(relation_id, 'remoteapp1/0') - harness.update_relation_data(relation_id, 'remoteapp1/0', - {'host': 'remoteapp1/0'}) + harness.update_relation_data(relation_id, 'remoteapp1/0', {'host': 'remoteapp1/0'}) harness.model.relations._invalidate('db1') self.resetBackendCalls(harness) rel_db1 = self.ensure_relation(harness, 'db1') - remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0', - self.ensure_relation(harness, 'db1').units)) + remoteapp1_0 = next( + filter(lambda u: u.name == 'remoteapp1/0', self.ensure_relation(harness, 'db1').units) + ) # Force memory cache to be loaded. 
assert 'host' in rel_db1.data[remoteapp1_0] assert repr(rel_db1.data[remoteapp1_0]) == "{'host': 'remoteapp1/0'}" @@ -333,20 +343,24 @@ def test_relation_data_modify_remote(self, harness: ops.testing.Harness[ops.Char rel_db1.data[remoteapp1_0]['foo'] = 'bar' assert 'foo' not in rel_db1.data[remoteapp1_0] - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'remoteapp1/0', False), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'remoteapp1/0', False), + ], + ) # this will fire more backend calls with harness._event_context('foo_event'): data_repr = repr(rel_db1.data) - assert data_repr == \ - ('{: {}, ' - ': , ' - ": {'host': 'remoteapp1/0'}, " - ": {'secret': 'cafedeadbeef'}}") + assert data_repr == ( + '{: {}, ' + ': , ' + ": {'host': 'remoteapp1/0'}, " + ": {'secret': 'cafedeadbeef'}}" + ) def test_relation_data_modify_our(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') @@ -363,14 +377,16 @@ def test_relation_data_modify_our(self, harness: ops.testing.Harness[ops.CharmBa rel_db1.data[harness.model.unit]['host'] = 'bar' assert rel_db1.data[harness.model.unit]['host'] == 'bar' - self.assertBackendCalls(harness, [ - ('relation_get', relation_id, 'myapp/0', False), - ('update_relation_data', relation_id, harness.model.unit, 'host', 'bar'), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_get', relation_id, 'myapp/0', False), + ('update_relation_data', relation_id, harness.model.unit, 'host', 'bar'), + ], + ) def test_app_relation_data_modify_local_as_leader( - self, - harness: ops.testing.Harness[ops.CharmBase] + self, harness: ops.testing.Harness[ops.CharmBase] ): relation_id = harness.add_relation('db1', 'remoteapp1') harness.update_relation_data(relation_id, 'myapp', {'password': 'deadbeefcafe'}) @@ -387,12 +403,15 @@ def test_app_relation_data_modify_local_as_leader( assert rel_db1.data[local_app]['password'] == 'foo' - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', 0), - ('relation_get', 0, 'myapp', True), - ('update_relation_data', 0, harness.model.app, 'password', 'foo'), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', 0), + ('relation_get', 0, 'myapp', True), + ('update_relation_data', 0, harness.model.app, 'password', 'foo'), + ], + ) def test_app_relation_data_modify_local_as_minion( self, @@ -414,12 +433,15 @@ def test_app_relation_data_modify_local_as_minion( with pytest.raises(ops.RelationDataError): rel_db1.data[local_app]['password'] = 'foobar' - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', 0), - ('relation_get', 0, 'myapp', True), - ('is_leader',), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', 0), + ('relation_get', 0, 'myapp', True), + ('is_leader',), + ], + ) def test_relation_data_access_peer_leader(self, harness: ops.testing.Harness[ops.CharmBase]): r_id = harness.add_relation('db2', 'myapp') @@ -457,12 +479,15 @@ def test_relation_data_del_key(self, harness: ops.testing.Harness[ops.CharmBase] assert 'host' not in rel_db1.data[harness.model.unit] assert harness.get_relation_data(relation_id, 'myapp/0') == {} - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'myapp/0', False), - 
('update_relation_data', relation_id, harness.model.unit, 'host', ''), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('update_relation_data', relation_id, harness.model.unit, 'host', ''), + ], + ) def test_relation_data_del_missing_key(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') @@ -475,18 +500,20 @@ def test_relation_data_del_missing_key(self, harness: ops.testing.Harness[ops.Ch # Force memory cache to be loaded. assert 'host' in rel_db1.data[harness.model.unit] with harness._event_context('foo_event'): - rel_db1.data[harness.model.unit]['port'] = '' # Same as a delete, should not fail. + rel_db1.data[harness.model.unit]['port'] = '' # Same as a delete, should not fail. assert 'port' not in rel_db1.data[harness.model.unit] with harness._event_context('foo_event'): - assert harness.get_relation_data(relation_id, 'myapp/0') == \ - {'host': 'bar'} - - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'myapp/0', False), - ('update_relation_data', relation_id, harness.model.unit, 'port', ''), - ]) + assert harness.get_relation_data(relation_id, 'myapp/0') == {'host': 'bar'} + + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('update_relation_data', relation_id, harness.model.unit, 'port', ''), + ], + ) def test_relation_set_fail(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') @@ -524,13 +551,16 @@ def broken_update_relation_data( del rel_db1.data[harness.model.unit]['host'] assert 'host' in rel_db1.data[harness.model.unit] - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'myapp/0', False), - ('update_relation_data', relation_id, harness.model.unit, 'host', 'bar'), - ('update_relation_data', relation_id, harness.model.unit, 'host', ''), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ('update_relation_data', relation_id, harness.model.unit, 'host', 'bar'), + ('update_relation_data', relation_id, harness.model.unit, 'host', ''), + ], + ) def test_relation_data_type_check(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db1', 'remoteapp1') @@ -547,7 +577,7 @@ def test_relation_data_type_check(self, harness: ops.testing.Harness[ops.CharmBa (None, 'foo'), (('foo', 'bar'), 'foo'), (1, 1), - (None, None) + (None, None), ): with pytest.raises(ops.RelationDataError): with harness.framework._event_context('foo_event'): @@ -556,11 +586,14 @@ def test_relation_data_type_check(self, harness: ops.testing.Harness[ops.CharmBa # No data has actually been changed assert dict(rel_db1.data[harness.model.unit]) == {'host': 'myapp-0'} - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('relation_get', relation_id, 'myapp/0', False), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('relation_get', relation_id, 'myapp/0', False), + ], + ) def test_relation_local_app_data_readability_leader( self, @@ -593,10 +626,13 @@ def 
test_relation_local_app_data_readability_leader( assert rel_db1.data[local_app]['local'] == 'data' - self.assertBackendCalls(harness, [ - ('is_leader',), - ('relation_get', 0, 'myapp', True), - ]) + self.assertBackendCalls( + harness, + [ + ('is_leader',), + ('relation_get', 0, 'myapp', True), + ], + ) self.resetBackendCalls(harness) @@ -639,11 +675,11 @@ def test_relation_local_app_data_readability_follower( rel_db1.data[local_app]['local'] # we didn't even get to relation-get - self.assertBackendCalls(harness, [('is_leader', )]) + self.assertBackendCalls(harness, [('is_leader',)]) # we can't see it but repr() works assert repr(rel_db1.data[local_app]) == '' - self.assertBackendCalls(harness, [('is_leader', )]) + self.assertBackendCalls(harness, [('is_leader',)]) # as well as relation data repr() in general: assert isinstance(repr(rel_db1.data), str) @@ -653,7 +689,8 @@ def test_relation_local_app_data_readability_follower( ('is_leader',), ('relation_get', 0, 'remoteapp1/0', False), ('is_leader',), - ('relation_get', 0, 'remoteapp1', True)] + ('relation_get', 0, 'remoteapp1', True), + ] self.assertBackendCalls(harness, expected_backend_calls) def test_relation_no_units(self, harness: ops.testing.Harness[ops.CharmBase]): @@ -661,11 +698,14 @@ def test_relation_no_units(self, harness: ops.testing.Harness[ops.CharmBase]): rel = self.ensure_relation(harness, 'db1') assert rel.units == set() assert rel.app is harness.model.get_app('remoteapp1') - self.assertBackendCalls(harness, [ - ('relation_ids', 'db1'), - ('relation_list', 0), - ('relation_remote_app_name', 0), - ]) + self.assertBackendCalls( + harness, + [ + ('relation_ids', 'db1'), + ('relation_list', 0), + ('relation_remote_app_name', 0), + ], + ) def test_config(self, harness: ops.testing.Harness[ops.CharmBase]): harness._get_backend_calls(reset=True) @@ -715,23 +755,29 @@ def check_remote_units(): check_remote_units() - self.assertBackendCalls(harness, [ - ('is_leader',), - ('relation_ids', 'db1'), - ('relation_list', relation_id), - ('is_leader',), - ]) + self.assertBackendCalls( + harness, + [ + ('is_leader',), + ('relation_ids', 'db1'), + ('relation_list', relation_id), + ('is_leader',), + ], + ) def test_workload_version(self, harness: ops.testing.Harness[ops.CharmBase]): harness.model.unit.set_workload_version('1.2.3') - self.assertBackendCalls(harness, [ - ('application_version_set', '1.2.3'), - ]) + self.assertBackendCalls( + harness, + [ + ('application_version_set', '1.2.3'), + ], + ) def test_workload_version_invalid(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(TypeError) as excinfo: harness.model.unit.set_workload_version(5) # type: ignore - assert str(excinfo.value) == "workload version must be a str, not int: 5" + assert str(excinfo.value) == 'workload version must be a str, not int: 5' self.assertBackendCalls(harness, []) def test_resources(self, harness: ops.testing.Harness[ops.CharmBase]): @@ -798,9 +844,9 @@ def test_status_eq(self): ] assert ops.UnknownStatus() == ops.UnknownStatus() - for (i, t1) in enumerate(status_types): + for i, t1 in enumerate(status_types): assert t1('') != ops.UnknownStatus() - for (j, t2) in enumerate(status_types): + for j, t2 in enumerate(status_types): assert t1('one') != t2('two') if i == j: assert t1('one') == t2('one') @@ -810,19 +856,27 @@ def test_status_eq(self): def test_active_message_default(self): assert ops.ActiveStatus().message == '' - @pytest.mark.parametrize("target_status,backend_call", [( - ops.ActiveStatus('Green'), - ('status_set', 'active', 
'Green', {'is_app': False}), - ), ( - ops.MaintenanceStatus('Yellow'), - ('status_set', 'maintenance', 'Yellow', {'is_app': False}), - ), ( - ops.BlockedStatus('Red'), - ('status_set', 'blocked', 'Red', {'is_app': False}), - ), ( - ops.WaitingStatus('White'), - ('status_set', 'waiting', 'White', {'is_app': False}), - )]) + @pytest.mark.parametrize( + 'target_status,backend_call', + [ + ( + ops.ActiveStatus('Green'), + ('status_set', 'active', 'Green', {'is_app': False}), + ), + ( + ops.MaintenanceStatus('Yellow'), + ('status_set', 'maintenance', 'Yellow', {'is_app': False}), + ), + ( + ops.BlockedStatus('Red'), + ('status_set', 'blocked', 'Red', {'is_app': False}), + ), + ( + ops.WaitingStatus('White'), + ('status_set', 'waiting', 'White', {'is_app': False}), + ), + ], + ) def test_local_set_valid_unit_status( self, harness: ops.testing.Harness[ops.CharmBase], @@ -836,19 +890,27 @@ def test_local_set_valid_unit_status( assert harness.model.unit.status == target_status self.assertBackendCalls(harness, [backend_call, ('status_get', {'is_app': False})]) - @pytest.mark.parametrize("target_status,backend_call", [( - ops.ActiveStatus('Green'), - ('status_set', 'active', 'Green', {'is_app': True}), - ), ( - ops.MaintenanceStatus('Yellow'), - ('status_set', 'maintenance', 'Yellow', {'is_app': True}), - ), ( - ops.BlockedStatus('Red'), - ('status_set', 'blocked', 'Red', {'is_app': True}), - ), ( - ops.WaitingStatus('White'), - ('status_set', 'waiting', 'White', {'is_app': True}), - )]) + @pytest.mark.parametrize( + 'target_status,backend_call', + [ + ( + ops.ActiveStatus('Green'), + ('status_set', 'active', 'Green', {'is_app': True}), + ), + ( + ops.MaintenanceStatus('Yellow'), + ('status_set', 'maintenance', 'Yellow', {'is_app': True}), + ), + ( + ops.BlockedStatus('Red'), + ('status_set', 'blocked', 'Red', {'is_app': True}), + ), + ( + ops.WaitingStatus('White'), + ('status_set', 'waiting', 'White', {'is_app': True}), + ), + ], + ) def test_local_set_valid_app_status( self, harness: ops.testing.Harness[ops.CharmBase], @@ -864,9 +926,11 @@ def test_local_set_valid_app_status( # There is a backend call to check if we can set the value, # and then another check each time we assert the status above expected_calls = [ - ('is_leader',), backend_call, ('is_leader',), - ('is_leader',), ('status_get', {'is_app': True}), + backend_call, + ('is_leader',), + ('is_leader',), + ('status_get', {'is_app': True}), ] self.assertBackendCalls(harness, expected_calls) @@ -889,13 +953,16 @@ def test_set_app_status_invalid(self, harness: ops.testing.Harness[ops.CharmBase with pytest.raises(ops.InvalidStatusError): harness.model.app.status = 'blocked' # type: ignore - @pytest.mark.parametrize("target_status", [ - ops.UnknownStatus(), - ops.ActiveStatus('Green'), - ops.MaintenanceStatus('Yellow'), - ops.BlockedStatus('Red'), - ops.WaitingStatus('White'), - ]) + @pytest.mark.parametrize( + 'target_status', + [ + ops.UnknownStatus(), + ops.ActiveStatus('Green'), + ops.MaintenanceStatus('Yellow'), + ops.BlockedStatus('Red'), + ops.WaitingStatus('White'), + ], + ) def test_remote_unit_status( self, harness: ops.testing.Harness[ops.CharmBase], @@ -905,10 +972,7 @@ def test_remote_unit_status( harness.add_relation_unit(relation_id, 'remoteapp1/0') harness.add_relation_unit(relation_id, 'remoteapp1/1') remote_unit = next( - filter( - lambda u: u.name == 'remoteapp1/0', - self.ensure_relation(harness, 'db1').units - ) + filter(lambda u: u.name == 'remoteapp1/0', self.ensure_relation(harness, 'db1').units) ) 
self.resetBackendCalls(harness) @@ -920,13 +984,16 @@ def test_remote_unit_status( self.assertBackendCalls(harness, []) - @pytest.mark.parametrize("target_status", [ - ops.UnknownStatus(), - ops.ActiveStatus(), - ops.MaintenanceStatus('Upgrading software'), - ops.BlockedStatus('Awaiting manual resolution'), - ops.WaitingStatus('Awaiting related app updates'), - ]) + @pytest.mark.parametrize( + 'target_status', + [ + ops.UnknownStatus(), + ops.ActiveStatus(), + ops.MaintenanceStatus('Upgrading software'), + ops.BlockedStatus('Awaiting manual resolution'), + ops.WaitingStatus('Awaiting related app updates'), + ], + ) def test_remote_app_status( self, harness: ops.testing.Harness[ops.CharmBase], @@ -949,7 +1016,7 @@ def test_remote_app_status( def test_storage(self, fake_script: FakeScript): meta = ops.CharmMeta() - raw: 'ops.charm._StorageMetaDict' = { + raw: ops.charm._StorageMetaDict = { 'type': 'test', } meta.storages = { @@ -958,14 +1025,19 @@ def test_storage(self, fake_script: FakeScript): } model = ops.Model(meta, _ModelBackend('myapp/0')) - fake_script.write('storage-list', ''' + fake_script.write( + 'storage-list', + """ if [ "$1" = disks ]; then echo '["disks/0", "disks/1"]' else echo '[]' fi - ''') - fake_script.write('storage-get', ''' + """, + ) + fake_script.write( + 'storage-get', + """ if [ "$2" = disks/0 ]; then echo '"/var/srv/disks/0"' elif [ "$2" = disks/1 ]; then @@ -973,7 +1045,8 @@ def test_storage(self, fake_script: FakeScript): else exit 2 fi - ''') + """, + ) fake_script.write('storage-add', '') assert len(model.storages) == 2 @@ -1023,11 +1096,12 @@ def resetBackendCalls(self, harness: ops.testing.Harness[ops.CharmBase]): # noq harness._get_backend_calls(reset=True) def assertBackendCalls( # noqa: N802 - self, - harness: ops.testing.Harness[ops.CharmBase], - expected: typing.List[typing.Tuple[typing.Any, ...]], - *, - reset: bool = True): + self, + harness: ops.testing.Harness[ops.CharmBase], + expected: typing.List[typing.Tuple[typing.Any, ...]], + *, + reset: bool = True, + ): assert expected == harness._get_backend_calls(reset=reset) def test_run_error(self, fake_script: FakeScript): @@ -1038,17 +1112,20 @@ def test_run_error(self, fake_script: FakeScript): assert str(excinfo.value) == 'ERROR cannot get status\n' assert excinfo.value.args[0] == 'ERROR cannot get status\n' - @patch("grp.getgrgid") - @patch("pwd.getpwuid") + @patch('grp.getgrgid') + @patch('pwd.getpwuid') def test_push_path_unnamed(self, getpwuid: MagicMock, getgrgid: MagicMock): getpwuid.side_effect = KeyError getgrgid.side_effect = KeyError - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) harness.begin() harness.set_can_connect('foo', True) container = harness.model.unit.containers['foo'] @@ -1056,8 +1133,8 @@ def test_push_path_unnamed(self, getpwuid: MagicMock, getgrgid: MagicMock): with tempfile.TemporaryDirectory() as push_src: push_path = pathlib.Path(push_src) / 'src.txt' push_path.write_text('hello') - container.push_path(push_path, "/") - assert container.exists("/src.txt"), 'push_path failed: file "src.txt" missing' + container.push_path(push_path, '/') + assert container.exists('/src.txt'), 'push_path failed: file "src.txt" missing' class PushPullCase: @@ -1135,10 +1212,9 @@ def list_func_gen(file_list: typing.List[str]): for f in file_list: file_infos.append( pebble.FileInfo( - path=f, - name=os.path.basename(f), - type=pebble.FileType.FILE, - 
**args)) + path=f, name=os.path.basename(f), type=pebble.FileType.FILE, **args + ) + ) # collect all the directories for the test case's files dirpath = os.path.dirname(f) @@ -1149,7 +1225,9 @@ def list_func_gen(file_list: typing.List[str]): path=dirpath, name=os.path.basename(dirpath), type=pebble.FileType.DIRECTORY, - **args)) + **args, + ) + ) def inner(path: pathlib.Path): path_str = str(path) @@ -1158,14 +1236,16 @@ def inner(path: pathlib.Path): # exclude file infos for separate trees and also # for the directory we are listing itself - we only want its contents. if not info.path.startswith(path_str) or ( - info.type == pebble.FileType.DIRECTORY and path_str == info.path): + info.type == pebble.FileType.DIRECTORY and path_str == info.path + ): continue # exclude file infos for files that are in subdirectories of path. # we only want files that are directly in path. - if info.path[len(path_str):].find('/') > 0: + if info.path[len(path_str) :].find('/') > 0: continue matches.append(info) return matches + return inner # test raw business logic for recursion and dest path construction @@ -1174,15 +1254,11 @@ def inner(path: pathlib.Path): case.path = os.path.normpath(case.path) case.files = [os.path.normpath(f) for f in case.files] case.want = {os.path.normpath(f) for f in case.want} - for f in ops.Container._list_recursive( - list_func_gen( - case.files), pathlib.Path( - case.path)): + for f in ops.Container._list_recursive(list_func_gen(case.files), pathlib.Path(case.path)): path = f.path if case.dst is not None: # test destination path construction - _, path = f.path, ops.Container._build_destpath( - f.path, case.path, case.dst) + _, path = f.path, ops.Container._build_destpath(f.path, case.path, case.dst) files.add(path) assert case.want == files, f'case {case.name!r} has wrong files: want {case.want}, got {files}' @@ -1265,12 +1341,15 @@ def inner(path: pathlib.Path): @pytest.mark.parametrize('case', recursive_push_pull_cases) def test_recursive_push_and_pull(case: PushPullCase): # full "integration" test of push+pull - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) harness.begin() harness.set_can_connect('foo', True) c = harness.model.unit.containers['foo'] @@ -1291,8 +1370,9 @@ def test_recursive_push_and_pull(case: PushPullCase): if isinstance(case.path, list): # swap slash for dummy dir on root dir so Path.parent doesn't return tmpdir path component # otherwise remove leading slash so we can do the path join properly. - push_path = [os.path.join(push_src.name, p[1:] if len(p) > 1 else 'foo') - for p in case.path] + push_path = [ + os.path.join(push_src.name, p[1:] if len(p) > 1 else 'foo') for p in case.path + ] else: # swap slash for dummy dir on root dir so Path.parent doesn't return tmpdir path component # otherwise remove leading slash so we can do the path join properly. 
@@ -1305,10 +1385,11 @@ def test_recursive_push_and_pull(case: PushPullCase): except ops.MultiPushPullError as err: if not case.errors: raise - errors = {src[len(push_src.name):] for src, _ in err.errors} + errors = {src[len(push_src.name) :] for src, _ in err.errors} - assert case.errors == errors, \ - f'push_path gave wrong expected errors: want {case.errors}, got {errors}' + assert ( + case.errors == errors + ), f'push_path gave wrong expected errors: want {case.errors}, got {errors}' for fpath in case.want: assert c.exists(fpath), f'push_path failed: file {fpath} missing at destination' for fdir in case.want_dirs: @@ -1331,44 +1412,51 @@ def test_recursive_push_and_pull(case: PushPullCase): raise errors = {src for src, _ in err.errors} - assert case.errors == errors, \ - f'pull_path gave wrong expected errors: want {case.errors}, got {errors}' + assert ( + case.errors == errors + ), f'pull_path gave wrong expected errors: want {case.errors}, got {errors}' for fpath in case.want: assert c.exists(fpath), f'pull_path failed: file {fpath} missing at destination' for fdir in case.want_dirs: assert c.isdir(fdir), f'pull_path failed: dir {fdir} missing at destination' -@pytest.mark.parametrize('case', [ - PushPullCase( - name='push directory without trailing slash', - path='foo', - dst='/baz', - files=['foo/bar/baz.txt', 'foo/foobar.txt'], - want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'}, - ), - PushPullCase( - name='push directory with trailing slash', - path='foo/', - dst='/baz', - files=['foo/bar/baz.txt', 'foo/foobar.txt'], - want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'}, - ), - PushPullCase( - name='push directory relative pathing', - path='./foo', - dst='/baz', - files=['foo/bar/baz.txt', 'foo/foobar.txt'], - want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'}, - ), -]) +@pytest.mark.parametrize( + 'case', + [ + PushPullCase( + name='push directory without trailing slash', + path='foo', + dst='/baz', + files=['foo/bar/baz.txt', 'foo/foobar.txt'], + want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'}, + ), + PushPullCase( + name='push directory with trailing slash', + path='foo/', + dst='/baz', + files=['foo/bar/baz.txt', 'foo/foobar.txt'], + want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'}, + ), + PushPullCase( + name='push directory relative pathing', + path='./foo', + dst='/baz', + files=['foo/bar/baz.txt', 'foo/foobar.txt'], + want={'/baz/foo/foobar.txt', '/baz/foo/bar/baz.txt'}, + ), + ], +) def test_push_path_relative(case: PushPullCase): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) harness.begin() harness.set_can_connect('foo', True) container = harness.model.unit.containers['foo'] @@ -1384,7 +1472,7 @@ def test_push_path_relative(case: PushPullCase): testfile_path = pathlib.Path(tmp / testfile) testfile_path.parent.mkdir(parents=True, exist_ok=True) testfile_path.touch(exist_ok=True) - testfile_path.write_text("test", encoding="utf-8") + testfile_path.write_text('test', encoding='utf-8') # push path under test to container assert case.dst is not None @@ -1401,7 +1489,9 @@ def test_push_path_relative(case: PushPullCase): class TestApplication: @pytest.fixture def harness(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: myapp provides: db0: @@ -1418,7 +1508,8 @@ def harness(self): containers: bar: k: v - ''') + """, + ) yield harness 
harness.cleanup() @@ -1427,10 +1518,15 @@ def test_mocked_get_services(self, harness: ops.testing.Harness[ops.CharmBase]): harness.begin() harness.set_can_connect('bar', True) c = harness.charm.unit.get_container('bar') - c.add_layer('layer1', { - 'summary': 'layer', - 'services': {"baz": {'override': 'replace', 'summary': 'echo', 'command': 'echo 1'}}, - }) + c.add_layer( + 'layer1', + { + 'summary': 'layer', + 'services': { + 'baz': {'override': 'replace', 'summary': 'echo', 'command': 'echo 1'} + }, + }, + ) s = c.get_service('baz') # So far, so good assert s @@ -1479,7 +1575,7 @@ def test_planned_units_garbage_values(self, harness: ops.testing.Harness[ops.Cha assert app.planned_units() == 1 with pytest.raises(TypeError): - harness.set_planned_units("foo") # type: ignore + harness.set_planned_units('foo') # type: ignore with pytest.raises(TypeError): harness.set_planned_units(-3423000102312321090) @@ -1618,15 +1714,21 @@ def test_restart(self, container: ops.Container): def test_restart_fallback(self, container: ops.Container): def restart_services(service_names: str): container.pebble.requests.append(('restart', service_names)) # type: ignore - raise pebble.APIError({}, 400, "", "") + raise pebble.APIError({}, 400, '', '') container.pebble.restart_services = restart_services # type: ignore # Setup the Pebble client to respond to a call to get_services() container.pebble.responses.append([ # type: ignore - pebble.ServiceInfo.from_dict( - {'name': 'foo', 'startup': 'enabled', 'current': 'active'}), - pebble.ServiceInfo.from_dict( - {'name': 'bar', 'startup': 'enabled', 'current': 'inactive'}), + pebble.ServiceInfo.from_dict({ + 'name': 'foo', + 'startup': 'enabled', + 'current': 'active', + }), + pebble.ServiceInfo.from_dict({ + 'name': 'bar', + 'startup': 'enabled', + 'current': 'inactive', + }), ]) container.restart('foo', 'bar') @@ -1637,12 +1739,12 @@ def restart_services(service_names: str): ('get_services', ('foo', 'bar')), ('stop', ('foo',)), # Then start all the specified services - ('start', ('foo', 'bar')) + ('start', ('foo', 'bar')), ] def test_restart_fallback_non_400_error(self, container: ops.Container): def restart_services(service_names: str): - raise pebble.APIError({}, 500, "", "") + raise pebble.APIError({}, 500, '', '') container.pebble.restart_services = restart_services # type: ignore with pytest.raises(pebble.APIError) as excinfo: @@ -1697,8 +1799,7 @@ def test_get_plan(self, container: ops.Container): @staticmethod def _make_service(name: str, startup: str, current: str): - return pebble.ServiceInfo.from_dict( - {'name': name, 'startup': startup, 'current': current}) + return pebble.ServiceInfo.from_dict({'name': name, 'startup': startup, 'current': current}) def test_get_services(self, container: ops.Container): two_services = [ @@ -1736,7 +1837,7 @@ def test_get_service(self, container: ops.Container): # Single service returned successfully container.pebble.responses.append([self._make_service('s1', 'enabled', 'active')]) # type: ignore s = container.get_service('s1') - assert container.pebble.requests == [('get_services', ('s1', ))] # type: ignore + assert container.pebble.requests == [('get_services', ('s1',))] # type: ignore assert s.name == 's1' assert s.startup == pebble.ServiceStartup.ENABLED assert s.current == pebble.ServiceStatus.ACTIVE @@ -1811,7 +1912,7 @@ def test_get_check(self, container: ops.Container): }) # type: ignore ]) c = container.get_check('c1') - assert container.pebble.requests == [('get_checks', None, ('c1', ))] # type: ignore + assert 
container.pebble.requests == [('get_checks', None, ('c1',))] # type: ignore assert c.name == 'c1' assert c.level == pebble.CheckLevel.UNSET assert c.status == pebble.CheckStatus.UP @@ -1862,13 +1963,20 @@ def test_pull(self, container: ops.Container): def test_push(self, container: ops.Container): container.push('/path/1', 'content1') assert container.pebble.requests == [ # type: ignore - ('push', '/path/1', 'content1', 'utf-8', False, None, - None, None, None, None), + ('push', '/path/1', 'content1', 'utf-8', False, None, None, None, None, None), ] container.pebble.requests = [] # type: ignore - container.push('/path/2', b'content2', make_dirs=True, - permissions=0o600, user_id=12, user='bob', group_id=34, group='staff') + container.push( + '/path/2', + b'content2', + make_dirs=True, + permissions=0o600, + user_id=12, + user='bob', + group_id=34, + group='staff', + ) assert container.pebble.requests == [ # type: ignore ('push', '/path/2', b'content2', 'utf-8', True, 0o600, 12, 'bob', 34, 'staff'), ] @@ -1896,8 +2004,15 @@ def test_make_dir(self, container: ops.Container): ] container.pebble.requests = [] # type: ignore - container.make_dir('/path/2', make_parents=True, permissions=0o700, - user_id=12, user='bob', group_id=34, group='staff') + container.make_dir( + '/path/2', + make_parents=True, + permissions=0o700, + user_id=12, + user='bob', + group_id=34, + group='staff', + ) assert container.pebble.requests == [ # type: ignore ('make_dir', '/path/2', True, 0o700, 12, 'bob', 34, 'staff'), ] @@ -1925,6 +2040,7 @@ def test_can_connect_connection_error( ): def raise_error(): raise pebble.ConnectionError('connection error!') + container.pebble.get_system_info = raise_error with caplog.at_level(level='DEBUG', logger='ops'): assert not container.can_connect() @@ -1938,6 +2054,7 @@ def test_can_connect_file_not_found_error( ): def raise_error(): raise FileNotFoundError('file not found!') + container.pebble.get_system_info = raise_error with caplog.at_level(level='DEBUG', logger='ops'): assert not container.can_connect() @@ -1951,6 +2068,7 @@ def test_can_connect_api_error( ): def raise_error(): raise pebble.APIError({'body': ''}, 404, 'status', 'api error!') + container.pebble.get_system_info = raise_error with caplog.at_level(level='WARNING', logger='ops'): assert not container.can_connect() @@ -1975,25 +2093,29 @@ def test_exec(self, container: ops.Container): stdin='STDIN', stdout=stdout, stderr=stderr, - encoding="encoding", + encoding='encoding', combine_stderr=True, ) assert container.pebble.requests == [ # type: ignore - ('exec', ['echo', 'foo'], dict( - service_context='srv1', - environment={'K1': 'V1', 'K2': 'V2'}, - working_dir='WD', - timeout=10.5, - user_id=1000, - user='bob', - group_id=1000, - group='staff', - stdin='STDIN', - stdout=stdout, - stderr=stderr, - encoding="encoding", - combine_stderr=True, - )) + ( + 'exec', + ['echo', 'foo'], + dict( + service_context='srv1', + environment={'K1': 'V1', 'K2': 'V2'}, + working_dir='WD', + timeout=10.5, + user_id=1000, + user='bob', + group_id=1000, + group='staff', + stdin='STDIN', + stdout=stdout, + stderr=stderr, + encoding='encoding', + combine_stderr=True, + ), + ) ] assert p == 'fake_exec_process' @@ -2019,16 +2141,18 @@ def test_send_signal(self, container: ops.Container): container.pebble.requests = [] # type: ignore def test_get_notice(self, container: ops.Container): - container.pebble.responses.append(pebble.Notice.from_dict({ # type: ignore - 'id': '123', - 'user-id': 1000, - 'type': 'custom', - 'key': 'example.com/a', - 
'first-occurred': '2023-12-07T17:01:02.123456789Z', - 'last-occurred': '2023-12-07T17:01:03.123456789Z', - 'last-repeated': '2023-12-07T17:01:04.123456789Z', - 'occurrences': 8, - })) + container.pebble.responses.append( # type: ignore + pebble.Notice.from_dict({ + 'id': '123', + 'user-id': 1000, + 'type': 'custom', + 'key': 'example.com/a', + 'first-occurred': '2023-12-07T17:01:02.123456789Z', + 'last-occurred': '2023-12-07T17:01:03.123456789Z', + 'last-repeated': '2023-12-07T17:01:04.123456789Z', + 'occurrences': 8, + }) + ) notice = container.get_notice('123') assert notice.id == '123' @@ -2042,6 +2166,7 @@ def test_get_notice(self, container: ops.Container): def test_get_notice_not_found(self, container: ops.Container): def raise_error(id: str): raise pebble.APIError({'body': ''}, 404, 'status', 'api error!') + container.pebble.get_notice = raise_error with pytest.raises(ops.ModelError): container.get_notice('123') @@ -2071,12 +2196,17 @@ def test_get_notices(self, container: ops.Container): assert notices[0].type == pebble.NoticeType.CUSTOM assert notices[0].key == 'example.com/b' - assert container.pebble.requests == [('get_notices', dict( # type: ignore - user_id=1000, - users=pebble.NoticesUsers.ALL, - types=[pebble.NoticeType.CUSTOM], - keys=['example.com/a', 'example.com/b'], - ))] + assert container.pebble.requests == [ # type: ignore + ( + 'get_notices', + dict( + user_id=1000, + users=pebble.NoticesUsers.ALL, + types=[pebble.NoticeType.CUSTOM], + keys=['example.com/a', 'example.com/b'], + ), + ) + ] class MockPebbleBackend(_ModelBackend): @@ -2109,11 +2239,13 @@ def stop_services(self, service_names: str): def restart_services(self, service_names: str): self.requests.append(('restart', service_names)) - def add_layer(self, - label: str, - layer: typing.Union[str, ops.pebble.LayerDict, ops.pebble.Layer], - *, - combine: bool = False): + def add_layer( + self, + label: str, + layer: typing.Union[str, ops.pebble.LayerDict, ops.pebble.Layer], + *, + combine: bool = False, + ): if isinstance(layer, dict): layer = pebble.Layer(layer).to_yaml() elif isinstance(layer, pebble.Layer): @@ -2137,36 +2269,56 @@ def pull(self, path: str, *, encoding: str = 'utf-8'): return self.responses.pop(0) def push( - self, - path: str, - source: 'ops.pebble._IOSource', - *, - encoding: str = 'utf-8', - make_dirs: bool = False, - permissions: typing.Optional[int] = None, - user_id: typing.Optional[int] = None, - user: typing.Optional[str] = None, - group_id: typing.Optional[int] = None, - group: typing.Optional[str] = None): - self.requests.append(('push', path, source, encoding, make_dirs, permissions, - user_id, user, group_id, group)) + self, + path: str, + source: 'ops.pebble._IOSource', + *, + encoding: str = 'utf-8', + make_dirs: bool = False, + permissions: typing.Optional[int] = None, + user_id: typing.Optional[int] = None, + user: typing.Optional[str] = None, + group_id: typing.Optional[int] = None, + group: typing.Optional[str] = None, + ): + self.requests.append(( + 'push', + path, + source, + encoding, + make_dirs, + permissions, + user_id, + user, + group_id, + group, + )) def list_files(self, path: str, *, pattern: typing.Optional[str] = None, itself: bool = False): self.requests.append(('list_files', path, pattern, itself)) return self.responses.pop(0) def make_dir( - self, - path: str, - *, - make_parents: bool = False, - permissions: typing.Optional[int] = None, - user_id: typing.Optional[int] = None, - user: typing.Optional[str] = None, - group_id: typing.Optional[int] = None, - 
group: typing.Optional[str] = None): - self.requests.append(('make_dir', path, make_parents, permissions, user_id, user, - group_id, group)) + self, + path: str, + *, + make_parents: bool = False, + permissions: typing.Optional[int] = None, + user_id: typing.Optional[int] = None, + user: typing.Optional[str] = None, + group_id: typing.Optional[int] = None, + group: typing.Optional[str] = None, + ): + self.requests.append(( + 'make_dir', + path, + make_parents, + permissions, + user_id, + user, + group_id, + group, + )) def remove_path(self, path: str, *, recursive: bool = False): self.requests.append(('remove_path', path, recursive)) @@ -2193,19 +2345,21 @@ def model(self, fake_script: FakeScript): meta = ops.CharmMeta() meta.relations = { 'db0': ops.RelationMeta( - ops.RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'}), + ops.RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'} + ), 'db1': ops.RelationMeta( - ops.RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'}), + ops.RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'} + ), 'db2': ops.RelationMeta( - ops.RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'}), + ops.RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'} + ), } backend = _ModelBackend('myapp/0') model = ops.Model(meta, backend) - fake_script.write('relation-ids', - """([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""") + fake_script.write('relation-ids', """([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""") fake_script.write('relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""") - self.network_get_out = '''{ + self.network_get_out = """{ "bind-addresses": [ { "mac-address": "de:ad:be:ef:ca:fe", @@ -2257,14 +2411,11 @@ def model(self, fake_script: FakeScript): "dead:beef::1", "2001:db8::3" ] -}''' +}""" return model def ensure_relation( - self, - model: ops.Model, - name: str = 'db1', - relation_id: typing.Optional[int] = None + self, model: ops.Model, name: str = 'db1', relation_id: typing.Optional[int] = None ): """Wrapper around model.get_relation that enforces that None is not returned.""" rel_db1 = model.get_relation(name, relation_id) @@ -2290,7 +2441,7 @@ def _check_binding_data(self, binding_name: str, binding: ops.Binding): ipaddress.ip_network('2001:db8::3/128'), ] - for (i, (name, address, subnet)) in enumerate([ + for i, (name, address, subnet) in enumerate([ ('lo', '192.0.2.2', '192.0.2.0/24'), ('lo', 'dead:beef::1', 'dead:beef::/64'), ('tun', '192.0.3.3', '192.0.3.3/32'), @@ -2301,7 +2452,7 @@ def _check_binding_data(self, binding_name: str, binding: ops.Binding): assert binding.network.interfaces[i].address == ipaddress.ip_address(address) assert binding.network.interfaces[i].subnet == ipaddress.ip_network(subnet) - for (i, (name, address, subnet)) in enumerate([ + for i, (name, address, subnet) in enumerate([ ('lo', '192.0.2.2', '192.0.2.0/24'), ('lo', 'dead:beef::1', 'dead:beef::/64'), ('tun', '192.0.3.3', '192.0.3.3/32'), @@ -2321,14 +2472,15 @@ def test_invalid_keys(self, model: ops.Model): def test_dead_relations(self, fake_script: FakeScript, model: ops.Model): fake_script.write( 'network-get', - f''' + f""" if [ "$1" = db0 ] && [ "$2" = --format=json ]; then echo '{self.network_get_out}' else echo ERROR invalid value "$2" for option -r: relation not found >&2 exit 2 fi - ''') + """, + ) # Validate the behavior for dead relations. 
binding = ops.Binding('db0', 42, model._backend) assert binding.network.bind_address == ipaddress.ip_address('192.0.2.2') @@ -2341,16 +2493,20 @@ def test_broken_relations(self, fake_script: FakeScript): meta = ops.CharmMeta() meta.relations = { 'db0': ops.RelationMeta( - ops.RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'}), + ops.RelationRole.provides, 'db0', {'interface': 'db0', 'scope': 'global'} + ), 'db1': ops.RelationMeta( - ops.RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'}), + ops.RelationRole.requires, 'db1', {'interface': 'db1', 'scope': 'global'} + ), 'db2': ops.RelationMeta( - ops.RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'}), + ops.RelationRole.peer, 'db2', {'interface': 'db2', 'scope': 'global'} + ), } backend = _ModelBackend('myapp/0') model = ops.Model(meta, backend, broken_relation_id=8) - fake_script.write('relation-ids', - """if [ "$1" = "db0" ]; then + fake_script.write( + 'relation-ids', + """if [ "$1" = "db0" ]; then echo '["db0:4"]' elif [ "$1" = "db1" ]; then echo '["db1:8"]' @@ -2359,15 +2515,17 @@ def test_broken_relations(self, fake_script: FakeScript): else echo '[]' fi - """) + """, + ) fake_script.write('relation-list', """echo '""'""") assert model.relations['db0'] assert not model.relations['db1'] assert model.relations['db2'] def test_binding_by_relation_name(self, fake_script: FakeScript, model: ops.Model): - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{self.network_get_out}' || exit 1''') + fake_script.write( + 'network-get', f"""[ "$1" = db0 ] && echo '{self.network_get_out}' || exit 1""" + ) binding_name = 'db0' expected_calls = [['network-get', 'db0', '--format=json']] @@ -2376,8 +2534,9 @@ def test_binding_by_relation_name(self, fake_script: FakeScript, model: ops.Mode assert fake_script.calls(clear=True) == expected_calls def test_binding_by_relation(self, fake_script: FakeScript, model: ops.Model): - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{self.network_get_out}' || exit 1''') + fake_script.write( + 'network-get', f"""[ "$1" = db0 ] && echo '{self.network_get_out}' || exit 1""" + ) binding_name = 'db0' expected_calls = [ ['relation-ids', 'db0', '--format=json'], @@ -2395,25 +2554,16 @@ def test_binding_no_iface_name(self, fake_script: FakeScript, model: ops.Model): { 'mac-address': '', 'interface-name': '', - 'addresses': [ - { - 'hostname': '', - 'value': '10.1.89.35', - 'cidr': '' - } - ] + 'addresses': [{'hostname': '', 'value': '10.1.89.35', 'cidr': ''}], } ], - 'egress-subnets': [ - '10.152.183.158/32' - ], - 'ingress-addresses': [ - '10.152.183.158' - ] + 'egress-subnets': ['10.152.183.158/32'], + 'ingress-addresses': ['10.152.183.158'], } network_get_out = json.dumps(network_get_out_obj) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_get_out}' || exit 1''') + fake_script.write( + 'network-get', f"""[ "$1" = db0 ] && echo '{network_get_out}' || exit 1""" + ) binding_name = 'db0' expected_calls = [['network-get', 'db0', '--format=json']] @@ -2425,37 +2575,35 @@ def test_binding_no_iface_name(self, fake_script: FakeScript, model: ops.Model): def test_missing_bind_addresses(self, fake_script: FakeScript, model: ops.Model): network_data = json.dumps({}) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, 
self.ensure_relation(model, binding_name)) assert binding.network.interfaces == [] def test_empty_bind_addresses(self, fake_script: FakeScript, model: ops.Model): network_data = json.dumps({'bind-addresses': [{}]}) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, self.ensure_relation(model, binding_name)) assert binding.network.interfaces == [] def test_no_bind_addresses(self, fake_script: FakeScript, model: ops.Model): network_data = json.dumps({'bind-addresses': [{'addresses': None}]}) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, self.ensure_relation(model, binding_name)) assert binding.network.interfaces == [] def test_empty_interface_info(self, fake_script: FakeScript, model: ops.Model): network_data = json.dumps({ - 'bind-addresses': [{ - 'interface-name': 'eth0', - 'addresses': [{}], - }], + 'bind-addresses': [ + { + 'interface-name': 'eth0', + 'addresses': [{}], + } + ], }) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, self.ensure_relation(model, binding_name)) assert len(binding.network.interfaces) == 1 @@ -2467,8 +2615,7 @@ def test_missing_ingress_addresses(self, fake_script: FakeScript, model: ops.Mod network_data = json.dumps({ 'bind-addresses': [], }) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, self.ensure_relation(model, binding_name)) assert binding.network.ingress_addresses == [] @@ -2479,8 +2626,7 @@ def test_missing_egress_subnets(self, fake_script: FakeScript, model: ops.Model) 'bind-addresses': [], 'ingress-addresses': [], }) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, self.ensure_relation(model, binding_name)) assert binding.network.egress_subnets == [] @@ -2489,12 +2635,9 @@ def test_unresolved_ingress_addresses(self, fake_script: FakeScript, model: ops. # sometimes juju fails to resolve an url to an IP, in which case # ingress-addresses will be the 'raw' url instead of an IP. 
network_data = json.dumps({ - 'ingress-addresses': [ - 'foo.bar.baz.com' - ], + 'ingress-addresses': ['foo.bar.baz.com'], }) - fake_script.write('network-get', - f'''[ "$1" = db0 ] && echo '{network_data}' || exit 1''') + fake_script.write('network-get', f"""[ "$1" = db0 ] && echo '{network_data}' || exit 1""") binding_name = 'db0' binding = self.ensure_binding(model, self.ensure_relation(model, binding_name)) assert binding.network.ingress_addresses == ['foo.bar.baz.com'] @@ -2535,9 +2678,9 @@ def test_relation_get_set_is_app_arg(self): self.backend.relation_get(1, 'fooentity', is_app=is_app_v) # type: ignore def test_is_leader_refresh(self, fake_script: FakeScript): - meta = ops.CharmMeta.from_yaml(''' + meta = ops.CharmMeta.from_yaml(""" name: myapp - ''') + """) model = ops.Model(meta, self.backend) fake_script.write('is-leader', 'echo false') assert not model.unit.is_leader() @@ -2559,47 +2702,56 @@ def test_relation_tool_errors(self, fake_script: FakeScript, monkeypatch: pytest monkeypatch.setenv('JUJU_VERSION', '2.8.0') err_msg = 'ERROR invalid value "$2" for option -r: relation not found' - test_cases = [( - lambda: fake_script.write('relation-list', 'echo fooerror >&2 ; exit 1'), - lambda: self.backend.relation_list(3), - ops.ModelError, - [['relation-list', '-r', '3', '--format=json']], - ), ( - lambda: fake_script.write('relation-list', f'echo {err_msg} >&2 ; exit 2'), - lambda: self.backend.relation_list(3), - ops.RelationNotFoundError, - [['relation-list', '-r', '3', '--format=json']], - ), ( - lambda: fake_script.write('relation-set', 'echo fooerror >&2 ; exit 1'), - lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), - ops.ModelError, - [['relation-set', '-r', '3', '--file', '-']], - ), ( - lambda: fake_script.write('relation-set', f'echo {err_msg} >&2 ; exit 2'), - lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), - ops.RelationNotFoundError, - [['relation-set', '-r', '3', '--file', '-']], - ), ( - lambda: None, - lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=True), - ops.RelationNotFoundError, - [['relation-set', '-r', '3', '--app', '--file', '-']], - ), ( - lambda: fake_script.write('relation-get', 'echo fooerror >&2 ; exit 1'), - lambda: self.backend.relation_get(3, 'remote/0', is_app=False), - ops.ModelError, - [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], - ), ( - lambda: fake_script.write('relation-get', f'echo {err_msg} >&2 ; exit 2'), - lambda: self.backend.relation_get(3, 'remote/0', is_app=False), - ops.RelationNotFoundError, - [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], - ), ( - lambda: None, - lambda: self.backend.relation_get(3, 'remote/0', is_app=True), - ops.RelationNotFoundError, - [['relation-get', '-r', '3', '-', 'remote/0', '--app', '--format=json']], - )] + test_cases = [ + ( + lambda: fake_script.write('relation-list', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_list(3), + ops.ModelError, + [['relation-list', '-r', '3', '--format=json']], + ), + ( + lambda: fake_script.write('relation-list', f'echo {err_msg} >&2 ; exit 2'), + lambda: self.backend.relation_list(3), + ops.RelationNotFoundError, + [['relation-list', '-r', '3', '--format=json']], + ), + ( + lambda: fake_script.write('relation-set', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.ModelError, + [['relation-set', '-r', '3', '--file', '-']], + ), + ( + lambda: fake_script.write('relation-set', f'echo {err_msg} >&2 ; exit 2'), + lambda: 
self.backend.relation_set(3, 'foo', 'bar', is_app=False), + ops.RelationNotFoundError, + [['relation-set', '-r', '3', '--file', '-']], + ), + ( + lambda: None, + lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=True), + ops.RelationNotFoundError, + [['relation-set', '-r', '3', '--app', '--file', '-']], + ), + ( + lambda: fake_script.write('relation-get', 'echo fooerror >&2 ; exit 1'), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.ModelError, + [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], + ), + ( + lambda: fake_script.write('relation-get', f'echo {err_msg} >&2 ; exit 2'), + lambda: self.backend.relation_get(3, 'remote/0', is_app=False), + ops.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--format=json']], + ), + ( + lambda: None, + lambda: self.backend.relation_get(3, 'remote/0', is_app=True), + ops.RelationNotFoundError, + [['relation-get', '-r', '3', '-', 'remote/0', '--app', '--format=json']], + ), + ] for _, (do_fake, run, exception, calls) in enumerate(test_cases): do_fake() @@ -2607,19 +2759,19 @@ def test_relation_tool_errors(self, fake_script: FakeScript, monkeypatch: pytest run() assert fake_script.calls(clear=True) == calls - @pytest.mark.parametrize("version", ['2.8.0', '2.7.0']) + @pytest.mark.parametrize('version', ['2.8.0', '2.7.0']) def test_relation_get_juju_version_quirks( self, fake_script: FakeScript, monkeypatch: pytest.MonkeyPatch, version: str, ): - fake_script.write('relation-get', '''echo '{"foo": "bar"}' ''') + fake_script.write('relation-get', """echo '{"foo": "bar"}' """) # on 2.7.0+, things proceed as expected monkeypatch.setenv('JUJU_VERSION', version) rel_data = self.backend.relation_get(1, 'foo/0', is_app=True) - assert rel_data == {"foo": "bar"} + assert rel_data == {'foo': 'bar'} calls = [' '.join(i) for i in fake_script.calls(clear=True)] assert calls == ['relation-get -r 1 - foo/0 --app --format=json'] @@ -2629,7 +2781,7 @@ def test_relation_get_juju_version_quirks( self.backend.relation_get(1, 'foo/0', is_app=True) assert fake_script.calls() == [] - @pytest.mark.parametrize("version", ['2.8.0', '2.7.0']) + @pytest.mark.parametrize('version', ['2.8.0', '2.7.0']) def test_relation_set_juju_version_quirks( self, fake_script: FakeScript, @@ -2639,9 +2791,12 @@ def test_relation_set_juju_version_quirks( # on 2.7.0+, things proceed as expected t = tempfile.NamedTemporaryFile() try: - fake_script.write('relation-set', dedent(""" + fake_script.write( + 'relation-set', + dedent(""" cat >> {} - """).format(pathlib.Path(t.name).as_posix())) + """).format(pathlib.Path(t.name).as_posix()), + ) monkeypatch.setenv('JUJU_VERSION', version) self.backend.relation_set(1, 'foo', 'bar', is_app=True) calls = [' '.join(i) for i in fake_script.calls(clear=True)] @@ -2664,8 +2819,8 @@ def test_status_get(self, fake_script: FakeScript): content = '{"message": "", "status": "unknown", "status-data": {}}' fake_script.write('status-get', f"echo '{content}'") s = self.backend.status_get(is_app=False) - assert s['status'] == "unknown" - assert s['message'] == "" + assert s['status'] == 'unknown' + assert s['message'] == '' # taken from actual Juju output content = dedent(""" { @@ -2685,8 +2840,8 @@ def test_status_get(self, fake_script: FakeScript): """) fake_script.write('status-get', f"echo '{content}'") s = self.backend.status_get(is_app=True) - assert s['status'] == "maintenance" - assert s['message'] == "installing" + assert s['status'] == 'maintenance' + assert s['message'] == 'installing' assert 
fake_script.calls(clear=True) == [ ['status-get', '--include-data', '--application=False', '--format=json'], ['status-get', '--include-data', '--application=True', '--format=json'], @@ -2709,9 +2864,9 @@ def test_status_is_app_forced_kwargs(self, fake_script: FakeScript): def test_local_set_invalid_status(self, fake_script: FakeScript): # juju returns exit code 1 if you ask to set status to 'unknown' or 'error' - meta = ops.CharmMeta.from_yaml(''' + meta = ops.CharmMeta.from_yaml(""" name: myapp - ''') + """) model = ops.Model(meta, self.backend) fake_script.write('status-set', 'exit 1') fake_script.write('is-leader', 'echo true') @@ -2738,37 +2893,37 @@ def test_local_set_invalid_status(self, fake_script: FakeScript): ['status-set', '--application=True', 'error', ''], ] - @pytest.mark.parametrize("name", ["active", "waiting", "blocked", "maintenance", "error"]) + @pytest.mark.parametrize('name', ['active', 'waiting', 'blocked', 'maintenance', 'error']) def test_local_get_status(self, fake_script: FakeScript, name: str): expected_cls = { - "active": ops.ActiveStatus, - "waiting": ops.WaitingStatus, - "blocked": ops.BlockedStatus, - "maintenance": ops.MaintenanceStatus, - "error": ops.ErrorStatus, + 'active': ops.ActiveStatus, + 'waiting': ops.WaitingStatus, + 'blocked': ops.BlockedStatus, + 'maintenance': ops.MaintenanceStatus, + 'error': ops.ErrorStatus, } - meta = ops.CharmMeta.from_yaml(''' + meta = ops.CharmMeta.from_yaml(""" name: myapp - ''') + """) model = ops.Model(meta, self.backend) content = json.dumps({ - "message": "foo", - "status": name, - "status-data": {}, + 'message': 'foo', + 'status': name, + 'status-data': {}, }) fake_script.write('status-get', f"echo '{content}'") assert isinstance(model.unit.status, expected_cls[name]) assert model.unit.status.name == name - assert model.unit.status.message == "foo" + assert model.unit.status.message == 'foo' content = json.dumps({ - "application-status": { - "message": "bar", - "status": name, - "status-data": {}, + 'application-status': { + 'message': 'bar', + 'status': name, + 'status-data': {}, } }) fake_script.write('status-get', f"echo '{content}'") @@ -2776,7 +2931,7 @@ def test_local_get_status(self, fake_script: FakeScript, name: str): assert isinstance(model.app.status, expected_cls[name]) assert model.app.status.name == name - assert model.app.status.message == "bar" + assert model.app.status.message == 'bar' def test_status_set_is_app_not_bool_raises(self): for is_app_v in [None, 1, 2.0, 'a', b'beef', object]: @@ -2787,21 +2942,20 @@ def test_storage_tool_errors(self, fake_script: FakeScript): fake_script.write('storage-list', 'echo fooerror >&2 ; exit 1') with pytest.raises(ops.ModelError): self.backend.storage_list('foobar') - assert fake_script.calls(clear=True) == \ - [['storage-list', 'foobar', '--format=json']] + assert fake_script.calls(clear=True) == [['storage-list', 'foobar', '--format=json']] fake_script.write('storage-get', 'echo fooerror >&2 ; exit 1') with pytest.raises(ops.ModelError): self.backend.storage_get('foobar', 'someattr') - assert fake_script.calls(clear=True) == \ - [['storage-get', '-s', 'foobar', 'someattr', '--format=json']] + assert fake_script.calls(clear=True) == [ + ['storage-get', '-s', 'foobar', 'someattr', '--format=json'] + ] fake_script.write('storage-add', 'echo fooerror >&2 ; exit 1') with pytest.raises(ops.ModelError): self.backend.storage_add('foobar', count=2) - assert fake_script.calls(clear=True) == \ - [['storage-add', 'foobar=2']] + assert fake_script.calls(clear=True) == 
[['storage-add', 'foobar=2']] fake_script.write('storage-add', 'echo fooerror >&2 ; exit 1') with pytest.raises(TypeError): - self.backend.storage_add('foobar', count=object), # type: ignore + (self.backend.storage_add('foobar', count=object),) # type: ignore assert fake_script.calls(clear=True) == [] fake_script.write('storage-add', 'echo fooerror >&2 ; exit 1') with pytest.raises(TypeError): @@ -2809,7 +2963,7 @@ def test_storage_tool_errors(self, fake_script: FakeScript): assert fake_script.calls(clear=True) == [] def test_network_get(self, fake_script: FakeScript): - network_get_out = '''{ + network_get_out = """{ "bind-addresses": [ { "mac-address": "", @@ -2829,35 +2983,38 @@ def test_network_get(self, fake_script: FakeScript): "ingress-addresses": [ "192.0.2.2" ] -}''' - fake_script.write('network-get', - f'''[ "$1" = deadbeef ] && echo '{network_get_out}' || exit 1''') +}""" + fake_script.write( + 'network-get', f"""[ "$1" = deadbeef ] && echo '{network_get_out}' || exit 1""" + ) network_info = self.backend.network_get('deadbeef') assert network_info == json.loads(network_get_out) - assert fake_script.calls(clear=True) == \ - [['network-get', 'deadbeef', '--format=json']] + assert fake_script.calls(clear=True) == [['network-get', 'deadbeef', '--format=json']] network_info = self.backend.network_get('deadbeef', 1) assert network_info == json.loads(network_get_out) - assert fake_script.calls(clear=True) == \ - [['network-get', 'deadbeef', '-r', '1', '--format=json']] + assert fake_script.calls(clear=True) == [ + ['network-get', 'deadbeef', '-r', '1', '--format=json'] + ] def test_network_get_errors(self, fake_script: FakeScript): err_no_endpoint = 'ERROR no network config found for binding "$2"' err_no_rel = 'ERROR invalid value "$3" for option -r: relation not found' - test_cases = [( - lambda: fake_script.write('network-get', - f'echo {err_no_endpoint} >&2 ; exit 1'), - lambda: self.backend.network_get("deadbeef"), - ops.ModelError, - [['network-get', 'deadbeef', '--format=json']], - ), ( - lambda: fake_script.write('network-get', f'echo {err_no_rel} >&2 ; exit 2'), - lambda: self.backend.network_get("deadbeef", 3), - ops.RelationNotFoundError, - [['network-get', 'deadbeef', '-r', '3', '--format=json']], - )] + test_cases = [ + ( + lambda: fake_script.write('network-get', f'echo {err_no_endpoint} >&2 ; exit 1'), + lambda: self.backend.network_get('deadbeef'), + ops.ModelError, + [['network-get', 'deadbeef', '--format=json']], + ), + ( + lambda: fake_script.write('network-get', f'echo {err_no_rel} >&2 ; exit 2'), + lambda: self.backend.network_get('deadbeef', 3), + ops.RelationNotFoundError, + [['network-get', 'deadbeef', '-r', '3', '--format=json']], + ), + ] for do_fake, run, exception, calls in test_cases: do_fake() with pytest.raises(exception): @@ -2877,15 +3034,16 @@ def test_action_set_error(self, fake_script: FakeScript): fake_script.write('action-set', 'echo fooerror >&2 ; exit 1') with pytest.raises(ops.ModelError): self.backend.action_set(OrderedDict([('foo', 'bar'), ('dead', 'beef cafe')])) - assert sorted(["action-set", "dead=beef cafe", "foo=bar"] - ) == sorted(fake_script.calls(clear=True)[0]) + assert sorted(['action-set', 'dead=beef cafe', 'foo=bar']) == sorted( + fake_script.calls(clear=True)[0] + ) def test_action_log_error(self, fake_script: FakeScript): fake_script.write('action-get', '') fake_script.write('action-log', 'echo fooerror >&2 ; exit 1') with pytest.raises(ops.ModelError): self.backend.action_log('log-message') - calls = [["action-log", 
"log-message"]] + calls = [['action-log', 'log-message']] assert fake_script.calls(clear=True) == calls def test_action_get(self, fake_script: FakeScript): @@ -2921,8 +3079,9 @@ def test_action_set_more_nested(self, fake_script: FakeScript): fake_script.write('action-get', 'exit 1') fake_script.write('action-set', 'exit 0') self.backend.action_set({'a': {'b': 1, 'c': 2, 'd': {'e': 3}}, 'f': 4}) - assert sorted(['action-set', 'a.b=1', 'a.c=2', 'a.d.e=3', 'f=4'] - ) == sorted(fake_script.calls()[0]) + assert sorted(['action-set', 'a.b=1', 'a.c=2', 'a.d.e=3', 'f=4']) == sorted( + fake_script.calls()[0] + ) def test_action_set_dotted_dict(self, fake_script: FakeScript): fake_script.write('action-get', 'exit 1') @@ -2966,8 +3125,9 @@ def test_application_version_set_invalid(self, fake_script: FakeScript): def test_juju_log(self, fake_script: FakeScript): fake_script.write('juju-log', 'exit 0') self.backend.juju_log('WARNING', 'foo') - assert fake_script.calls(clear=True) == \ - [['juju-log', '--log-level', 'WARNING', '--', 'foo']] + assert fake_script.calls(clear=True) == [ + ['juju-log', '--log-level', 'WARNING', '--', 'foo'] + ] with pytest.raises(TypeError): self.backend.juju_log('DEBUG') # type: ignore @@ -2976,21 +3136,32 @@ def test_juju_log(self, fake_script: FakeScript): fake_script.write('juju-log', 'exit 1') with pytest.raises(ops.ModelError): self.backend.juju_log('BAR', 'foo') - assert fake_script.calls(clear=True) == \ - [['juju-log', '--log-level', 'BAR', '--', 'foo']] + assert fake_script.calls(clear=True) == [['juju-log', '--log-level', 'BAR', '--', 'foo']] def test_valid_metrics(self, fake_script: FakeScript): fake_script.write('add-metric', 'exit 0') - test_cases: typing.List[_ValidMetricsTestCase] = [( - OrderedDict([('foo', 42), ('b-ar', 4.5), ('ba_-z', 4.5), ('a', 1)]), - OrderedDict([('de', 'ad'), ('be', 'ef_ -')]), - [['add-metric', '--labels', 'de=ad,be=ef_ -', - 'foo=42', 'b-ar=4.5', 'ba_-z=4.5', 'a=1']] - ), ( - OrderedDict([('foo1', 0), ('b2r', 4.5)]), - OrderedDict([('d3', 'aд'), ('b33f', '3_ -')]), - [['add-metric', '--labels', 'd3=aд,b33f=3_ -', 'foo1=0', 'b2r=4.5']], - )] + test_cases: typing.List[_ValidMetricsTestCase] = [ + ( + OrderedDict([('foo', 42), ('b-ar', 4.5), ('ba_-z', 4.5), ('a', 1)]), + OrderedDict([('de', 'ad'), ('be', 'ef_ -')]), + [ + [ + 'add-metric', + '--labels', + 'de=ad,be=ef_ -', + 'foo=42', + 'b-ar=4.5', + 'ba_-z=4.5', + 'a=1', + ] + ], + ), + ( + OrderedDict([('foo1', 0), ('b2r', 4.5)]), + OrderedDict([('d3', 'aд'), ('b33f', '3_ -')]), + [['add-metric', '--labels', 'd3=aд,b33f=3_ -', 'foo1=0', 'b2r=4.5']], + ), + ] for metrics, labels, expected_calls in test_cases: self.backend.add_metrics(metrics, labels) assert fake_script.calls(clear=True) == expected_calls @@ -3055,11 +3226,15 @@ def test_relation_remote_app_name_env(self, monkeypatch: pytest.MonkeyPatch): assert self.backend.relation_remote_app_name(5) == 'remoteapp1' def test_relation_remote_app_name_script_success( - self, fake_script: FakeScript, monkeypatch: pytest.MonkeyPatch): + self, fake_script: FakeScript, monkeypatch: pytest.MonkeyPatch + ): # JUJU_RELATION_ID and JUJU_REMOTE_APP both unset - fake_script.write('relation-list', r""" + fake_script.write( + 'relation-list', + r""" echo '"remoteapp2"' -""") +""", + ) assert self.backend.relation_remote_app_name(1) == 'remoteapp2' assert fake_script.calls(clear=True) == [ ['relation-list', '-r', '1', '--app', '--format=json'], @@ -3079,19 +3254,25 @@ def test_relation_remote_app_name_script_success( assert 
self.backend.relation_remote_app_name(5) == 'remoteapp2' def test_relation_remote_app_name_script_errors(self, fake_script: FakeScript): - fake_script.write('relation-list', r""" + fake_script.write( + 'relation-list', + r""" echo "ERROR invalid value \"6\" for option -r: relation not found" >&2 # NOQA exit 2 -""") +""", + ) assert self.backend.relation_remote_app_name(6) is None assert fake_script.calls(clear=True) == [ ['relation-list', '-r', '6', '--app', '--format=json'], ] - fake_script.write('relation-list', r""" + fake_script.write( + 'relation-list', + r""" echo "ERROR option provided but not defined: --app" >&2 exit 2 -""") +""", + ) assert self.backend.relation_remote_app_name(6) is None assert fake_script.calls(clear=True) == [ ['relation-list', '-r', '6', '--app', '--format=json'], @@ -3099,36 +3280,44 @@ def test_relation_remote_app_name_script_errors(self, fake_script: FakeScript): def test_planned_units(self, fake_script: FakeScript): # no units - fake_script.write('goal-state', """ + fake_script.write( + 'goal-state', + """ echo '{"units":{}, "relations":{}}' -""") +""", + ) assert self.backend.planned_units() == 0 # only active units - fake_script.write('goal-state', """ + fake_script.write( + 'goal-state', + """ echo '{ "units":{ "app/0": {"status":"active","since":"2023-05-23 17:05:05Z"}, "app/1": {"status":"active","since":"2023-05-23 17:57:05Z"} }, "relations": {} -}'""") +}'""", + ) assert self.backend.planned_units() == 2 # active and dying units - fake_script.write('goal-state', """ + fake_script.write( + 'goal-state', + """ echo '{ "units":{ "app/0": {"status":"active","since":"2023-05-23 17:05:05Z"}, "app/1": {"status":"dying","since":"2023-05-23 17:57:05Z"} }, "relations": {} -}'""") +}'""", + ) assert self.backend.planned_units() == 1 class TestLazyMapping: - def test_invalidate(self): loaded: typing.List[int] = [] @@ -3160,24 +3349,40 @@ def test_app_add_secret_simple(self, fake_script: FakeScript, model: ops.Model): assert secret.id == 'secret:123' assert secret.label is None - assert fake_script.calls(clear=True) == [ - ['secret-add', '--owner', 'application', 'foo=x'] - ] + assert fake_script.calls(clear=True) == [['secret-add', '--owner', 'application', 'foo=x']] def test_app_add_secret_args(self, fake_script: FakeScript, model: ops.Model): fake_script.write('secret-add', 'echo secret:234') expire = datetime.datetime(2022, 12, 9, 16, 17, 0) - secret = model.app.add_secret({'foo': 'x', 'bar': 'y'}, label='lbl', description='desc', - expire=expire, rotate=ops.SecretRotate.HOURLY) + secret = model.app.add_secret( + {'foo': 'x', 'bar': 'y'}, + label='lbl', + description='desc', + expire=expire, + rotate=ops.SecretRotate.HOURLY, + ) assert secret.id == 'secret:234' assert secret.label == 'lbl' assert secret.get_content() == {'foo': 'x', 'bar': 'y'} - assert fake_script.calls(clear=True) == \ - [['secret-add', '--label', 'lbl', '--description', 'desc', - '--expire', '2022-12-09T16:17:00', '--rotate', 'hourly', - '--owner', 'application', 'foo=x', 'bar=y']] + assert fake_script.calls(clear=True) == [ + [ + 'secret-add', + '--label', + 'lbl', + '--description', + 'desc', + '--expire', + '2022-12-09T16:17:00', + '--rotate', + 'hourly', + '--owner', + 'application', + 'foo=x', + 'bar=y', + ] + ] def test_unit_add_secret_simple(self, fake_script: FakeScript, model: ops.Model): fake_script.write('secret-add', 'echo secret:345') @@ -3187,9 +3392,7 @@ def test_unit_add_secret_simple(self, fake_script: FakeScript, model: ops.Model) assert secret.id == 'secret:345' assert 
secret.label is None - assert fake_script.calls(clear=True) == [ - ['secret-add', '--owner', 'unit', 'foo=x'] - ] + assert fake_script.calls(clear=True) == [['secret-add', '--owner', 'unit', 'foo=x']] def test_unit_add_secret_args(self, fake_script: FakeScript, model: ops.Model): fake_script.write('secret-add', 'echo secret:456') @@ -3200,16 +3403,29 @@ def test_unit_add_secret_args(self, fake_script: FakeScript, model: ops.Model): label='l2', description='xyz', expire=expire, - rotate=ops.SecretRotate.YEARLY + rotate=ops.SecretRotate.YEARLY, ) assert secret.id == 'secret:456' assert secret.label == 'l2' assert secret.get_content() == {'foo': 'w', 'bar': 'z'} - assert fake_script.calls(clear=True) == \ - [['secret-add', '--label', 'l2', '--description', 'xyz', - '--expire', '2022-12-09T16:22:00', '--rotate', 'yearly', - '--owner', 'unit', 'foo=w', 'bar=z']] + assert fake_script.calls(clear=True) == [ + [ + 'secret-add', + '--label', + 'l2', + '--description', + 'xyz', + '--expire', + '2022-12-09T16:22:00', + '--rotate', + 'yearly', + '--owner', + 'unit', + 'foo=w', + 'bar=z', + ] + ] def test_unit_add_secret_errors(self, model: ops.Model): # Additional add_secret tests are done in TestApplication @@ -3251,9 +3467,7 @@ def test_get_secret_id(self, fake_script: FakeScript, model: ops.Model): assert secret.label is None assert secret.get_content() == {'foo': 'g'} - assert fake_script.calls(clear=True) == [ - ['secret-get', 'secret:123', '--format=json'] - ] + assert fake_script.calls(clear=True) == [['secret-get', 'secret:123', '--format=json']] def test_get_secret_label(self, fake_script: FakeScript, model: ops.Model): fake_script.write('secret-get', """echo '{"foo": "g"}'""") @@ -3263,9 +3477,7 @@ def test_get_secret_label(self, fake_script: FakeScript, model: ops.Model): assert secret.label == 'lbl' assert secret.get_content() == {'foo': 'g'} - assert fake_script.calls(clear=True) == [ - ['secret-get', '--label', 'lbl', '--format=json'] - ] + assert fake_script.calls(clear=True) == [['secret-get', '--label', 'lbl', '--format=json']] def test_get_secret_id_and_label(self, fake_script: FakeScript, model: ops.Model): fake_script.write('secret-get', """echo '{"foo": "h"}'""") @@ -3347,13 +3559,16 @@ def test_init(self): def test_from_dict(self): utc = datetime.timezone.utc - info = ops.SecretInfo.from_dict('secret:4', { - 'label': 'fromdict', - 'revision': 8, - 'expires': '2022-12-09T14:10:00Z', - 'rotation': 'yearly', - 'rotates': '2023-01-09T14:10:00Z', - }) + info = ops.SecretInfo.from_dict( + 'secret:4', + { + 'label': 'fromdict', + 'revision': 8, + 'expires': '2022-12-09T14:10:00Z', + 'rotation': 'yearly', + 'rotates': '2023-01-09T14:10:00Z', + }, + ) assert info.id == 'secret:4' assert info.label == 'fromdict' assert info.revision == 8 @@ -3361,11 +3576,14 @@ def test_from_dict(self): assert info.rotation == ops.SecretRotate.YEARLY assert info.rotates == datetime.datetime(2023, 1, 9, 14, 10, 0, tzinfo=utc) - info = ops.SecretInfo.from_dict('secret:4', { - 'label': 'fromdict', - 'revision': 8, - 'rotation': 'badvalue', - }) + info = ops.SecretInfo.from_dict( + 'secret:4', + { + 'label': 'fromdict', + 'revision': 8, + 'rotation': 'badvalue', + }, + ) assert info.id == 'secret:4' assert info.label == 'fromdict' assert info.revision == 8 @@ -3432,9 +3650,7 @@ def test_get_content_uncached(self, model: ops.Model, fake_script: FakeScript): content = secret.get_content() assert content == {'foo': 'notcached'} - assert fake_script.calls(clear=True) == [ - ['secret-get', 'secret:z', 
'--format=json'] - ] + assert fake_script.calls(clear=True) == [['secret-get', 'secret:z', '--format=json']] def test_get_content_copies_dict(self, model: ops.Model, fake_script: FakeScript): fake_script.write('secret-get', """echo '{"foo": "bar"}'""") @@ -3445,9 +3661,7 @@ def test_get_content_copies_dict(self, model: ops.Model, fake_script: FakeScript content['new'] = 'value' assert secret.get_content() == {'foo': 'bar'} - assert fake_script.calls(clear=True) == [ - ['secret-get', 'secret:z', '--format=json'] - ] + assert fake_script.calls(clear=True) == [['secret-get', 'secret:z', '--format=json']] def test_set_content_invalidates_cache(self, model: ops.Model, fake_script: FakeScript): fake_script.write('secret-get', """echo '{"foo": "bar"}'""") @@ -3550,8 +3764,18 @@ def test_set_info(self, model: ops.Model, fake_script: FakeScript): assert secret.id == 'secret:z' assert fake_script.calls(clear=True) == [ - ['secret-set', 'secret:x', '--label', 'lab', '--description', 'desc', - '--expire', '2022-12-09T16:59:00', '--rotate', 'monthly'], + [ + 'secret-set', + 'secret:x', + '--label', + 'lab', + '--description', + 'desc', + '--expire', + '2022-12-09T16:59:00', + '--rotate', + 'monthly', + ], ['secret-info-get', '--label', 'y', '--format=json'], ['secret-set', 'secret:z', '--label', 'lbl'], ] @@ -3734,10 +3958,8 @@ def test_opened_ports(self, fake_script: FakeScript, unit: ops.Unit): ] def test_opened_ports_warnings( - self, - caplog: pytest.LogCaptureFixture, - fake_script: FakeScript, - unit: ops.Unit): + self, caplog: pytest.LogCaptureFixture, fake_script: FakeScript, unit: ops.Unit + ): fake_script.write('opened-ports', """echo 8080/tcp; echo 1234/ftp; echo 1000-2000/udp""") with caplog.at_level(level='WARNING', logger='ops.model'): @@ -3882,12 +4104,10 @@ def get_notice(self, id: str): def test_repr(self): workload = typing.cast(ops.Container, None) n = ops.model.LazyNotice(workload, '123', 'custom', 'example.com/a') - assert repr(n) == \ - "LazyNotice(id='123', type=NoticeType.CUSTOM, key='example.com/a')" + assert repr(n) == "LazyNotice(id='123', type=NoticeType.CUSTOM, key='example.com/a')" n = ops.model.LazyNotice(workload, '123', 'foobar', 'example.com/a') - assert repr(n) == \ - "LazyNotice(id='123', type='foobar', key='example.com/a')" + assert repr(n) == "LazyNotice(id='123', type='foobar', key='example.com/a')" class TestCloudCredential: @@ -3903,12 +4123,8 @@ def test_from_dict(self): def test_from_dict_full(self): d = { 'auth-type': 'certificate', - 'attrs': { - 'client-cert': 'foo', - 'client-key': 'bar', - 'server-cert': 'baz' - }, - 'redacted': ['foo'] + 'attrs': {'client-cert': 'foo', 'client-key': 'bar', 'server-cert': 'baz'}, + 'redacted': ['foo'], } cloud_cred = ops.CloudCredential.from_dict(d) assert cloud_cred.auth_type == d['auth-type'] @@ -3918,12 +4134,10 @@ def test_from_dict_full(self): class TestCloudSpec: def test_from_dict(self): - cloud_spec = ops.CloudSpec.from_dict( - { - 'type': 'lxd', - 'name': 'localhost', - } - ) + cloud_spec = ops.CloudSpec.from_dict({ + 'type': 'lxd', + 'name': 'localhost', + }) assert cloud_spec.type == 'lxd' assert cloud_spec.name == 'localhost' assert cloud_spec.region is None @@ -3938,12 +4152,8 @@ def test_from_dict(self): def test_from_dict_full(self): cred = { 'auth-type': 'certificate', - 'attrs': { - 'client-cert': 'foo', - 'client-key': 'bar', - 'server-cert': 'baz' - }, - 'redacted': ['foo'] + 'attrs': {'client-cert': 'foo', 'client-key': 'bar', 'server-cert': 'baz'}, + 'redacted': ['foo'], } d = { 'type': 'lxd', @@ 
-4008,12 +4218,12 @@ def test_success(self, fake_script: FakeScript, model: ops.Model):
 
     def test_error(self, fake_script: FakeScript, model: ops.Model):
         fake_script.write(
-            'credential-get',
-            """echo 'ERROR cannot access cloud credentials' >&2; exit 1""")
+            'credential-get', """echo 'ERROR cannot access cloud credentials' >&2; exit 1"""
+        )
         with pytest.raises(ops.ModelError) as excinfo:
             model.get_cloud_spec()
         assert str(excinfo.value) == 'ERROR cannot access cloud credentials\n'
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     unittest.main()
diff --git a/test/test_pebble.py b/test/test_pebble.py
index 7dda0c739..5ac5d7de2 100644
--- a/test/test_pebble.py
+++ b/test/test_pebble.py
@@ -20,7 +20,6 @@
 import signal
 import socket
 import tempfile
-import test.fake_pebble as fake_pebble
 import typing
 import unittest
 import unittest.mock
@@ -29,6 +28,7 @@
 import pytest
 import websocket  # type: ignore
 
+import test.fake_pebble as fake_pebble
 from ops import pebble
 from ops._private import yaml
 
@@ -76,14 +76,12 @@ def test_path_error(self):
 
     def test_api_error(self):
         body = {
-            "result": {
-                "message": "no services to start provided"
-            },
-            "status": "Bad Request",
-            "status-code": 400,
-            "type": "error"
+            'result': {'message': 'no services to start provided'},
+            'status': 'Bad Request',
+            'status-code': 400,
+            'type': 'error',
         }
-        error = pebble.APIError(body, 400, "Bad Request", "no services")
+        error = pebble.APIError(body, 400, 'Bad Request', 'no services')
         assert isinstance(error, pebble.Error)
         assert error.body == body
         assert error.code == 400
@@ -158,13 +156,16 @@ def test_change_error_with_task_logs(self):
         assert isinstance(error, pebble.Error)
         assert error.err == 'Some error'
         assert error.change == change
-        assert str(error) == """Some error
+        assert (
+            str(error)
+            == """Some error
 ----- Logs from task 0 -----
 LINE1
 LINE2
 ----- Logs from task 2 -----
 single log
 -----"""
+        )
 
     def test_warning_state(self):
         assert list(pebble.WarningState) == [
@@ -285,18 +286,18 @@ def test_task_init(self):
 
     def test_task_from_dict(self):
         d: pebble._TaskDict = {
-            "id": "78",
-            "kind": "start",
-            "progress": {
-                "done": 1,
-                "label": "",
-                "total": 1,
-            },
-            "ready-time": "2021-01-28T14:37:03.270218778+13:00",
-            "spawn-time": "2021-01-28T14:37:02.247158162+13:00",
-            "status": "Done",
-            "summary": 'Start service "svc"',
-            "data": {"exit-code": 42},
+            'id': '78',
+            'kind': 'start',
+            'progress': {
+                'done': 1,
+                'label': '',
+                'total': 1,
+            },
+            'ready-time': '2021-01-28T14:37:03.270218778+13:00',
+            'spawn-time': '2021-01-28T14:37:02.247158162+13:00',
+            'status': 'Done',
+            'summary': 'Start service "svc"',
+            'data': {'exit-code': 42},
         }
         task = pebble.Task.from_dict(d)
         assert task.id == '78'
@@ -345,17 +346,17 @@ def test_change_init(self):
         assert change.data == {}
 
     def test_change_from_dict(self):
-        d: 'pebble._ChangeDict' = {
-            "id": "70",
-            "kind": "autostart",
-            "err": "SILLY",
-            "ready": True,
-            "ready-time": "2021-01-28T14:37:04.291517768+13:00",
-            "spawn-time": "2021-01-28T14:37:02.247202105+13:00",
-            "status": "Done",
-            "summary": 'Autostart service "svc"',
-            "tasks": [],
-            "data": {"exit-code": 42},
+        d: pebble._ChangeDict = {
+            'id': '70',
+            'kind': 'autostart',
+            'err': 'SILLY',
+            'ready': True,
+            'ready-time': '2021-01-28T14:37:04.291517768+13:00',
+            'spawn-time': '2021-01-28T14:37:02.247202105+13:00',
+            'status': 'Done',
+            'summary': 'Autostart service "svc"',
+            'tasks': [],
+            'data': {'exit-code': 42},
         }
         change = pebble.Change.from_dict(d)
         assert change.id == '70'
@@ -394,9 +395,18 @@ def
test_file_type(self): assert pebble.FileType.UNKNOWN.value == 'unknown' def test_file_info_init(self): - info = pebble.FileInfo('/etc/hosts', 'hosts', pebble.FileType.FILE, 123, 0o644, - datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518), - 12, 'bob', 34, 'staff') + info = pebble.FileInfo( + '/etc/hosts', + 'hosts', + pebble.FileType.FILE, + 123, + 0o644, + datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518), + 12, + 'bob', + 34, + 'staff', + ) assert info.path == '/etc/hosts' assert info.name == 'hosts' assert info.type == pebble.FileType.FILE @@ -536,7 +546,7 @@ def test_log_targets(self): plan = pebble.Plan('') assert plan.log_targets == {} - location = "https://example.com:3100/loki/api/v1/push" + location = 'https://example.com:3100/loki/api/v1/push' plan = pebble.Plan(f""" log-targets: baz: @@ -548,7 +558,7 @@ def test_log_targets(self): assert len(plan.log_targets) == 1 assert plan.log_targets['baz'].name == 'baz' assert plan.log_targets['baz'].override == 'replace' - assert plan.log_targets['baz'].type == "loki" + assert plan.log_targets['baz'].type == 'loki' assert plan.log_targets['baz'].location == location # Should be read-only ("can't set attribute") @@ -562,7 +572,7 @@ def test_yaml(self): assert str(plan) == '{}\n' # With a service, we return validated yaml content. - raw = '''\ + raw = """\ services: foo: override: replace @@ -578,7 +588,7 @@ def test_yaml(self): override: replace type: loki location: https://example.com:3100/loki/api/v1/push -''' +""" plan = pebble.Plan(raw) reformed = yaml.safe_dump(yaml.safe_load(raw)) assert plan.to_yaml() == reformed @@ -593,22 +603,22 @@ def test_plandict(self): # With a service, we return validated yaml content. raw: pebble.PlanDict = { - "services": { - "foo": { - "override": "replace", - "command": "echo foo", + 'services': { + 'foo': { + 'override': 'replace', + 'command': 'echo foo', }, }, - "checks": { - "bar": { - "http": {"url": "https://example.com/"}, + 'checks': { + 'bar': { + 'http': {'url': 'https://example.com/'}, }, }, - "log-targets": { - "baz": { - "override": "replace", - "type": "loki", - "location": "https://example.com:3100/loki/api/v1/push", + 'log-targets': { + 'baz': { + 'override': 'replace', + 'type': 'loki', + 'location': 'https://example.com:3100/loki/api/v1/push', }, }, } @@ -623,52 +633,48 @@ def test_service_equality(self): command: echo foo """) - old_service = pebble.Service(name="foo", - raw={ - "override": "replace", - "command": "echo foo" - }) - old_services = {"foo": old_service} + old_service = pebble.Service( + name='foo', raw={'override': 'replace', 'command': 'echo foo'} + ) + old_services = {'foo': old_service} assert plan.services == old_services - services_as_dict = { - "foo": {"override": "replace", "command": "echo foo"} - } + services_as_dict = {'foo': {'override': 'replace', 'command': 'echo foo'}} assert plan.services == services_as_dict def test_plan_equality(self): - plan1 = pebble.Plan(''' + plan1 = pebble.Plan(""" services: foo: override: replace command: echo foo -''') - assert plan1 != "foo" - plan2 = pebble.Plan(''' +""") + assert plan1 != 'foo' + plan2 = pebble.Plan(""" services: foo: command: echo foo override: replace -''') +""") assert plan1 == plan2 plan1_as_dict = { - "services": { - "foo": { - "command": "echo foo", - "override": "replace", + 'services': { + 'foo': { + 'command': 'echo foo', + 'override': 'replace', }, }, } assert plan1 == plan1_as_dict - plan3 = pebble.Plan(''' + plan3 = pebble.Plan(""" services: foo: override: replace command: echo bar -''') +""") # Different 
command. assert plan1 != plan3 - plan4 = pebble.Plan(''' + plan4 = pebble.Plan(""" services: foo: override: replace @@ -684,8 +690,8 @@ def test_plan_equality(self): override: replace type: loki location: https://example.com:3100/loki/api/v1/push -''') - plan5 = pebble.Plan(''' +""") + plan5 = pebble.Plan(""" services: foo: override: replace @@ -701,10 +707,10 @@ def test_plan_equality(self): override: replace type: loki location: https://example.com:3100/loki/api/v1/push -''') +""") # Different checks.bar.http assert plan4 != plan5 - plan6 = pebble.Plan(''' + plan6 = pebble.Plan(""" services: foo: override: replace @@ -720,10 +726,10 @@ def test_plan_equality(self): override: replace type: loki location: https://example.com:3200/loki/api/v1/push -''') +""") # Reordered elements. assert plan4 != plan6 - plan7 = pebble.Plan(''' + plan7 = pebble.Plan(""" services: foo: command: echo foo @@ -740,7 +746,7 @@ def test_plan_equality(self): http: https://example.com/ -''') +""") # Reordered sections. assert plan4 == plan7 @@ -783,9 +789,9 @@ def test_dict(self): 'services': ['foo'], 'labels': { 'key': 'value $VAR', - } + }, }, - } + }, } s = pebble.Layer(d) assert s.summary == 'Sum Mary' @@ -844,8 +850,7 @@ def test_yaml(self): assert s.services['bar'].name == 'bar' assert s.services['bar'].summary == 'Bar' assert s.services['bar'].command == 'echo bar' - assert s.services['bar'].environment == \ - {'ENV1': 'value1', 'ENV2': 'value2'} + assert s.services['bar'].environment == {'ENV1': 'value1', 'ENV2': 'value2'} assert s.services['bar'].user == 'bob' assert s.services['bar'].user_id == 1000 assert s.services['bar'].group == 'staff' @@ -877,7 +882,7 @@ def test_layer_service_equality(self): 'summary': 'Bar', 'command': 'echo bar', }, - } + }, } s = pebble.Layer(d) t = pebble.Layer(d) @@ -900,7 +905,7 @@ def test_layer_equality(self): 'summary': 'Bar', 'command': 'echo bar', }, - } + }, } t = pebble.Layer(d) assert s != t @@ -1029,8 +1034,8 @@ def test_equality(self): 'group': 'staff', 'group-id': 2000, } - one = pebble.Service("Name 1", d) - two = pebble.Service("Name 1", d) + one = pebble.Service('Name 1', d) + two = pebble.Service('Name 1', d) assert one == two as_dict = { @@ -1157,7 +1162,7 @@ def test_dict(self): 'type': 'loki', 'location': 'https://example.com:3100/loki/api/v1/push', 'services': ['+all'], - 'labels': {'key': 'val', 'key2': 'val2'} + 'labels': {'key': 'val', 'key2': 'val2'}, } target = pebble.LogTarget('tgt', d) assert target.name == 'tgt' @@ -1183,7 +1188,7 @@ def test_equality(self): 'type': 'loki', 'location': 'https://example.com', 'services': ['foo', 'bar'], - 'labels': {'k': 'v'} + 'labels': {'k': 'v'}, } one = pebble.LogTarget('one', d) two = pebble.LogTarget('two', d) @@ -1222,10 +1227,7 @@ def test_service_info(self): assert s.startup == pebble.ServiceStartup.ENABLED assert s.current == pebble.ServiceStatus.ACTIVE - s = pebble.ServiceInfo( - 'svc1', - pebble.ServiceStartup.ENABLED, - pebble.ServiceStatus.ACTIVE) + s = pebble.ServiceInfo('svc1', pebble.ServiceStartup.ENABLED, pebble.ServiceStatus.ACTIVE) assert s.name == 'svc1' assert s.startup == pebble.ServiceStartup.ENABLED assert s.current == pebble.ServiceStatus.ACTIVE @@ -1345,12 +1347,13 @@ def __init__(self): self.timeout = 5 self.websockets: typing.Dict[typing.Any, MockWebsocket] = {} - def _request(self, - method: str, - path: str, - query: typing.Optional[typing.Dict[str, typing.Any]] = None, - body: typing.Optional[typing.Dict[str, typing.Any]] = None - ) -> typing.Dict[str, typing.Any]: + def _request( + 
self, + method: str, + path: str, + query: typing.Optional[typing.Dict[str, typing.Any]] = None, + body: typing.Optional[typing.Dict[str, typing.Any]] = None, + ) -> typing.Dict[str, typing.Any]: self.requests.append((method, path, query, body)) resp = self.responses.pop(0) if isinstance(resp, Exception): @@ -1359,12 +1362,14 @@ def _request(self, resp = resp() return resp - def _request_raw(self, - method: str, - path: str, - query: typing.Optional[typing.Dict[str, typing.Any]] = None, - headers: typing.Optional[typing.Dict[str, str]] = None, - data: typing.Optional[typing.Union[bytes, _bytes_generator]] = None): + def _request_raw( + self, + method: str, + path: str, + query: typing.Optional[typing.Dict[str, typing.Any]] = None, + headers: typing.Optional[typing.Dict[str, str]] = None, + data: typing.Optional[typing.Union[bytes, _bytes_generator]] = None, + ): self.requests.append((method, path, query, headers, data)) headers, body = self.responses.pop(0) assert headers is not None @@ -1402,45 +1407,46 @@ def sleep(self, delay: float): def build_mock_change_dict(change_id: str = '70') -> 'pebble._ChangeDict': return { - "id": change_id, - "kind": "autostart", - "ready": True, - "ready-time": "2021-01-28T14:37:04.291517768+13:00", - "spawn-time": "2021-01-28T14:37:02.247202105+13:00", - "status": "Done", - "summary": 'Autostart service "svc"', - "tasks": [ + 'id': change_id, + 'kind': 'autostart', + 'ready': True, + 'ready-time': '2021-01-28T14:37:04.291517768+13:00', + 'spawn-time': '2021-01-28T14:37:02.247202105+13:00', + 'status': 'Done', + 'summary': 'Autostart service "svc"', + 'tasks': [ { - "id": "78", - "kind": "start", - "progress": { - "done": 1, - "label": "", - "total": 1, - "extra-field": "foo", # type: ignore + 'id': '78', + 'kind': 'start', + 'progress': { + 'done': 1, + 'label': '', + 'total': 1, + 'extra-field': 'foo', # type: ignore }, - "ready-time": "2021-01-28T14:37:03.270218778+13:00", - "spawn-time": "2021-01-28T14:37:02.247158162+13:00", - "status": "Done", - "summary": 'Start service "svc"', - "extra-field": "foo", + 'ready-time': '2021-01-28T14:37:03.270218778+13:00', + 'spawn-time': '2021-01-28T14:37:02.247158162+13:00', + 'status': 'Done', + 'summary': 'Start service "svc"', + 'extra-field': 'foo', }, ], - "extra-field": "foo", + 'extra-field': 'foo', } class MultipartParserTestCase: def __init__( - self, - name: str, - data: bytes, - want_headers: typing.List[bytes], - want_bodies: typing.List[bytes], - want_bodies_done: typing.List[bool], - max_boundary: int = 14, - max_lookahead: int = 8 * 1024, - error: str = ''): + self, + name: str, + data: bytes, + want_headers: typing.List[bytes], + want_bodies: typing.List[bytes], + want_bodies_done: typing.List[bool], + max_boundary: int = 14, + max_lookahead: int = 8 * 1024, + error: str = '', + ): self.name = name self.data = data self.want_headers = want_headers @@ -1452,89 +1458,92 @@ def __init__( class TestMultipartParser: - @pytest.mark.parametrize("test", [ - MultipartParserTestCase( - 'baseline', - b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\nfoo bar\r\n--qwerty--\r\n', - [b'header foo\r\n\r\n'], - [b'foo bar\nfoo bar'], - want_bodies_done=[True], - ), - MultipartParserTestCase( - 'incomplete header', - b'\r\n--qwerty\r\nheader foo\r\n', - [], - [], - want_bodies_done=[], - ), - MultipartParserTestCase( - 'missing header', - b'\r\n--qwerty\r\nheader foo\r\n' + 40 * b' ', - [], - [], - want_bodies_done=[], - max_lookahead=40, - error='header terminator not found', - ), - MultipartParserTestCase( - 'incomplete 
body terminator', - b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\rhello my name is joe and I work in a button factory', # noqa - [b'header foo\r\n\r\n'], - [b'foo bar\r\n--qwerty\rhello my name is joe and I work in a '], - want_bodies_done=[False], - ), - MultipartParserTestCase( - 'empty body', - b'\r\n--qwerty\r\nheader foo\r\n\r\n\r\n--qwerty\r\n', - [b'header foo\r\n\r\n'], - [b''], - want_bodies_done=[True], - ), - MultipartParserTestCase( - 'ignore leading garbage', - b'hello my name is joe\r\n\n\n\n\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\n', # noqa - [b'header foo\r\n\r\n'], - [b'foo bar'], - want_bodies_done=[True], - ), - MultipartParserTestCase( - 'ignore trailing garbage', - b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\nhello my name is joe', - [b'header foo\r\n\r\n'], - [b'foo bar'], - want_bodies_done=[True], - ), - MultipartParserTestCase( - 'boundary allow linear whitespace', - b'\r\n--qwerty \t \r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\n', - [b'header foo\r\n\r\n'], - [b'foo bar'], - want_bodies_done=[True], - max_boundary=20, - ), - MultipartParserTestCase( - 'terminal boundary allow linear whitespace', - b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty-- \t \r\n', - [b'header foo\r\n\r\n'], - [b'foo bar'], - want_bodies_done=[True], - max_boundary=20, - ), - MultipartParserTestCase( - 'multiple parts', - b'\r\n--qwerty \t \r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\nheader bar\r\n\r\nfoo baz\r\n--qwerty--\r\n', # noqa - [b'header foo\r\n\r\n', b'header bar\r\n\r\n'], - [b'foo bar', b'foo baz'], - want_bodies_done=[True, True], - ), - MultipartParserTestCase( - 'ignore after terminal boundary', - b'\r\n--qwerty \t \r\nheader foo\r\n\r\nfoo bar\r\n--qwerty--\r\nheader bar\r\n\r\nfoo baz\r\n--qwerty--\r\n', # noqa - [b'header foo\r\n\r\n'], - [b'foo bar'], - want_bodies_done=[True], - ), - ]) + @pytest.mark.parametrize( + 'test', + [ + MultipartParserTestCase( + 'baseline', + b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\nfoo bar\r\n--qwerty--\r\n', + [b'header foo\r\n\r\n'], + [b'foo bar\nfoo bar'], + want_bodies_done=[True], + ), + MultipartParserTestCase( + 'incomplete header', + b'\r\n--qwerty\r\nheader foo\r\n', + [], + [], + want_bodies_done=[], + ), + MultipartParserTestCase( + 'missing header', + b'\r\n--qwerty\r\nheader foo\r\n' + 40 * b' ', + [], + [], + want_bodies_done=[], + max_lookahead=40, + error='header terminator not found', + ), + MultipartParserTestCase( + 'incomplete body terminator', + b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\rhello my name is joe and I work in a button factory', # noqa + [b'header foo\r\n\r\n'], + [b'foo bar\r\n--qwerty\rhello my name is joe and I work in a '], + want_bodies_done=[False], + ), + MultipartParserTestCase( + 'empty body', + b'\r\n--qwerty\r\nheader foo\r\n\r\n\r\n--qwerty\r\n', + [b'header foo\r\n\r\n'], + [b''], + want_bodies_done=[True], + ), + MultipartParserTestCase( + 'ignore leading garbage', + b'hello my name is joe\r\n\n\n\n\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\n', # noqa + [b'header foo\r\n\r\n'], + [b'foo bar'], + want_bodies_done=[True], + ), + MultipartParserTestCase( + 'ignore trailing garbage', + b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\nhello my name is joe', + [b'header foo\r\n\r\n'], + [b'foo bar'], + want_bodies_done=[True], + ), + MultipartParserTestCase( + 'boundary allow linear whitespace', + b'\r\n--qwerty \t \r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\n', + [b'header foo\r\n\r\n'], + [b'foo bar'], + 
want_bodies_done=[True], + max_boundary=20, + ), + MultipartParserTestCase( + 'terminal boundary allow linear whitespace', + b'\r\n--qwerty\r\nheader foo\r\n\r\nfoo bar\r\n--qwerty-- \t \r\n', + [b'header foo\r\n\r\n'], + [b'foo bar'], + want_bodies_done=[True], + max_boundary=20, + ), + MultipartParserTestCase( + 'multiple parts', + b'\r\n--qwerty \t \r\nheader foo\r\n\r\nfoo bar\r\n--qwerty\r\nheader bar\r\n\r\nfoo baz\r\n--qwerty--\r\n', # noqa + [b'header foo\r\n\r\n', b'header bar\r\n\r\n'], + [b'foo bar', b'foo baz'], + want_bodies_done=[True, True], + ), + MultipartParserTestCase( + 'ignore after terminal boundary', + b'\r\n--qwerty \t \r\nheader foo\r\n\r\nfoo bar\r\n--qwerty--\r\nheader bar\r\n\r\nfoo baz\r\n--qwerty--\r\n', # noqa + [b'header foo\r\n\r\n'], + [b'foo bar'], + want_bodies_done=[True], + ), + ], + ) def test_multipart_parser(self, test: MultipartParserTestCase): chunk_sizes = [1, 2, 3, 4, 5, 7, 13, 17, 19, 23, 29, 31, 37, 42, 50, 100, 1000] marker = b'qwerty' @@ -1560,7 +1569,8 @@ def handle_body(data: bytes, done: bool = False): handle_header, handle_body, max_boundary_length=test.max_boundary, - max_lookahead=test.max_lookahead) + max_lookahead=test.max_lookahead, + ) src = io.BytesIO(test.data) try: @@ -1604,13 +1614,13 @@ def test_client_init(self): def test_get_system_info(self, client: MockClient): client.responses.append({ - "result": { - "version": "1.2.3", - "extra-field": "foo", + 'result': { + 'version': '1.2.3', + 'extra-field': 'foo', }, - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) info = client.get_system_info() assert info.version == '1.2.3' @@ -1620,10 +1630,10 @@ def test_get_system_info(self, client: MockClient): def test_get_warnings(self, client: MockClient): empty: typing.Dict[str, typing.Any] = { - "result": [], - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': [], + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', } client.responses.append(empty) warnings = client.get_warnings() @@ -1639,19 +1649,19 @@ def test_get_warnings(self, client: MockClient): ] def test_ack_warnings(self, client: MockClient): - client.responses.append({ - "result": 0, - "status": "OK", - "status-code": 200, - "type": "sync" - }) + client.responses.append({'result': 0, 'status': 'OK', 'status-code': 200, 'type': 'sync'}) num = client.ack_warnings(datetime_nzdt(2021, 1, 28, 15, 11, 0)) assert num == 0 assert client.requests == [ - ('POST', '/v1/warnings', None, { - 'action': 'okay', - 'timestamp': '2021-01-28T15:11:00+13:00', - }), + ( + 'POST', + '/v1/warnings', + None, + { + 'action': 'okay', + 'timestamp': '2021-01-28T15:11:00+13:00', + }, + ), ] def assert_mock_change(self, change: pebble.Change): @@ -1668,10 +1678,8 @@ def assert_mock_change(self, change: pebble.Change): assert change.tasks[0].progress.done == 1 assert change.tasks[0].progress.label == '' assert change.tasks[0].progress.total == 1 - assert change.tasks[0].ready_time == \ - datetime_nzdt(2021, 1, 28, 14, 37, 3, 270219) - assert change.tasks[0].spawn_time == \ - datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158) + assert change.tasks[0].ready_time == datetime_nzdt(2021, 1, 28, 14, 37, 3, 270219) + assert change.tasks[0].spawn_time == datetime_nzdt(2021, 1, 28, 14, 37, 2, 247158) assert change.ready assert change.err is None assert change.ready_time == datetime_nzdt(2021, 1, 28, 14, 37, 4, 291518) @@ -1679,10 +1687,10 @@ def assert_mock_change(self, change: pebble.Change): def test_get_changes(self, client: 
MockClient): empty: typing.Dict[str, typing.Any] = { - "result": [], - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': [], + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', } client.responses.append(empty) changes = client.get_changes() @@ -1697,12 +1705,12 @@ def test_get_changes(self, client: MockClient): assert changes == [] client.responses.append({ - "result": [ + 'result': [ build_mock_change_dict(), ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) changes = client.get_changes() assert len(changes) == 1 @@ -1717,10 +1725,10 @@ def test_get_changes(self, client: MockClient): def test_get_change(self, client: MockClient): client.responses.append({ - "result": build_mock_change_dict(), - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': build_mock_change_dict(), + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) change = client.get_change(pebble.ChangeID('70')) self.assert_mock_change(change) @@ -1730,10 +1738,10 @@ def test_get_change(self, client: MockClient): def test_get_change_str(self, client: MockClient): client.responses.append({ - "result": build_mock_change_dict(), - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': build_mock_change_dict(), + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) change = client.get_change('70') # type: ignore self.assert_mock_change(change) @@ -1743,10 +1751,10 @@ def test_get_change_str(self, client: MockClient): def test_abort_change(self, client: MockClient): client.responses.append({ - "result": build_mock_change_dict(), - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': build_mock_change_dict(), + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) change = client.abort_change(pebble.ChangeID('70')) self.assert_mock_change(change) @@ -1755,25 +1763,26 @@ def test_abort_change(self, client: MockClient): ] def _services_action_helper( - self, - client: MockClient, - action: str, - api_func: typing.Callable[[], str], - services: typing.List[str]): + self, + client: MockClient, + action: str, + api_func: typing.Callable[[], str], + services: typing.List[str], + ): client.responses.append({ - "change": "70", - "result": None, - "status": "Accepted", - "status-code": 202, - "type": "async" + 'change': '70', + 'result': None, + 'status': 'Accepted', + 'status-code': 202, + 'type': 'async', }) change = build_mock_change_dict() change['ready'] = True client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) change_id = api_func() assert change_id == '70' @@ -1790,11 +1799,11 @@ def _services_action_async_helper( services: typing.List[str], ): client.responses.append({ - "change": "70", - "result": None, - "status": "Accepted", - "status-code": 202, - "type": "async", + 'change': '70', + 'result': None, + 'status': 'Accepted', + 'status-code': 202, + 'type': 'async', }) change_id = api_func(timeout=0) assert change_id == '70' @@ -1817,6 +1826,7 @@ def test_replan_services_async(self, client: MockClient): def test_start_services(self, client: MockClient): def api_func(): return client.start_services(['svc']) + self._services_action_helper(client, 'start', api_func, ['svc']) with pytest.raises(TypeError): @@ -1831,11 +1841,13 @@ def api_func(): def test_start_services_async(self, client: MockClient): def api_func(timeout: float = 30): return 
client.start_services(['svc'], timeout=timeout) + self._services_action_async_helper(client, 'start', api_func, ['svc']) def test_stop_services(self, client: MockClient): def api_func(): return client.stop_services(['svc']) + self._services_action_helper(client, 'stop', api_func, ['svc']) with pytest.raises(TypeError): @@ -1850,11 +1862,13 @@ def api_func(): def test_stop_services_async(self, client: MockClient): def api_func(timeout: float = 30): return client.stop_services(['svc'], timeout=timeout) + self._services_action_async_helper(client, 'stop', api_func, ['svc']) def test_restart_services(self, client: MockClient): def api_func(): return client.restart_services(['svc']) + self._services_action_helper(client, 'restart', api_func, ['svc']) with pytest.raises(TypeError): @@ -1869,23 +1883,24 @@ def api_func(): def test_restart_services_async(self, client: MockClient): def api_func(timeout: float = 30): return client.restart_services(['svc'], timeout=timeout) + self._services_action_async_helper(client, 'restart', api_func, ['svc']) def test_change_error(self, client: MockClient): client.responses.append({ - "change": "70", - "result": None, - "status": "Accepted", - "status-code": 202, - "type": "async" + 'change': '70', + 'result': None, + 'status': 'Accepted', + 'status-code': 202, + 'type': 'async', }) change = build_mock_change_dict() change['err'] = 'Some kind of service error' client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) with pytest.raises(pebble.ChangeError) as excinfo: client.autostart_services() @@ -1902,10 +1917,10 @@ def test_change_error(self, client: MockClient): def test_wait_change_success(self, client: MockClient, timeout: typing.Optional[float] = 30.0): change = build_mock_change_dict() client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) response = client.wait_change(pebble.ChangeID('70'), timeout=timeout) @@ -1922,16 +1937,16 @@ def test_wait_change_success_timeout_none(self, client: MockClient): def test_wait_change_success_multiple_calls(self, client: MockClient, time: MockTime): def timeout_response(n: float): time.sleep(n) # simulate passing of time due to wait_change call - raise pebble.APIError({}, 504, "Gateway Timeout", "timed out") + raise pebble.APIError({}, 504, 'Gateway Timeout', 'timed out') client.responses.append(lambda: timeout_response(4)) change = build_mock_change_dict() client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) response = client.wait_change(pebble.ChangeID('70')) @@ -1952,16 +1967,15 @@ def test_wait_change_success_polled( timeout: typing.Optional[float] = 30.0, ): # Trigger polled mode - client.responses.append(pebble.APIError({}, 404, "Not Found", "not found")) - + client.responses.append(pebble.APIError({}, 404, 'Not Found', 'not found')) for i in range(3): change = build_mock_change_dict() change['ready'] = i == 2 client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) response = client.wait_change(pebble.ChangeID('70'), timeout=timeout, delay=1) @@ -1983,7 +1997,7 @@ def 
test_wait_change_success_polled_timeout_none(self, client: MockClient, time: def test_wait_change_timeout(self, client: MockClient, time: MockTime): def timeout_response(n: float): time.sleep(n) # simulate passing of time due to wait_change call - raise pebble.APIError({}, 504, "Gateway Timeout", "timed out") + raise pebble.APIError({}, 504, 'Gateway Timeout', 'timed out') client.responses.append(lambda: timeout_response(4)) client.responses.append(lambda: timeout_response(2)) @@ -2002,16 +2016,16 @@ def timeout_response(n: float): def test_wait_change_timeout_polled(self, client: MockClient, time: MockTime): # Trigger polled mode - client.responses.append(pebble.APIError({}, 404, "Not Found", "not found")) + client.responses.append(pebble.APIError({}, 404, 'Not Found', 'not found')) change = build_mock_change_dict() change['ready'] = False for _ in range(3): client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) with pytest.raises(pebble.TimeoutError) as excinfo: @@ -2032,10 +2046,10 @@ def test_wait_change_error(self, client: MockClient): change = build_mock_change_dict() change['err'] = 'Some kind of service error' client.responses.append({ - "result": change, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': change, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) # wait_change() itself shouldn't raise an error response = client.wait_change(pebble.ChangeID('70')) @@ -2049,7 +2063,7 @@ def test_wait_change_error(self, client: MockClient): def test_wait_change_socket_timeout(self, client: MockClient, time: MockTime): def timeout_response(n: float): time.sleep(n) - raise socket.timeout("socket.timeout: timed out") + raise socket.timeout('socket.timeout: timed out') client.responses.append(lambda: timeout_response(3)) @@ -2059,12 +2073,7 @@ def timeout_response(n: float): assert isinstance(excinfo.value, TimeoutError) def test_add_layer(self, client: MockClient): - okay_response = { - "result": True, - "status": "OK", - "status-code": 200, - "type": "sync" - } + okay_response = {'result': True, 'status': 'OK', 'status-code': 200, 'type': 'sync'} client.responses.append(okay_response) client.responses.append(okay_response) client.responses.append(okay_response) @@ -2117,10 +2126,10 @@ def test_get_plan(self, client: MockClient): override: replace """[1:] client.responses.append({ - "result": plan_yaml, - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': plan_yaml, + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) plan = client.get_plan() assert plan.to_yaml() == plan_yaml @@ -2134,21 +2143,13 @@ def test_get_plan(self, client: MockClient): def test_get_services_all(self, client: MockClient): client.responses.append({ - "result": [ - { - "current": "inactive", - "name": "svc1", - "startup": "disabled" - }, - { - "current": "active", - "name": "svc2", - "startup": "enabled" - } + 'result': [ + {'current': 'inactive', 'name': 'svc1', 'startup': 'disabled'}, + {'current': 'active', 'name': 'svc2', 'startup': 'enabled'}, ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) services = client.get_services() assert len(services) == 2 @@ -2165,21 +2166,13 @@ def test_get_services_all(self, client: MockClient): def test_get_services_names(self, client: MockClient): client.responses.append({ - "result": [ - { - "current": "inactive", - "name": 
"svc1", - "startup": "disabled" - }, - { - "current": "active", - "name": "svc2", - "startup": "enabled" - } + 'result': [ + {'current': 'inactive', 'name': 'svc1', 'startup': 'disabled'}, + {'current': 'active', 'name': 'svc2', 'startup': 'enabled'}, ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) services = client.get_services(['svc1', 'svc2']) assert len(services) == 2 @@ -2191,16 +2184,10 @@ def test_get_services_names(self, client: MockClient): assert services[1].current == pebble.ServiceStatus.ACTIVE client.responses.append({ - "result": [ - { - "current": "active", - "name": "svc2", - "startup": "enabled" - } - ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'result': [{'current': 'active', 'name': 'svc2', 'startup': 'enabled'}], + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) services = client.get_services(['svc2']) assert len(services) == 1 @@ -2240,8 +2227,13 @@ def test_pull_boundary_spanning_chunk(self, client: MockClient): assert content == '127.0.0.1 localhost # 😀\nfoo\r\nbar' assert client.requests == [ - ('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'}, - {'Accept': 'multipart/form-data'}, None), + ( + 'GET', + '/v1/files', + {'action': 'read', 'path': '/etc/hosts'}, + {'Accept': 'multipart/form-data'}, + None, + ), ] def test_pull_text(self, client: MockClient): @@ -2270,8 +2262,13 @@ def test_pull_text(self, client: MockClient): assert content == '127.0.0.1 localhost # 😀\nfoo\r\nbar' assert client.requests == [ - ('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'}, - {'Accept': 'multipart/form-data'}, None), + ( + 'GET', + '/v1/files', + {'action': 'read', 'path': '/etc/hosts'}, + {'Accept': 'multipart/form-data'}, + None, + ), ] def test_pull_binary(self, client: MockClient): @@ -2300,8 +2297,13 @@ def test_pull_binary(self, client: MockClient): assert content == b'127.0.0.1 localhost # \xf0\x9f\x98\x80\nfoo\r\nbar' assert client.requests == [ - ('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'}, - {'Accept': 'multipart/form-data'}, None), + ( + 'GET', + '/v1/files', + {'action': 'read', 'path': '/etc/hosts'}, + {'Accept': 'multipart/form-data'}, + None, + ), ] def test_pull_path_error(self, client: MockClient): @@ -2330,8 +2332,13 @@ def test_pull_path_error(self, client: MockClient): assert excinfo.value.message == 'not found' assert client.requests == [ - ('GET', '/v1/files', {'action': 'read', 'path': '/etc/hosts'}, - {'Accept': 'multipart/form-data'}, None), + ( + 'GET', + '/v1/files', + {'action': 'read', 'path': '/etc/hosts'}, + {'Accept': 'multipart/form-data'}, + None, + ), ] def test_pull_protocol_errors(self, client: MockClient): @@ -2339,8 +2346,7 @@ def test_pull_protocol_errors(self, client: MockClient): with pytest.raises(pebble.ProtocolError) as excinfo: client.pull('/etc/hosts') assert isinstance(excinfo.value, pebble.Error) - assert str(excinfo.value) == \ - "expected Content-Type 'multipart/form-data', got 'c/t'" + assert str(excinfo.value) == "expected Content-Type 'multipart/form-data', got 'c/t'" client.responses.append(({'Content-Type': 'multipart/form-data'}, b'')) with pytest.raises(pebble.ProtocolError) as excinfo: @@ -2474,8 +2480,16 @@ def test_push_all_options(self, client: MockClient): """, )) - client.push('/foo/bar', 'content', make_dirs=True, permissions=0o600, - user_id=12, user='bob', group_id=34, group='staff') + client.push( + '/foo/bar', + 'content', + make_dirs=True, + permissions=0o600, + user_id=12, 
+ user='bob', + group_id=34, + group='staff', + ) assert len(client.requests) == 1 request = client.requests[0] @@ -2488,15 +2502,17 @@ def test_push_all_options(self, client: MockClient): assert content == b'content' assert req == { 'action': 'write', - 'files': [{ - 'path': '/foo/bar', - 'make-dirs': True, - 'permissions': '600', - 'user-id': 12, - 'user': 'bob', - 'group-id': 34, - 'group': 'staff', - }], + 'files': [ + { + 'path': '/foo/bar', + 'make-dirs': True, + 'permissions': '600', + 'user-id': 12, + 'user': 'bob', + 'group-id': 34, + 'group': 'staff', + } + ], } def test_push_uid_gid(self, client: MockClient): @@ -2527,11 +2543,13 @@ def test_push_uid_gid(self, client: MockClient): assert content == b'content' assert req == { 'action': 'write', - 'files': [{ - 'path': '/foo/bar', - 'user-id': 12, - 'group-id': 34, - }], + 'files': [ + { + 'path': '/foo/bar', + 'user-id': 12, + 'group-id': 34, + } + ], } def test_push_path_error(self, client: MockClient): @@ -2568,9 +2586,7 @@ def test_push_path_error(self, client: MockClient): 'files': [{'path': '/foo/bar'}], } - def _parse_write_multipart(self, - content_type: str, - body: _bytes_generator): + def _parse_write_multipart(self, content_type: str, body: _bytes_generator): message = email.message.Message() message['Content-Type'] = content_type assert message.get_content_type() == 'multipart/form-data' @@ -2580,8 +2596,11 @@ def _parse_write_multipart(self, # We have to manually write the Content-Type with boundary, because # email.parser expects the entire multipart message with headers. parser = email.parser.BytesFeedParser() - parser.feed(b'Content-Type: multipart/form-data; boundary=' - + boundary.encode('utf-8') + b'\r\n\r\n') + parser.feed( + b'Content-Type: multipart/form-data; boundary=' + + boundary.encode('utf-8') + + b'\r\n\r\n' + ) for b in body: # With the "memory efficient push" changes, body is an iterable. 
parser.feed(b) @@ -2602,7 +2621,7 @@ def _parse_write_multipart(self, def test_list_files_path(self, client: MockClient): client.responses.append({ - "result": [ + 'result': [ { 'path': '/etc/hosts', 'name': 'hosts', @@ -2657,7 +2676,7 @@ def test_list_files_path(self, client: MockClient): def test_list_files_pattern(self, client: MockClient): client.responses.append({ - "result": [], + 'result': [], 'status': 'OK', 'status-code': 200, 'type': 'sync', @@ -2672,7 +2691,7 @@ def test_list_files_pattern(self, client: MockClient): def test_list_files_itself(self, client: MockClient): client.responses.append({ - "result": [], + 'result': [], 'status': 'OK', 'status-code': 200, 'type': 'sync', @@ -2687,51 +2706,70 @@ def test_list_files_itself(self, client: MockClient): def test_make_dir_basic(self, client: MockClient): client.responses.append({ - "result": [{'path': '/foo/bar'}], + 'result': [{'path': '/foo/bar'}], 'status': 'OK', 'status-code': 200, 'type': 'sync', }) client.make_dir('/foo/bar') - req = {'action': 'make-dirs', 'dirs': [{ - 'path': '/foo/bar', - }]} + req = { + 'action': 'make-dirs', + 'dirs': [ + { + 'path': '/foo/bar', + } + ], + } assert client.requests == [ ('POST', '/v1/files', None, req), ] def test_make_dir_all_options(self, client: MockClient): client.responses.append({ - "result": [{'path': '/foo/bar'}], + 'result': [{'path': '/foo/bar'}], 'status': 'OK', 'status-code': 200, 'type': 'sync', }) - client.make_dir('/foo/bar', make_parents=True, permissions=0o600, - user_id=12, user='bob', group_id=34, group='staff') - - req = {'action': 'make-dirs', 'dirs': [{ - 'path': '/foo/bar', - 'make-parents': True, - 'permissions': '600', - 'user-id': 12, - 'user': 'bob', - 'group-id': 34, - 'group': 'staff', - }]} + client.make_dir( + '/foo/bar', + make_parents=True, + permissions=0o600, + user_id=12, + user='bob', + group_id=34, + group='staff', + ) + + req = { + 'action': 'make-dirs', + 'dirs': [ + { + 'path': '/foo/bar', + 'make-parents': True, + 'permissions': '600', + 'user-id': 12, + 'user': 'bob', + 'group-id': 34, + 'group': 'staff', + } + ], + } assert client.requests == [ ('POST', '/v1/files', None, req), ] def test_make_dir_error(self, client: MockClient): client.responses.append({ - "result": [{ - 'path': '/foo/bar', - 'error': { - 'kind': 'permission-denied', - 'message': 'permission denied', - }, - }], + 'result': [ + { + 'path': '/foo/bar', + 'error': { + 'kind': 'permission-denied', + 'message': 'permission denied', + }, + } + ], 'status': 'OK', 'status-code': 200, 'type': 'sync', @@ -2744,45 +2782,57 @@ def test_make_dir_error(self, client: MockClient): def test_remove_path_basic(self, client: MockClient): client.responses.append({ - "result": [{'path': '/boo/far'}], + 'result': [{'path': '/boo/far'}], 'status': 'OK', 'status-code': 200, 'type': 'sync', }) client.remove_path('/boo/far') - req = {'action': 'remove', 'paths': [{ - 'path': '/boo/far', - }]} + req = { + 'action': 'remove', + 'paths': [ + { + 'path': '/boo/far', + } + ], + } assert client.requests == [ ('POST', '/v1/files', None, req), ] def test_remove_path_recursive(self, client: MockClient): client.responses.append({ - "result": [{'path': '/boo/far'}], + 'result': [{'path': '/boo/far'}], 'status': 'OK', 'status-code': 200, 'type': 'sync', }) client.remove_path('/boo/far', recursive=True) - req = {'action': 'remove', 'paths': [{ - 'path': '/boo/far', - 'recursive': True, - }]} + req = { + 'action': 'remove', + 'paths': [ + { + 'path': '/boo/far', + 'recursive': True, + } + ], + } assert 
client.requests == [ ('POST', '/v1/files', None, req), ] def test_remove_path_error(self, client: MockClient): client.responses.append({ - "result": [{ - 'path': '/boo/far', - 'error': { - 'kind': 'generic-file-error', - 'message': 'some other error', - }, - }], + 'result': [ + { + 'path': '/boo/far', + 'error': { + 'kind': 'generic-file-error', + 'message': 'some other error', + }, + } + ], 'status': 'OK', 'status-code': 200, 'type': 'sync', @@ -2831,23 +2881,23 @@ def test_send_signal_type_error(self, client: MockClient): def test_get_checks_all(self, client: MockClient): client.responses.append({ - "result": [ + 'result': [ { - "name": "chk1", - "status": "up", - "threshold": 2, + 'name': 'chk1', + 'status': 'up', + 'threshold': 2, }, { - "name": "chk2", - "level": "alive", - "status": "down", - "failures": 5, - "threshold": 3, - } + 'name': 'chk2', + 'level': 'alive', + 'status': 'down', + 'failures': 5, + 'threshold': 3, + }, ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) checks = client.get_checks() assert len(checks) == 2 @@ -2868,17 +2918,17 @@ def test_get_checks_all(self, client: MockClient): def test_get_checks_filters(self, client: MockClient): client.responses.append({ - "result": [ + 'result': [ { - "name": "chk2", - "level": "ready", - "status": "up", - "threshold": 3, + 'name': 'chk2', + 'level': 'ready', + 'status': 'up', + 'threshold': 3, }, ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) checks = client.get_checks(level=pebble.CheckLevel.READY, names=['chk2']) assert len(checks) == 1 @@ -2894,17 +2944,17 @@ def test_get_checks_filters(self, client: MockClient): def test_checklevel_conversion(self, client: MockClient): client.responses.append({ - "result": [ + 'result': [ { - "name": "chk2", - "level": "foobar!", - "status": "up", - "threshold": 3, + 'name': 'chk2', + 'level': 'foobar!', + 'status': 'up', + 'threshold': 3, }, ], - "status": "OK", - "status-code": 200, - "type": "sync" + 'status': 'OK', + 'status-code': 200, + 'type': 'sync', }) checks = client.get_checks(level=pebble.CheckLevel.READY, names=['chk2']) assert len(checks) == 1 @@ -2932,11 +2982,16 @@ def test_notify_basic(self, client: MockClient): assert notice_id == '123' assert client.requests == [ - ('POST', '/v1/notices', None, { - 'action': 'add', - 'key': 'example.com/a', - 'type': 'custom', - }), + ( + 'POST', + '/v1/notices', + None, + { + 'action': 'add', + 'key': 'example.com/a', + 'type': 'custom', + }, + ), ] def test_notify_other_args(self, client: MockClient): @@ -2949,19 +3004,27 @@ def test_notify_other_args(self, client: MockClient): 'type': 'sync', }) - notice_id = client.notify(pebble.NoticeType.CUSTOM, 'example.com/a', - data={'k': 'v'}, - repeat_after=datetime.timedelta(hours=3)) + notice_id = client.notify( + pebble.NoticeType.CUSTOM, + 'example.com/a', + data={'k': 'v'}, + repeat_after=datetime.timedelta(hours=3), + ) assert notice_id == '321' assert client.requests == [ - ('POST', '/v1/notices', None, { - 'action': 'add', - 'key': 'example.com/a', - 'type': 'custom', - 'data': {'k': 'v'}, - 'repeat-after': '10800.000s', - }), + ( + 'POST', + '/v1/notices', + None, + { + 'action': 'add', + 'key': 'example.com/a', + 'type': 'custom', + 'data': {'k': 'v'}, + 'repeat-after': '10800.000s', + }, + ), ] def test_get_notice(self, client: MockClient): @@ -3003,24 +3066,27 @@ def test_get_notice_not_found(self, client: MockClient): def 
test_get_notices_all(self, client: MockClient): client.responses.append({ - 'result': [{ - 'id': '123', - 'user-id': 1000, - 'type': 'custom', - 'key': 'example.com/a', - 'first-occurred': '2023-12-07T17:01:02.123456789Z', - 'last-occurred': '2023-12-07T17:01:03.123456789Z', - 'last-repeated': '2023-12-07T17:01:04.123456789Z', - 'occurrences': 7, - }, { - 'id': '124', - 'type': 'other', - 'key': 'example.com/b', - 'first-occurred': '2023-12-07T17:01:02.123456789Z', - 'last-occurred': '2023-12-07T17:01:03.123456789Z', - 'last-repeated': '2023-12-07T17:01:04.123456789Z', - 'occurrences': 8, - }], + 'result': [ + { + 'id': '123', + 'user-id': 1000, + 'type': 'custom', + 'key': 'example.com/a', + 'first-occurred': '2023-12-07T17:01:02.123456789Z', + 'last-occurred': '2023-12-07T17:01:03.123456789Z', + 'last-repeated': '2023-12-07T17:01:04.123456789Z', + 'occurrences': 7, + }, + { + 'id': '124', + 'type': 'other', + 'key': 'example.com/b', + 'first-occurred': '2023-12-07T17:01:02.123456789Z', + 'last-occurred': '2023-12-07T17:01:03.123456789Z', + 'last-repeated': '2023-12-07T17:01:04.123456789Z', + 'occurrences': 8, + }, + ], 'status': 'OK', 'status-code': 200, 'type': 'sync', @@ -3037,24 +3103,27 @@ def test_get_notices_all(self, client: MockClient): def test_get_notices_filters(self, client: MockClient): client.responses.append({ - 'result': [{ - 'id': '123', - 'user-id': 1000, - 'type': 'custom', - 'key': 'example.com/a', - 'first-occurred': '2023-12-07T17:01:02.123456789Z', - 'last-occurred': '2023-12-07T17:01:03.123456789Z', - 'last-repeated': '2023-12-07T17:01:04.123456789Z', - 'occurrences': 7, - }, { - 'id': '124', - 'type': 'other', - 'key': 'example.com/b', - 'first-occurred': '2023-12-07T17:01:02.123456789Z', - 'last-occurred': '2023-12-07T17:01:03.123456789Z', - 'last-repeated': '2023-12-07T17:01:04.123456789Z', - 'occurrences': 8, - }], + 'result': [ + { + 'id': '123', + 'user-id': 1000, + 'type': 'custom', + 'key': 'example.com/a', + 'first-occurred': '2023-12-07T17:01:02.123456789Z', + 'last-occurred': '2023-12-07T17:01:03.123456789Z', + 'last-repeated': '2023-12-07T17:01:04.123456789Z', + 'occurrences': 7, + }, + { + 'id': '124', + 'type': 'other', + 'key': 'example.com/b', + 'first-occurred': '2023-12-07T17:01:02.123456789Z', + 'last-occurred': '2023-12-07T17:01:03.123456789Z', + 'last-repeated': '2023-12-07T17:01:04.123456789Z', + 'occurrences': 8, + }, + ], 'status': 'OK', 'status-code': 200, 'type': 'sync', @@ -3087,7 +3156,7 @@ def test_socket_not_found(self): with pytest.raises(pebble.ConnectionError) as excinfo: client.get_system_info() assert isinstance(excinfo.value, pebble.Error) - assert "Could not connect to Pebble" in str(excinfo.value) + assert 'Could not connect to Pebble' in str(excinfo.value) def test_real_client(self): shutdown, socket_path = fake_pebble.start_server() @@ -3130,14 +3199,18 @@ def test_str(self): assert str(e) == "non-zero exit code 1 executing ['x'], stderr='only-err'" e = pebble.ExecError(['a', 'b'], 1, 'out', 'err') - assert str(e) == "non-zero exit code 1 executing ['a', 'b'], " \ - + "stdout='out', stderr='err'" + assert ( + str(e) == "non-zero exit code 1 executing ['a', 'b'], " + "stdout='out', stderr='err'" + ) def test_str_truncated(self): e = pebble.ExecError(['foo'], 2, 'longout', 'longerr') e.STR_MAX_OUTPUT = 5 # type: ignore - assert str(e) == "non-zero exit code 2 executing ['foo'], " \ + assert ( + str(e) + == "non-zero exit code 2 executing ['foo'], " + "stdout='longo' [truncated], stderr='longe' [truncated]" + ) class 
MockWebsocket: @@ -3159,9 +3232,14 @@ def shutdown(self): class TestExec: - def add_responses(self, client: MockClient, change_id: str, exit_code: int, - change_err: typing.Optional[str] = None): - task_id = f"T{change_id}" # create a task_id based on change_id + def add_responses( + self, + client: MockClient, + change_id: str, + exit_code: int, + change_err: typing.Optional[str] = None, + ): + task_id = f'T{change_id}' # create a task_id based on change_id client.responses.append({ 'change': change_id, 'result': {'task-id': task_id}, @@ -3187,17 +3265,19 @@ def add_responses(self, client: MockClient, change_id: str, exit_code: int, } return (stdio, stderr, control) - def build_exec_data(self, - command: typing.List[str], - service_context: typing.Optional[str] = None, - environment: typing.Optional[typing.Dict[str, str]] = None, - working_dir: typing.Optional[str] = None, - timeout: typing.Optional[float] = None, - user_id: typing.Optional[int] = None, - user: typing.Optional[str] = None, - group_id: typing.Optional[int] = None, - group: typing.Optional[str] = None, - combine_stderr: bool = False): + def build_exec_data( + self, + command: typing.List[str], + service_context: typing.Optional[str] = None, + environment: typing.Optional[typing.Dict[str, str]] = None, + working_dir: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + user_id: typing.Optional[int] = None, + user: typing.Optional[str] = None, + group_id: typing.Optional[int] = None, + group: typing.Optional[str] = None, + combine_stderr: bool = False, + ): return { 'command': command, 'service-context': service_context, @@ -3223,16 +3303,17 @@ def test_arg_errors(self, client: MockClient): with pytest.raises(TypeError): client.exec(['foo'], stdin=123) # type: ignore with pytest.raises(ValueError): - client.exec(['foo'], stdout=io.StringIO(), stderr=io.StringIO(), - combine_stderr=True) + client.exec(['foo'], stdout=io.StringIO(), stderr=io.StringIO(), combine_stderr=True) def test_no_wait_call(self, client: MockClient): self.add_responses(client, '123', 0) with pytest.warns(ResourceWarning) as record: process = client.exec(['true']) del process - assert str(record[0].message) == \ - 'ExecProcess instance garbage collected without call to wait() or wait_output()' + assert ( + str(record[0].message) + == 'ExecProcess instance garbage collected without call to wait() or wait_output()' + ) def test_wait_exit_zero(self, client: MockClient): self.add_responses(client, '123', 0) @@ -3289,15 +3370,20 @@ def test_wait_other_args(self, client: MockClient): process.wait() assert client.requests == [ - ('POST', '/v1/exec', None, self.build_exec_data( - command=['true'], - environment={'K1': 'V1', 'K2': 'V2'}, - working_dir='WD', - user_id=1000, - user='bob', - group_id=1000, - group='staff', - )), + ( + 'POST', + '/v1/exec', + None, + self.build_exec_data( + command=['true'], + environment={'K1': 'V1', 'K2': 'V2'}, + working_dir='WD', + user_id=1000, + user='bob', + group_id=1000, + group='staff', + ), + ), ('GET', '/v1/changes/123/wait', {'timeout': '4.000s'}, None), ] @@ -3335,15 +3421,21 @@ def test_send_signal(self, client: MockClient): assert len(control.sends) == num_sends assert control.sends[0][0] == 'TXT' - assert json.loads(control.sends[0][1]) == \ - {'command': 'signal', 'signal': {'name': 'SIGHUP'}} + assert json.loads(control.sends[0][1]) == { + 'command': 'signal', + 'signal': {'name': 'SIGHUP'}, + } if hasattr(signal, 'SIGHUP'): assert control.sends[1][0] == 'TXT' - assert 
json.loads(control.sends[1][1]) == \ - {'command': 'signal', 'signal': {'name': signal.Signals(1).name}} + assert json.loads(control.sends[1][1]) == { + 'command': 'signal', + 'signal': {'name': signal.Signals(1).name}, + } assert control.sends[2][0] == 'TXT' - assert json.loads(control.sends[2][1]) == \ - {'command': 'signal', 'signal': {'name': 'SIGHUP'}} + assert json.loads(control.sends[2][1]) == { + 'command': 'signal', + 'signal': {'name': 'SIGHUP'}, + } def test_wait_output(self, client: MockClient): stdio, stderr, _ = self.add_responses(client, '123', 0) @@ -3457,8 +3549,7 @@ def test_wait_output_send_stdin_bytes(self, client: MockClient): stdio.receives.append('{"command":"end"}') stderr.receives.append('{"command":"end"}') - process = client.exec(['awk', '{ print toupper($) }'], stdin=b'foo\nbar\n', - encoding=None) + process = client.exec(['awk', '{ print toupper($) }'], stdin=b'foo\nbar\n', encoding=None) out, err = process.wait_output() assert out == b'FOO\nBAR\n' assert err == b'' @@ -3477,7 +3568,7 @@ def test_wait_output_no_stdout(self, client: MockClient): stdio.receives.append('{"command":"end"}') stderr.receives.append('{"command":"end"}') stdout_buffer = io.BytesIO() - process = client.exec(["echo", "FOOBAR"], stdout=stdout_buffer, encoding=None) + process = client.exec(['echo', 'FOOBAR'], stdout=stdout_buffer, encoding=None) with pytest.raises(TypeError): process.wait_output() @@ -3493,7 +3584,7 @@ def test_wait_output_bad_command(self, caplog: pytest.LogCaptureFixture, client: process = client.exec(['python3', '--version']) out, err = process.wait_output() expected = [ - "Cannot decode I/O command (invalid JSON)", + 'Cannot decode I/O command (invalid JSON)', "Invalid I/O command 'foo'", ] assert expected == [record.message for record in caplog.records] @@ -3567,9 +3658,8 @@ def test_wait_passed_output_bytes(self, client: MockClient): assert io_ws.sends == [] def test_wait_passed_output_bad_command( - self, - caplog: pytest.LogCaptureFixture, - client: MockClient): + self, caplog: pytest.LogCaptureFixture, client: MockClient + ): io_ws, stderr, _ = self.add_responses(client, '123', 0) io_ws.receives.append(b'foo\n') io_ws.receives.append('not json') # bad JSON should be ignored @@ -3585,7 +3675,7 @@ def test_wait_passed_output_bad_command( process = client.exec(['echo', 'foo'], stdout=out, stderr=err) process.wait() expected = [ - "Cannot decode I/O command (invalid JSON)", + 'Cannot decode I/O command (invalid JSON)', "Invalid I/O command 'foo'", ] assert expected == [record.message for record in caplog.records] @@ -3732,7 +3822,8 @@ def send_binary(b: bytes): # PytestUnhandledThreadExceptionWarning isn't present on older Python versions. 
if hasattr(pytest, 'PytestUnhandledThreadExceptionWarning'): test_websocket_send_raises = pytest.mark.filterwarnings( - 'ignore::pytest.PytestUnhandledThreadExceptionWarning')(test_websocket_send_raises) + 'ignore::pytest.PytestUnhandledThreadExceptionWarning' + )(test_websocket_send_raises) def test_websocket_recv_raises(self, client: MockClient): stdio, stderr, _ = self.add_responses(client, '123', 0) @@ -3763,4 +3854,5 @@ def recv(): if hasattr(pytest, 'PytestUnhandledThreadExceptionWarning'): test_websocket_recv_raises = pytest.mark.filterwarnings( - 'ignore::pytest.PytestUnhandledThreadExceptionWarning')(test_websocket_recv_raises) + 'ignore::pytest.PytestUnhandledThreadExceptionWarning' + )(test_websocket_recv_raises) diff --git a/test/test_real_pebble.py b/test/test_real_pebble.py index b64dbcbfb..6f5d97174 100644 --- a/test/test_real_pebble.py +++ b/test/test_real_pebble.py @@ -66,33 +66,37 @@ def client(): ) class TestRealPebble: def test_checks_and_health(self, client: pebble.Client): - client.add_layer('layer', { - 'checks': { - 'bad': { - 'override': 'replace', - 'level': 'ready', - 'period': '50ms', - 'threshold': 2, - 'exec': { - 'command': 'sleep x', + client.add_layer( + 'layer', + { + 'checks': { + 'bad': { + 'override': 'replace', + 'level': 'ready', + 'period': '50ms', + 'threshold': 2, + 'exec': { + 'command': 'sleep x', + }, }, - }, - 'good': { - 'override': 'replace', - 'level': 'alive', - 'period': '50ms', - 'exec': { - 'command': 'echo foo', + 'good': { + 'override': 'replace', + 'level': 'alive', + 'period': '50ms', + 'exec': { + 'command': 'echo foo', + }, }, - }, - 'other': { - 'override': 'replace', - 'exec': { - 'command': 'echo bar', + 'other': { + 'override': 'replace', + 'exec': { + 'command': 'echo bar', + }, }, }, }, - }, combine=True) + combine=True, + ) # Checks should all be "up" initially checks = client.get_checks() @@ -217,7 +221,7 @@ def test_exec_working_dir(self, client: pebble.Client): with tempfile.TemporaryDirectory() as temp_dir: process = client.exec(['pwd'], working_dir=temp_dir) out, err = process.wait_output() - assert out == f"{temp_dir}\n" + assert out == f'{temp_dir}\n' assert err == '' def test_exec_environment(self, client: pebble.Client): @@ -274,30 +278,34 @@ def stdin_thread(): assert reads == [b'one\n', b'2\n', b'THREE\n'] def test_log_forwarding(self, client: pebble.Client): - client.add_layer("log-forwarder", { - "services": { - "tired": { - "override": "replace", - "command": "sleep 1", + client.add_layer( + 'log-forwarder', + { + 'services': { + 'tired': { + 'override': 'replace', + 'command': 'sleep 1', + }, }, - }, - "log-targets": { - "pretend-loki": { - "type": "loki", - "override": "replace", - "location": "https://example.com", - "services": ["all"], - "labels": {"foo": "bar"}, + 'log-targets': { + 'pretend-loki': { + 'type': 'loki', + 'override': 'replace', + 'location': 'https://example.com', + 'services': ['all'], + 'labels': {'foo': 'bar'}, + }, }, }, - }, combine=True) + combine=True, + ) plan = client.get_plan() assert len(plan.log_targets) == 1 - assert plan.log_targets["pretend-loki"].type == "loki" - assert plan.log_targets["pretend-loki"].override == "replace" - assert plan.log_targets["pretend-loki"].location == "https://example.com" - assert plan.log_targets["pretend-loki"].services == ["all"] - assert plan.log_targets["pretend-loki"].labels == {"foo": "bar"} + assert plan.log_targets['pretend-loki'].type == 'loki' + assert plan.log_targets['pretend-loki'].override == 'replace' + assert 
plan.log_targets['pretend-loki'].location == 'https://example.com' + assert plan.log_targets['pretend-loki'].services == ['all'] + assert plan.log_targets['pretend-loki'].labels == {'foo': 'bar'} @pytest.mark.skipif( diff --git a/test/test_storage.py b/test/test_storage.py index eab42c6c9..44091f6b1 100644 --- a/test/test_storage.py +++ b/test/test_storage.py @@ -23,7 +23,6 @@ import typing import unittest import unittest.mock -from test.test_helpers import FakeScript from textwrap import dedent import pytest @@ -31,6 +30,7 @@ import ops import ops.storage +from test.test_helpers import FakeScript @pytest.fixture @@ -39,7 +39,6 @@ def fake_script(request: pytest.FixtureRequest): class StoragePermutations(abc.ABC): - assertEqual = unittest.TestCase.assertEqual # noqa assertRaises = unittest.TestCase.assertRaises # noqa @@ -69,7 +68,6 @@ def test_save_and_load_snapshot( f = self.create_framework(request, fake_script) class Sample(ops.StoredStateData): - def __init__( self, parent: ops.Object, @@ -121,7 +119,6 @@ class Events(ops.ObjectEvents): event = ops.EventSource(Evt) class Sample(ops.Object): - on = Events() # type: ignore def __init__(self, parent: ops.Object, key: str): @@ -255,25 +252,24 @@ def test_save_load_drop_load_notices( store.save_notice('event', 'observer', 'method2') assert list(store.notices('event')) == [ ('event', 'observer', 'method'), - ('event', 'observer', 'method2') + ('event', 'observer', 'method2'), ] class TestSQLiteStorage(StoragePermutations): - def create_storage(self, request: pytest.FixtureRequest, fake_script: FakeScript): return ops.storage.SQLiteStorage(':memory:') def test_permissions_new(self): with tempfile.TemporaryDirectory() as temp_dir: - filename = os.path.join(temp_dir, ".unit-state.db") + filename = os.path.join(temp_dir, '.unit-state.db') storage = ops.storage.SQLiteStorage(filename) assert stat.S_IMODE(os.stat(filename).st_mode) == stat.S_IRUSR | stat.S_IWUSR storage.close() def test_permissions_existing(self): with tempfile.TemporaryDirectory() as temp_dir: - filename = os.path.join(temp_dir, ".unit-state.db") + filename = os.path.join(temp_dir, '.unit-state.db') ops.storage.SQLiteStorage(filename).close() # Set the file to access that will need fixing for user, group, and other. os.chmod(filename, 0o744) @@ -281,22 +277,22 @@ def test_permissions_existing(self): assert stat.S_IMODE(os.stat(filename).st_mode) == stat.S_IRUSR | stat.S_IWUSR storage.close() - @unittest.mock.patch("os.path.exists") + @unittest.mock.patch('os.path.exists') def test_permissions_race(self, exists: unittest.mock.MagicMock): exists.return_value = False with tempfile.TemporaryDirectory() as temp_dir: - filename = os.path.join(temp_dir, ".unit-state.db") + filename = os.path.join(temp_dir, '.unit-state.db') # Create an existing file, but the mock will simulate a race condition saying that it # does not exist. 
- open(filename, "w").close() + open(filename, 'w').close() pytest.raises(RuntimeError, ops.storage.SQLiteStorage, filename) - @unittest.mock.patch("os.chmod") + @unittest.mock.patch('os.chmod') def test_permissions_failure(self, chmod: unittest.mock.MagicMock): chmod.side_effect = OSError with tempfile.TemporaryDirectory() as temp_dir: - filename = os.path.join(temp_dir, ".unit-state.db") - open(filename, "w").close() + filename = os.path.join(temp_dir, '.unit-state.db') + open(filename, 'w').close() pytest.raises(RuntimeError, ops.storage.SQLiteStorage, filename) @@ -308,7 +304,9 @@ def setup_juju_backend(fake_script: FakeScript, state_file: pathlib.Path): 'state_file': str(state_file.as_posix()), } - fake_script.write('state-set', dedent('''\ + fake_script.write( + 'state-set', + dedent("""\ {executable} -c ' import sys if "{pthpth}" not in sys.path: @@ -327,9 +325,12 @@ def setup_juju_backend(fake_script: FakeScript, state_file: pathlib.Path): with state_file.open("wb") as f: pickle.dump(state, f) ' "$@" - ''').format(**template_args)) + """).format(**template_args), + ) - fake_script.write('state-get', dedent('''\ + fake_script.write( + 'state-get', + dedent("""\ {executable} -Sc ' import sys if "{pthpth}" not in sys.path: @@ -345,9 +346,12 @@ def setup_juju_backend(fake_script: FakeScript, state_file: pathlib.Path): result = state.get(sys.argv[1], "\\n") sys.stdout.write(result) ' "$@" - ''').format(**template_args)) + """).format(**template_args), + ) - fake_script.write('state-delete', dedent('''\ + fake_script.write( + 'state-delete', + dedent("""\ {executable} -Sc ' import sys if "{pthpth}" not in sys.path: @@ -364,11 +368,11 @@ def setup_juju_backend(fake_script: FakeScript, state_file: pathlib.Path): with state_file.open("wb") as f: pickle.dump(state, f) ' "$@" - ''').format(**template_args)) + """).format(**template_args), + ) class TestJujuStorage(StoragePermutations): - def create_storage(self, request: pytest.FixtureRequest, fake_script: FakeScript): fd, fn = tempfile.mkstemp(prefix='tmp-ops-test-state-') os.close(fd) @@ -379,7 +383,6 @@ def create_storage(self, request: pytest.FixtureRequest, fake_script: FakeScript class TestSimpleLoader: - def test_is_c_loader(self): loader = ops.storage._SimpleLoader(io.StringIO('')) if getattr(yaml, 'CSafeLoader', None) is not None: @@ -417,12 +420,12 @@ def test_forbids_some_types(self): class Foo: pass + f = Foo() self.assertRefused(f) class TestJujuStateBackend: - def test_is_not_available(self): assert not ops.storage.juju_backend_available() @@ -434,9 +437,12 @@ def test_is_available(self, fake_script: FakeScript): def test_set_encodes_args(self, fake_script: FakeScript): t = tempfile.NamedTemporaryFile() try: - fake_script.write('state-set', dedent(""" + fake_script.write( + 'state-set', + dedent(""" cat >> {} - """).format(pathlib.Path(t.name).as_posix())) + """).format(pathlib.Path(t.name).as_posix()), + ) backend = ops.storage._JujuStorageBackend() backend.set('key', {'foo': 2}) assert fake_script.calls(clear=True) == [ @@ -452,9 +458,12 @@ def test_set_encodes_args(self, fake_script: FakeScript): """) def test_get(self, fake_script: FakeScript): - fake_script.write('state-get', dedent(""" + fake_script.write( + 'state-get', + dedent(""" echo 'foo: "bar"' - """)) + """), + ) backend = ops.storage._JujuStorageBackend() value = backend.get('key') assert value == {'foo': 'bar'} @@ -465,9 +474,12 @@ def test_get(self, fake_script: FakeScript): def test_set_and_get_complex_value(self, fake_script: FakeScript): t = 
tempfile.NamedTemporaryFile() try: - fake_script.write('state-set', dedent(""" + fake_script.write( + 'state-set', + dedent(""" cat >> {} - """).format(pathlib.Path(t.name).as_posix())) + """).format(pathlib.Path(t.name).as_posix()), + ) backend = ops.storage._JujuStorageBackend() complex_val = { 'foo': 2, @@ -502,7 +514,9 @@ def test_set_and_get_complex_value(self, fake_script: FakeScript): """) # Note that the content is yaml in a string, embedded inside YAML to declare the Key: # Value of where to store the entry. - fake_script.write('state-get', dedent(""" + fake_script.write( + 'state-get', + dedent(""" echo "foo: 2 3: [1, 2, '3'] four: !!set {2: null, 3: null} @@ -511,7 +525,8 @@ def test_set_and_get_complex_value(self, fake_script: FakeScript): seven: !!binary | MTIzNA== " - """)) + """), + ) out = backend.get('Class[foo]/_stored') assert out == complex_val diff --git a/test/test_testing.py b/test/test_testing.py index 98eed62bb..82ddd6e65 100644 --- a/test/test_testing.py +++ b/test/test_testing.py @@ -52,10 +52,8 @@ class StorageTester(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observed_events: typing.List[ops.EventBase] = [] - self.framework.observe(self.on.test_storage_attached, - self._on_test_storage_attached) - self.framework.observe(self.on.test_storage_detaching, - self._on_test_storage_detaching) + self.framework.observe(self.on.test_storage_attached, self._on_test_storage_attached) + self.framework.observe(self.on.test_storage_detaching, self._on_test_storage_detaching) def _on_test_storage_attached(self, event: ops.EventBase): self.observed_events.append(event) @@ -68,10 +66,12 @@ class StorageWithHyphensHelper(ops.Object): def __init__(self, parent: ops.Object, key: str): super().__init__(parent, key) self.changes: typing.List[ops.EventBase] = [] - parent.framework.observe(parent.on.test_with_hyphens_storage_attached, - self.on_storage_changed) - parent.framework.observe(parent.on.test_with_hyphens_storage_detaching, - self.on_storage_changed) + parent.framework.observe( + parent.on.test_with_hyphens_storage_attached, self.on_storage_changed + ) + parent.framework.observe( + parent.on.test_with_hyphens_storage_detaching, self.on_storage_changed + ) def on_storage_changed(self, event: ops.EventBase): self.changes.append(event) @@ -79,18 +79,21 @@ def on_storage_changed(self, event: ops.EventBase): class TestHarness: def test_add_relation_no_meta_fails(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta="name: mycharm") + harness = ops.testing.Harness(ops.CharmBase, meta='name: mycharm') request.addfinalizer(harness.cleanup) with pytest.raises(ops.RelationNotFoundError): harness.add_relation('db', 'postgresql') def test_add_relation(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') assert isinstance(rel_id, int) @@ -102,12 +105,15 @@ def test_add_relation(self, request: pytest.FixtureRequest): assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == {} def test_add_relation_with_app_data(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) 
request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql', app_data={'x': '1', 'y': '2'}) assert isinstance(rel_id, int) @@ -118,12 +124,15 @@ def test_add_relation_with_app_data(self, request: pytest.FixtureRequest): assert harness.get_relation_data(rel_id, 'postgresql/0') == {} def test_add_relation_with_unit_data(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql', unit_data={'a': '1', 'b': '2'}) assert isinstance(rel_id, int) @@ -134,12 +143,15 @@ def test_add_relation_with_unit_data(self, request: pytest.FixtureRequest): assert harness.get_relation_data(rel_id, 'postgresql/0') == {'a': '1', 'b': '2'} def test_can_connect_default(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() @@ -172,14 +184,17 @@ def _on_pebble_ready(self, event: ops.PebbleReadyEvent): assert event.workload.can_connect() pebble_ready_calls[event.workload.name] += 1 - harness = ops.testing.Harness(MyCharm, meta=''' + harness = ops.testing.Harness( + MyCharm, + meta=""" name: test-app containers: foo: resource: foo-image bar: resource: bar-image - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin_with_initial_hooks() @@ -196,12 +211,15 @@ def _on_pebble_ready(self, event: ops.PebbleReadyEvent): container.get_plan() # shouldn't raise ConnectionError def test_add_relation_and_unit(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') assert isinstance(rel_id, int) @@ -210,17 +228,19 @@ def test_add_relation_and_unit(self, request: pytest.FixtureRequest): backend = harness._backend assert backend.relation_ids('db') == [rel_id] assert backend.relation_list(rel_id) == ['postgresql/0'] - assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == \ - {'foo': 'bar'} + assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == {'foo': 'bar'} def test_add_relation_with_remote_app_data(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) remote_app = 'postgresql' rel_id = harness.add_relation('db', remote_app) @@ -231,7 +251,6 @@ def test_add_relation_with_remote_app_data(self, request: pytest.FixtureRequest) assert backend.relation_get(rel_id, remote_app, is_app=True) == {'app': 'data'} def test_add_relation_with_our_initial_data(self, request: pytest.FixtureRequest): - class InitialDataTester(ops.CharmBase): """Record the relation-changed events.""" @@ -244,25 +263,30 @@ def _on_db_relation_changed(self, event: ops.EventBase): self.observed_events.append(event) # language=YAML - harness = ops.testing.Harness(InitialDataTester, meta=''' + harness = ops.testing.Harness( + InitialDataTester, + meta=""" name: test-app 
requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') harness.update_relation_data(rel_id, 'test-app', {'k': 'v1'}) harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'}) backend = harness._backend assert backend.relation_get(rel_id, 'test-app', is_app=True) == {'k': 'v1'} - assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == \ - {'ingress-address': '192.0.2.1'} + assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == { + 'ingress-address': '192.0.2.1' + } harness.begin() assert backend.relation_get(rel_id, 'test-app', is_app=True) == {'k': 'v1'} - assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == \ - {'ingress-address': '192.0.2.1'} + assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == { + 'ingress-address': '192.0.2.1' + } # Make sure no relation-changed events are emitted for our own data bags. assert harness.charm.observed_events == [] @@ -281,26 +305,29 @@ def _on_db_relation_changed(self, event: ops.EventBase): assert harness.charm.observed_events == [] def test_add_peer_relation_with_initial_data_leader(self, request: pytest.FixtureRequest): - class InitialDataTester(ops.CharmBase): """Record the relation-changed events.""" def __init__(self, framework: ops.Framework): super().__init__(framework) self.observed_events: typing.List[ops.EventBase] = [] - self.framework.observe(self.on.cluster_relation_changed, - self._on_cluster_relation_changed) + self.framework.observe( + self.on.cluster_relation_changed, self._on_cluster_relation_changed + ) def _on_cluster_relation_changed(self, event: ops.EventBase): self.observed_events.append(event) # language=YAML - harness = ops.testing.Harness(InitialDataTester, meta=''' + harness = ops.testing.Harness( + InitialDataTester, + meta=""" name: test-app peers: cluster: interface: cluster - ''') + """, + ) request.addfinalizer(harness.cleanup) # TODO: dmitriis 2020-04-07 test a minion unit and initial peer relation app data # events when the harness begins to emit events for initial data. @@ -310,13 +337,15 @@ def _on_cluster_relation_changed(self, event: ops.EventBase): harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'}) backend = harness._backend assert backend.relation_get(rel_id, 'test-app', is_app=True) == {'k': 'v'} - assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == \ - {'ingress-address': '192.0.2.1'} + assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == { + 'ingress-address': '192.0.2.1' + } harness.begin() assert backend.relation_get(rel_id, 'test-app', is_app=True) == {'k': 'v'} - assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == \ - {'ingress-address': '192.0.2.1'} + assert backend.relation_get(rel_id, 'test-app/0', is_app=False) == { + 'ingress-address': '192.0.2.1' + } # Make sure no relation-changed events are emitted for our own data bags. 
assert harness.charm.observed_events == [] @@ -334,12 +363,15 @@ def _on_cluster_relation_changed(self, event: ops.EventBase): assert isinstance(harness.charm.observed_events[0], ops.RelationEvent) def test_remove_relation(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -359,27 +391,32 @@ def test_remove_relation(self, request: pytest.FixtureRequest): pytest.raises(ops.RelationNotFoundError, backend.relation_list, rel_id) # Check relation broken event is raised with correct data changes = harness.charm.get_changes() - assert changes[0] == \ - {'name': 'relation-departed', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': 'postgresql/0', - 'departing_unit': 'postgresql/0', - 'relation_id': 0}} - assert changes[1] == \ - {'name': 'relation-broken', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': None, - 'relation_id': rel_id}} + assert changes[0] == { + 'name': 'relation-departed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'departing_unit': 'postgresql/0', + 'relation_id': 0, + }, + } + assert changes[1] == { + 'name': 'relation-broken', + 'relation': 'db', + 'data': {'app': 'postgresql', 'unit': None, 'relation_id': rel_id}, + } def test_remove_specific_relation_id(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -413,27 +450,32 @@ def test_remove_specific_relation_id(self, request: pytest.FixtureRequest): # Check relation broken event is raised with correct data changes = harness.charm.get_changes() - assert changes[0] == \ - {'name': 'relation-departed', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': 'postgresql/1', - 'departing_unit': 'postgresql/1', - 'relation_id': rel_id_2}} - assert changes[1] == \ - {'name': 'relation-broken', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': None, - 'relation_id': rel_id_2}} + assert changes[0] == { + 'name': 'relation-departed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/1', + 'departing_unit': 'postgresql/1', + 'relation_id': rel_id_2, + }, + } + assert changes[1] == { + 'name': 'relation-broken', + 'relation': 'db', + 'data': {'app': 'postgresql', 'unit': None, 'relation_id': rel_id_2}, + } def test_removing_invalid_relation_id_raises_exception(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -451,12 +493,15 @@ def test_removing_invalid_relation_id_raises_exception(self, request: pytest.Fix harness.remove_relation(rel_id + 1) def test_remove_relation_unit(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) 
request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -490,26 +535,34 @@ def test_remove_relation_unit(self, request: pytest.FixtureRequest): assert len(rel.units) == 0 assert rel_unit not in rel.data # Check relation departed was raised with correct data - assert harness.charm.get_changes()[0] == \ - {'name': 'relation-departed', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': 'postgresql/0', - 'departing_unit': 'postgresql/0', - 'relation_id': 0, - 'relation_data': {'test-app/0': {}, - 'test-app': {}, - 'postgresql/0': {'foo': 'bar'}, - 'postgresql': {}}}} + assert harness.charm.get_changes()[0] == { + 'name': 'relation-departed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'departing_unit': 'postgresql/0', + 'relation_id': 0, + 'relation_data': { + 'test-app/0': {}, + 'test-app': {}, + 'postgresql/0': {'foo': 'bar'}, + 'postgresql': {}, + }, + }, + } def test_removing_relation_removes_remote_app_data(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -526,17 +579,21 @@ def test_removing_relation_removes_remote_app_data(self, request: pytest.Fixture # Check relation and app data are removed assert backend.relation_ids('db') == [] with harness._event_context('foo'): - pytest.raises(ops.RelationNotFoundError, backend.relation_get, - rel_id, remote_app, is_app=True) + pytest.raises( + ops.RelationNotFoundError, backend.relation_get, rel_id, remote_app, is_app=True + ) def test_removing_relation_refreshes_charm_model(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -566,14 +623,17 @@ def __init__(self, framework: ops.Framework): def _db_relation_broken(self, event: ops.RelationBrokenEvent): nonlocal is_broken, relations is_broken = not event.relation.active - relations = [rel.name for rel in self.model.relations["db"]] + relations = [rel.name for rel in self.model.relations['db']] - harness = ops.testing.Harness(MyCharm, meta=''' + harness = ops.testing.Harness( + MyCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() rel_id = harness.add_relation('db', 'postgresql') @@ -582,9 +642,8 @@ def _db_relation_broken(self, event: ops.RelationBrokenEvent): assert not relations, 'Model.relations contained broken relation' def _find_relation_in_model_by_id( - self, - harness: ops.testing.Harness['RelationEventCharm'], - rel_id: int): + self, harness: ops.testing.Harness['RelationEventCharm'], rel_id: int + ): for relations in harness.charm.model.relations.values(): for relation in relations: if rel_id == relation.id: @@ -592,12 +651,15 @@ def _find_relation_in_model_by_id( return None def test_removing_relation_unit_removes_data_also(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: 
pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -610,38 +672,39 @@ def test_removing_relation_unit_removes_data_also(self, request: pytest.FixtureR backend = harness._backend assert backend.relation_ids('db') == [rel_id] assert backend.relation_list(rel_id) == ['postgresql/0'] - assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == \ - {'foo': 'bar'} + assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == {'foo': 'bar'} harness.charm.get_changes(reset=True) # ignore relation created events # Remove unit but not relation harness.remove_relation_unit(rel_id, 'postgresql/0') # Check relation exists but unit and data are removed assert backend.relation_ids('db') == [rel_id] assert backend.relation_list(rel_id) == [] - pytest.raises(KeyError, - backend.relation_get, - rel_id, - 'postgresql/0', - is_app=False) + pytest.raises(KeyError, backend.relation_get, rel_id, 'postgresql/0', is_app=False) # Check relation departed was raised with correct data - assert harness.charm.get_changes()[0] == \ - {'name': 'relation-departed', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': 'postgresql/0', - 'departing_unit': 'postgresql/0', - 'relation_id': rel_id}} + assert harness.charm.get_changes()[0] == { + 'name': 'relation-departed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'departing_unit': 'postgresql/0', + 'relation_id': rel_id, + }, + } def test_removing_relation_unit_does_not_remove_other_unit_and_data( self, request: pytest.FixtureRequest, ): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') @@ -655,84 +718,97 @@ def test_removing_relation_unit_does_not_remove_other_unit_and_data( # Check both unit and data are present backend = harness._backend assert backend.relation_ids('db') == [rel_id] - assert backend.relation_list(rel_id) == \ - ['postgresql/0', 'postgresql/1'] - assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == \ - {'foo0': 'bar0'} - assert backend.relation_get(rel_id, 'postgresql/1', is_app=False) == \ - {'foo1': 'bar1'} + assert backend.relation_list(rel_id) == ['postgresql/0', 'postgresql/1'] + assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == {'foo0': 'bar0'} + assert backend.relation_get(rel_id, 'postgresql/1', is_app=False) == {'foo1': 'bar1'} harness.charm.get_changes(reset=True) # ignore relation created events # Remove only one unit harness.remove_relation_unit(rel_id, 'postgresql/1') # Check other unit and data still exists - assert backend.relation_list(rel_id) == \ - ['postgresql/0'] - assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == \ - {'foo0': 'bar0'} + assert backend.relation_list(rel_id) == ['postgresql/0'] + assert backend.relation_get(rel_id, 'postgresql/0', is_app=False) == {'foo0': 'bar0'} # Check relation departed was raised with correct data - assert harness.charm.get_changes()[0] == \ - {'name': 'relation-departed', - 'relation': 'db', - 'data': {'app': 'postgresql', - 'unit': 'postgresql/1', - 'departing_unit': 'postgresql/1', - 'relation_id': rel_id}} + assert harness.charm.get_changes()[0] == { + 'name': 'relation-departed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/1', + 
'departing_unit': 'postgresql/1', + 'relation_id': rel_id, + }, + } def test_relation_events(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RelationEventCharm, meta=''' + harness = ops.testing.Harness( + RelationEventCharm, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_relation_events('db') assert harness.charm.get_changes() == [] rel_id = harness.add_relation('db', 'postgresql') - assert harness.charm.get_changes() == \ - [{'name': 'relation-created', - 'relation': 'db', - 'data': { - 'app': 'postgresql', - 'unit': None, - 'relation_id': rel_id, - }}] + assert harness.charm.get_changes() == [ + { + 'name': 'relation-created', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': None, + 'relation_id': rel_id, + }, + } + ] harness.add_relation_unit(rel_id, 'postgresql/0') - assert harness.charm.get_changes() == \ - [{'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'app': 'postgresql', - 'unit': 'postgresql/0', - 'relation_id': rel_id, - }}] + assert harness.charm.get_changes() == [ + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'relation_id': rel_id, + }, + } + ] harness.update_relation_data(rel_id, 'postgresql', {'foo': 'bar'}) - assert harness.charm.get_changes() == \ - [{'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'app': 'postgresql', - 'unit': None, - 'relation_id': rel_id, - }}] + assert harness.charm.get_changes() == [ + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': None, + 'relation_id': rel_id, + }, + } + ] harness.update_relation_data(rel_id, 'postgresql/0', {'baz': 'bing'}) - assert harness.charm.get_changes() == \ - [{'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'app': 'postgresql', - 'unit': 'postgresql/0', - 'relation_id': rel_id, - }}] + assert harness.charm.get_changes() == [ + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'app': 'postgresql', + 'unit': 'postgresql/0', + 'relation_id': rel_id, + }, + } + ] def test_get_relation_data(self, request: pytest.FixtureRequest): - charm_meta = ''' + charm_meta = """ name: test-app requires: db: interface: pgsql - ''' + """ harness = ops.testing.Harness(ops.CharmBase, meta=charm_meta) request.addfinalizer(harness.cleanup) rel_id = harness.add_relation('db', 'postgresql') @@ -757,20 +833,20 @@ def test_get_relation_data(self, request: pytest.FixtureRequest): assert harness.get_relation_data(rel_id, pg_app) == {'remote': 'data'} def test_create_harness_twice(self, request: pytest.FixtureRequest): - metadata = ''' + metadata = """ name: my-charm requires: db: interface: pgsql - ''' + """ harness1 = ops.testing.Harness(ops.CharmBase, meta=metadata) request.addfinalizer(harness1.cleanup) harness2 = ops.testing.Harness(ops.CharmBase, meta=metadata) request.addfinalizer(harness2.cleanup) harness1.begin() harness2.begin() - helper1 = DBRelationChangedHelper(harness1.charm, "helper1") - helper2 = DBRelationChangedHelper(harness2.charm, "helper2") + helper1 = DBRelationChangedHelper(harness1.charm, 'helper1') + helper2 = DBRelationChangedHelper(harness2.charm, 'helper2') rel_id = harness2.add_relation('db', 'postgresql') harness2.update_relation_data(rel_id, 'postgresql', {'key': 'value'}) # Helper2 should see the event triggered by harness2, but helper1 should see no events. 
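
The conversion above follows one pattern for every relation test: inline YAML metadata in a triple-double-quoted string, cleanup registered through the pytest `request` fixture rather than `unittest` teardown, and bare `assert` comparisons against whole dicts. A minimal, self-contained sketch of that pattern (the charm metadata, test name, and relation data below are illustrative, not taken from this patch):

import pytest

import ops
import ops.testing


def test_relation_data_pattern(request: pytest.FixtureRequest):
    # Inline metadata, wrapped the same way the refactored tests wrap it.
    harness = ops.testing.Harness(
        ops.CharmBase,
        meta="""
        name: example-app
        requires:
          db:
            interface: pgsql
        """,
    )
    # pytest-style teardown instead of unittest addCleanup/tearDown.
    request.addfinalizer(harness.cleanup)
    harness.begin()

    rel_id = harness.add_relation('db', 'postgresql')
    harness.add_relation_unit(rel_id, 'postgresql/0')
    harness.update_relation_data(rel_id, 'postgresql/0', {'foo': 'bar'})

    # Bare asserts compare whole dicts, replacing self.assertEqual.
    assert harness.get_relation_data(rel_id, 'postgresql/0') == {'foo': 'bar'}
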
@@ -779,24 +855,30 @@ def test_create_harness_twice(self, request: pytest.FixtureRequest): def test_begin_twice(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() with pytest.raises(RuntimeError): harness.begin() def test_update_relation_exposes_new_data(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') @@ -805,20 +887,22 @@ def test_update_relation_exposes_new_data(self, request: pytest.FixtureRequest): harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'}) assert viewer.changes == [{'initial': 'data'}] harness.update_relation_data(rel_id, 'postgresql/0', {'new': 'value'}) - assert viewer.changes == [{'initial': 'data'}, - {'initial': 'data', 'new': 'value'}] + assert viewer.changes == [{'initial': 'data'}, {'initial': 'data', 'new': 'value'}] def test_update_relation_no_local_unit_change_event(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() - helper = DBRelationChangedHelper(harness.charm, "helper") + helper = DBRelationChangedHelper(harness.charm, 'helper') rel_id = harness.add_relation('db', 'postgresql') rel = harness.charm.model.get_relation('db') assert rel is not None @@ -836,15 +920,18 @@ def test_update_relation_no_local_unit_change_event(self, request: pytest.Fixtur def test_update_peer_relation_no_local_unit_change_event(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: postgresql peers: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() - helper = DBRelationChangedHelper(harness.charm, "helper") + helper = DBRelationChangedHelper(harness.charm, 'helper') rel_id = harness.add_relation('db', 'postgresql') rel = harness.charm.model.get_relation('db') @@ -875,16 +962,19 @@ def test_update_peer_relation_no_local_unit_change_event(self, request: pytest.F def test_update_peer_relation_app_data(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: postgresql peers: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(is_leader=True) - helper = DBRelationChangedHelper(harness.charm, "helper") + helper = DBRelationChangedHelper(harness.charm, 'helper') rel_id = harness.add_relation('db', 'postgresql') rel = harness.charm.model.get_relation('db') assert rel is not None @@ -907,16 +997,19 @@ def test_update_peer_relation_app_data(self, request: pytest.FixtureRequest): def test_update_relation_no_local_app_change_event(self, request: pytest.FixtureRequest): # language=YAML - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + 
meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) - helper = DBRelationChangedHelper(harness.charm, "helper") + helper = DBRelationChangedHelper(harness.charm, 'helper') rel_id = harness.add_relation('db', 'postgresql') # TODO: remove this as soon as https://github.com/canonical/operator/issues/175 is fixed. harness.add_relation_unit(rel_id, 'postgresql/0') @@ -933,12 +1026,15 @@ def test_update_relation_no_local_app_change_event(self, request: pytest.Fixture assert helper.changes == [] def test_update_relation_remove_data(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') @@ -949,12 +1045,15 @@ def test_update_relation_remove_data(self, request: pytest.FixtureRequest): assert viewer.changes == [{'initial': 'data'}, {}] def test_no_event_on_empty_update_relation_unit_app(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') @@ -965,12 +1064,15 @@ def test_no_event_on_empty_update_relation_unit_app(self, request: pytest.Fixtur assert viewer.changes == [{'initial': 'data'}] def test_no_event_on_no_diff_update_relation_unit_app(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') @@ -981,12 +1083,15 @@ def test_no_event_on_no_diff_update_relation_unit_app(self, request: pytest.Fixt assert viewer.changes == [{'initial': 'data'}] def test_no_event_on_empty_update_relation_unit_bag(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') @@ -997,12 +1102,15 @@ def test_no_event_on_empty_update_relation_unit_bag(self, request: pytest.Fixtur assert viewer.changes == [{'initial': 'data'}] def test_no_event_on_no_diff_update_relation_unit_bag(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: my-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() viewer = RelationChangedViewer(harness.charm, 'db') @@ -1017,7 +1125,9 @@ def test_empty_config_raises(self): ops.testing.Harness(RecordingCharm, config='') def test_update_config(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, config=''' + harness = ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option @@ -1025,23 +1135,24 @@ def test_update_config(self, request: pytest.FixtureRequest): b: description: another config option type: int - ''') + 
""", + ) request.addfinalizer(harness.cleanup) harness.begin() harness.update_config(key_values={'a': 'foo', 'b': 2}) - assert harness.charm.changes == \ - [{'name': 'config-changed', 'data': {'a': 'foo', 'b': 2}}] + assert harness.charm.changes == [{'name': 'config-changed', 'data': {'a': 'foo', 'b': 2}}] harness.update_config(key_values={'b': 3}) - assert harness.charm.changes == \ - [{'name': 'config-changed', 'data': {'a': 'foo', 'b': 2}}, - {'name': 'config-changed', 'data': {'a': 'foo', 'b': 3}}] + assert harness.charm.changes == [ + {'name': 'config-changed', 'data': {'a': 'foo', 'b': 2}}, + {'name': 'config-changed', 'data': {'a': 'foo', 'b': 3}}, + ] # you can set config values to the empty string, you can use unset to actually remove items harness.update_config(key_values={'a': ''}, unset=set('b')) - assert harness.charm.changes == \ - [{'name': 'config-changed', 'data': {'a': 'foo', 'b': 2}}, - {'name': 'config-changed', 'data': {'a': 'foo', 'b': 3}}, - {'name': 'config-changed', 'data': {'a': ''}}, - ] + assert harness.charm.changes == [ + {'name': 'config-changed', 'data': {'a': 'foo', 'b': 2}}, + {'name': 'config-changed', 'data': {'a': 'foo', 'b': 3}}, + {'name': 'config-changed', 'data': {'a': ''}}, + ] def test_update_config_undefined_option(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm) @@ -1051,13 +1162,16 @@ def test_update_config_undefined_option(self, request: pytest.FixtureRequest): harness.update_config(key_values={'nonexistent': 'foo'}) def test_update_config_bad_type(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, config=''' + harness = ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option type: boolean default: false - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() with pytest.raises(RuntimeError): @@ -1077,22 +1191,28 @@ def test_update_config_bad_type(self, request: pytest.FixtureRequest): def test_bad_config_option_type(self): with pytest.raises(RuntimeError): - ops.testing.Harness(RecordingCharm, config=''' + ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option type: gibberish default: False - ''') + """, + ) def test_config_secret_option(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, config=''' + harness = ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option type: secret default: "" - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'key': 'value'}) @@ -1101,31 +1221,40 @@ def test_config_secret_option(self, request: pytest.FixtureRequest): def test_no_config_option_type(self): with pytest.raises(RuntimeError): - ops.testing.Harness(RecordingCharm, config=''' + ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option default: False - ''') + """, + ) def test_uncastable_config_option_type(self): with pytest.raises(RuntimeError): - ops.testing.Harness(RecordingCharm, config=''' + ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option type: boolean default: peek-a-bool! 
- ''') + """, + ) def test_update_config_unset_boolean(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, config=''' + harness = ops.testing.Harness( + RecordingCharm, + config=""" options: a: description: a config option type: boolean default: False - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() # Check the default was set correctly @@ -1135,9 +1264,10 @@ def test_update_config_unset_boolean(self, request: pytest.FixtureRequest): assert harness.charm.changes == [{'name': 'config-changed', 'data': {'a': True}}] # Unset the boolean value harness.update_config(unset={'a'}) - assert harness.charm.changes == \ - [{'name': 'config-changed', 'data': {'a': True}}, - {'name': 'config-changed', 'data': {'a': False}}] + assert harness.charm.changes == [ + {'name': 'config-changed', 'data': {'a': True}}, + {'name': 'config-changed', 'data': {'a': False}}, + ] def test_set_leader(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(RecordingCharm) @@ -1161,12 +1291,15 @@ def test_set_leader(self, request: pytest.FixtureRequest): assert harness.charm.get_changes(reset=True) == [] def test_relation_set_app_not_leader(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) @@ -1186,69 +1319,82 @@ def test_relation_set_app_not_leader(self, request: pytest.FixtureRequest): def test_hooks_enabled_and_disabled(self, request: pytest.FixtureRequest): harness = ops.testing.Harness( RecordingCharm, - meta=''' + meta=""" name: test-charm - ''', - config=''' + """, + config=""" options: value: type: string third: type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) # Before begin() there are no events. harness.update_config({'value': 'first'}) # By default, after begin the charm is set up to receive events. harness.begin() harness.update_config({'value': 'second'}) - assert harness.charm.get_changes(reset=True) == \ - [{'name': 'config-changed', 'data': {'value': 'second'}}] + assert harness.charm.get_changes(reset=True) == [ + {'name': 'config-changed', 'data': {'value': 'second'}} + ] # Once disabled, we won't see config-changed when we make an update harness.disable_hooks() harness.update_config({'third': '3'}) assert harness.charm.get_changes(reset=True) == [] harness.enable_hooks() harness.update_config({'value': 'fourth'}) - assert harness.charm.get_changes(reset=True) == \ - [{'name': 'config-changed', 'data': {'value': 'fourth', 'third': '3'}}] + assert harness.charm.get_changes(reset=True) == [ + {'name': 'config-changed', 'data': {'value': 'fourth', 'third': '3'}} + ] def test_hooks_disabled_contextmanager(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-charm - ''', config=''' + """, + config=""" options: value: type: string third: type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) # Before begin() there are no events. harness.update_config({'value': 'first'}) # By default, after begin the charm is set up to receive events. 
harness.begin() harness.update_config({'value': 'second'}) - assert harness.charm.get_changes(reset=True) == \ - [{'name': 'config-changed', 'data': {'value': 'second'}}] + assert harness.charm.get_changes(reset=True) == [ + {'name': 'config-changed', 'data': {'value': 'second'}} + ] # Once disabled, we won't see config-changed when we make an update with harness.hooks_disabled(): harness.update_config({'third': '3'}) assert harness.charm.get_changes(reset=True) == [] harness.update_config({'value': 'fourth'}) - assert harness.charm.get_changes(reset=True) == \ - [{'name': 'config-changed', 'data': {'value': 'fourth', 'third': '3'}}] + assert harness.charm.get_changes(reset=True) == [ + {'name': 'config-changed', 'data': {'value': 'fourth', 'third': '3'}} + ] def test_hooks_disabled_nested_contextmanager(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-charm - ''', config=''' + """, + config=""" options: fifth: type: string sixth: type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() # Context manager can be nested, so a test using it can invoke a helper using it. @@ -1259,15 +1405,19 @@ def test_hooks_disabled_nested_contextmanager(self, request: pytest.FixtureReque assert harness.charm.get_changes(reset=True) == [] def test_hooks_disabled_noop(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-charm - ''', config=''' + """, + config=""" options: seventh: type: string eighth: type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() # If hooks are already disabled, it is a no op, and on exit hooks remain disabled. 
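
The hooks tests above exercise `Harness.disable_hooks`/`enable_hooks` and the `hooks_disabled()` context manager. A short sketch of how that context manager suppresses `config-changed` emission, using a stand-in recorder charm rather than this module's `RecordingCharm` helper (the class and test names here are illustrative assumptions):

import typing

import pytest

import ops
import ops.testing


class ConfigRecorder(ops.CharmBase):
    """Minimal stand-in for the RecordingCharm helper used in this module."""

    def __init__(self, framework: ops.Framework):
        super().__init__(framework)
        self.seen: typing.List[str] = []
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, event: ops.ConfigChangedEvent):
        self.seen.append('config-changed')


def test_hooks_disabled_sketch(request: pytest.FixtureRequest):
    harness = ops.testing.Harness(
        ConfigRecorder,
        meta="""
        name: example-charm
        """,
        config="""
        options:
          value:
            type: string
        """,
    )
    request.addfinalizer(harness.cleanup)
    harness.begin()

    # Inside hooks_disabled() the backing config changes, but no event fires.
    with harness.hooks_disabled():
        harness.update_config({'value': 'quiet'})
    assert harness.charm.seen == []

    # Once hooks are re-enabled on exit, the next update emits config-changed.
    harness.update_config({'value': 'loud'})
    assert harness.charm.seen == ['config-changed']
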
@@ -1282,12 +1432,14 @@ def test_metadata_from_directory(self, request: pytest.FixtureRequest): request.addfinalizer(lambda: shutil.rmtree(tmp)) metadata_filename = tmp / 'metadata.yaml' with metadata_filename.open('wt') as metadata: - metadata.write(textwrap.dedent(''' + metadata.write( + textwrap.dedent(""" name: my-charm requires: db: interface: pgsql - ''')) + """) + ) harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.model.relations) == ['db'] @@ -1298,7 +1450,8 @@ def test_metadata_from_directory_charmcraft_yaml(self, request: pytest.FixtureRe tmp = pathlib.Path(tempfile.mkdtemp()) request.addfinalizer(lambda: shutil.rmtree(tmp)) charmcraft_filename = tmp / 'charmcraft.yaml' - charmcraft_filename.write_text(textwrap.dedent(''' + charmcraft_filename.write_text( + textwrap.dedent(""" type: charm bases: - build-on: @@ -1312,7 +1465,8 @@ def test_metadata_from_directory_charmcraft_yaml(self, request: pytest.FixtureRe requires: db: interface: pgsql - ''')) + """) + ) harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.model.relations) == ['db'] @@ -1324,7 +1478,8 @@ def test_config_from_directory(self, request: pytest.FixtureRequest): request.addfinalizer(lambda: shutil.rmtree(tmp)) config_filename = tmp / 'config.yaml' with config_filename.open('wt') as config: - config.write(textwrap.dedent(''' + config.write( + textwrap.dedent(""" options: opt_str: type: string @@ -1346,7 +1501,8 @@ def test_config_from_directory(self, request: pytest.FixtureRequest): default: 1.0 opt_no_default: type: string - ''')) + """) + ) harness = self._get_dummy_charm_harness(request, tmp) assert harness.model.config['opt_str'] == 'val' assert harness.model.config['opt_str_empty'] == '' @@ -1363,7 +1519,8 @@ def test_config_from_directory_charmcraft_yaml(self, request: pytest.FixtureRequ tmp = pathlib.Path(tempfile.mkdtemp()) request.addfinalizer(lambda: shutil.rmtree(tmp)) charmcraft_filename = tmp / 'charmcraft.yaml' - charmcraft_filename.write_text(textwrap.dedent(''' + charmcraft_filename.write_text( + textwrap.dedent(""" type: charm bases: - build-on: @@ -1381,7 +1538,8 @@ def test_config_from_directory_charmcraft_yaml(self, request: pytest.FixtureRequ opt_int: type: int default: 1 - ''')) + """) + ) harness = self._get_dummy_charm_harness(request, tmp) assert harness.model.config['opt_str'] == 'val' assert harness.model.config['opt_int'] == 1 @@ -1391,35 +1549,45 @@ def test_config_in_repl(self, request: pytest.FixtureRequest): # In a REPL, there is no "source file", but we should still be able to # provide explicit metadata, and fall back to the default otherwise. 
with patch.object(inspect, 'getfile', side_effect=OSError()): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: repl-charm - ''', config=''' + """, + config=""" options: foo: type: int default: 42 - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() - assert harness._meta.name == "repl-charm" + assert harness._meta.name == 'repl-charm' assert harness.charm.model.config['foo'] == 42 harness = ops.testing.Harness(ops.CharmBase) request.addfinalizer(harness.cleanup) - assert harness._meta.name == "test-charm" + assert harness._meta.name == 'test-charm' def test_set_model_name(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_model_name('foo') assert harness.model.name == 'foo' def test_set_model_name_after_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_model_name('bar') harness.begin() @@ -1428,9 +1596,12 @@ def test_set_model_name_after_begin(self, request: pytest.FixtureRequest): assert harness.model.name == 'bar' def test_set_model_uuid_after_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_model_name('bar') harness.set_model_uuid('96957e90-e006-11eb-ba80-0242ac130004') @@ -1440,9 +1611,12 @@ def test_set_model_uuid_after_begin(self, request: pytest.FixtureRequest): assert harness.model.uuid == '96957e90-e006-11eb-ba80-0242ac130004' def test_set_model_info_after_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_model_info('foo', '96957e90-e006-11eb-ba80-0242ac130004') harness.begin() @@ -1460,7 +1634,9 @@ def test_set_model_info_after_begin(self, request: pytest.FixtureRequest): assert harness.model.uuid == '96957e90-e006-11eb-ba80-0242ac130004' def test_add_storage_before_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1470,19 +1646,22 @@ def test_add_storage_before_harness_begin(self, request: pytest.FixtureRequest): type: filesystem multiple: range: 1-3 - ''') + """, + ) request.addfinalizer(harness.cleanup) - stor_ids = harness.add_storage("test", count=3) + stor_ids = harness.add_storage('test', count=3) for s in stor_ids: # before begin, adding storage does not attach it. 
- assert s not in harness._backend.storage_list("test") + assert s not in harness._backend.storage_list('test') with pytest.raises(ops.ModelError): - harness._backend.storage_get("test/0", "location")[-6:] + harness._backend.storage_get('test/0', 'location')[-6:] def test_add_storage_then_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1492,13 +1671,14 @@ def test_add_storage_then_harness_begin(self, request: pytest.FixtureRequest): type: filesystem multiple: range: 1-3 - ''') + """, + ) request.addfinalizer(harness.cleanup) - harness.add_storage("test", count=3) + harness.add_storage('test', count=3) with pytest.raises(ops.ModelError): - harness._backend.storage_get("test/0", "location")[-6:] + harness._backend.storage_get('test/0', 'location')[-6:] harness.begin_with_initial_hooks() assert len(harness.charm.observed_events) == 3 @@ -1506,10 +1686,12 @@ def test_add_storage_then_harness_begin(self, request: pytest.FixtureRequest): assert isinstance(harness.charm.observed_events[i], ops.StorageAttachedEvent) want = str(pathlib.PurePath('test', '0')) - assert want == harness._backend.storage_get("test/0", "location")[-6:] + assert want == harness._backend.storage_get('test/0', 'location')[-6:] def test_add_storage_not_attached_default(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: @@ -1517,30 +1699,38 @@ def test_add_storage_not_attached_default(self, request: pytest.FixtureRequest): storage: test: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.add_storage('test') harness.begin() - assert len(harness.model.storages['test']) == 0, \ - 'storage should start in detached state and be excluded from storage listing' + assert ( + len(harness.model.storages['test']) == 0 + ), 'storage should start in detached state and be excluded from storage listing' def test_add_storage_without_metadata_key_fails(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError) as excinfo: - harness.add_storage("test") - assert excinfo.value.args[0] == \ - "the key 'test' is not specified as a storage key in metadata" + harness.add_storage('test') + assert ( + excinfo.value.args[0] == "the key 'test' is not specified as a storage key in metadata" + ) def test_add_storage_after_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1550,34 +1740,37 @@ def test_add_storage_after_harness_begin(self, request: pytest.FixtureRequest): type: filesystem multiple: range: 1-3 - ''') + """, + ) request.addfinalizer(harness.cleanup) # Set up initial storage - harness.add_storage("test")[0] + harness.add_storage('test')[0] harness.begin_with_initial_hooks() assert len(harness.charm.observed_events) == 1 assert isinstance(harness.charm.observed_events[0], ops.StorageAttachedEvent) # Add additional storage - stor_ids = harness.add_storage("test", count=3, attach=True) + stor_ids = harness.add_storage('test', count=3, attach=True) # NOTE: 
stor_id now reflects the 4th ID. The 2nd and 3rd IDs are created and # used, but not returned by Harness.add_storage. # (Should we consider changing its return type?) added_indices = {self._extract_storage_index(stor_id) for stor_id in stor_ids} - assert added_indices.issubset(set(harness._backend.storage_list("test"))) + assert added_indices.issubset(set(harness._backend.storage_list('test'))) for i in ['1', '2', '3']: - storage_name = f"test/{i}" + storage_name = f'test/{i}' want = str(pathlib.PurePath('test', i)) - assert harness._backend.storage_get(storage_name, "location").endswith(want) + assert harness._backend.storage_get(storage_name, 'location').endswith(want) assert len(harness.charm.observed_events) == 4 for i in range(1, 4): assert isinstance(harness.charm.observed_events[i], ops.StorageAttachedEvent) def test_detach_storage(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1585,11 +1778,12 @@ def test_detach_storage(self, request: pytest.FixtureRequest): storage: test: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) # Set up initial storage - stor_id = harness.add_storage("test")[0] + stor_id = harness.add_storage('test')[0] harness.begin_with_initial_hooks() assert len(harness.charm.observed_events) == 1 assert isinstance(harness.charm.observed_events[0], ops.StorageAttachedEvent) @@ -1601,13 +1795,15 @@ def test_detach_storage(self, request: pytest.FixtureRequest): # Verify backend functions return appropriate values. # Real backend would return info only for actively attached storage units. - assert stor_id not in harness._backend.storage_list("test") + assert stor_id not in harness._backend.storage_list('test') with pytest.raises(ops.ModelError) as excinfo: - harness._backend.storage_get("test/0", "location") + harness._backend.storage_get('test/0', 'location') # Error message modeled after output of # "storage-get -s location" on real deployment - assert excinfo.value.args[0] == \ - 'ERROR invalid value "test/0" for option -s: storage not found' + assert ( + excinfo.value.args[0] + == 'ERROR invalid value "test/0" for option -s: storage not found' + ) # Retry detach # Since already detached, no more hooks should fire @@ -1616,7 +1812,9 @@ def test_detach_storage(self, request: pytest.FixtureRequest): assert isinstance(harness.charm.observed_events[1], ops.StorageDetachingEvent) def test_detach_storage_before_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1624,17 +1822,19 @@ def test_detach_storage_before_harness_begin(self, request: pytest.FixtureReques storage: test: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) - stor_id = harness.add_storage("test")[0] + stor_id = harness.add_storage('test')[0] with pytest.raises(RuntimeError) as excinfo: - harness.detach_storage(f"test/{stor_id}") - assert excinfo.value.args[0] == \ - "cannot detach storage before Harness is initialised" + harness.detach_storage(f'test/{stor_id}') + assert excinfo.value.args[0] == 'cannot detach storage before Harness is initialised' def test_storage_with_hyphens_works(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ 
-1644,18 +1844,21 @@ def test_storage_with_hyphens_works(self, request: pytest.FixtureRequest): type: filesystem test-with-hyphens: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) # Set up initial storage harness.begin() - helper = StorageWithHyphensHelper(harness.charm, "helper") - harness.add_storage("test-with-hyphens", attach=True)[0] + helper = StorageWithHyphensHelper(harness.charm, 'helper') + harness.add_storage('test-with-hyphens', attach=True)[0] assert len(helper.changes) == 1 def test_attach_storage(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1663,11 +1866,12 @@ def test_attach_storage(self, request: pytest.FixtureRequest): storage: test: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) # Set up initial storage - stor_id = harness.add_storage("test")[0] + stor_id = harness.add_storage('test')[0] harness.begin_with_initial_hooks() assert len(harness.charm.observed_events) == 1 assert isinstance(harness.charm.observed_events[0], ops.StorageAttachedEvent) @@ -1684,9 +1888,9 @@ def test_attach_storage(self, request: pytest.FixtureRequest): # Verify backend functions return appropriate values. # Real backend would return info only for actively attached storage units. - assert self._extract_storage_index(stor_id) in harness._backend.storage_list("test") + assert self._extract_storage_index(stor_id) in harness._backend.storage_list('test') want = str(pathlib.PurePath('test', '0')) - assert want == harness._backend.storage_get("test/0", "location")[-6:] + assert want == harness._backend.storage_get('test/0', 'location')[-6:] # Retry attach # Since already detached, no more hooks should fire @@ -1695,7 +1899,9 @@ def test_attach_storage(self, request: pytest.FixtureRequest): assert isinstance(harness.charm.observed_events[2], ops.StorageAttachedEvent) def test_attach_storage_before_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1703,16 +1909,19 @@ def test_attach_storage_before_harness_begin(self, request: pytest.FixtureReques storage: test: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) # We deliberately don't guard against attaching storage before the harness begins, # as there are legitimate reasons to do so. - stor_id = harness.add_storage("test")[0] + stor_id = harness.add_storage('test')[0] assert stor_id def test_remove_storage_before_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1722,10 +1931,11 @@ def test_remove_storage_before_harness_begin(self, request: pytest.FixtureReques type: filesystem multiple: range: 1-3 - ''') + """, + ) request.addfinalizer(harness.cleanup) - stor_ids = harness.add_storage("test", count=2) + stor_ids = harness.add_storage('test', count=2) harness.remove_storage(stor_ids[0]) # Note re: delta between real behavior and Harness: Juju doesn't allow removal # of the last attached storage unit while a workload is still running. 
To more @@ -1740,23 +1950,29 @@ def test_remove_storage_before_harness_begin(self, request: pytest.FixtureReques assert isinstance(harness.charm.observed_events[0], ops.StorageAttachedEvent) def test_remove_storage_without_metadata_key_fails(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) # Doesn't really make sense since we already can't add storage which isn't in the metadata, # but included for completeness. with pytest.raises(RuntimeError) as excinfo: - harness.remove_storage("test/0") - assert excinfo.value.args[0] == \ - "the key 'test' is not specified as a storage key in metadata" + harness.remove_storage('test/0') + assert ( + excinfo.value.args[0] == "the key 'test' is not specified as a storage key in metadata" + ) def test_remove_storage_after_harness_begin(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1766,10 +1982,11 @@ def test_remove_storage_after_harness_begin(self, request: pytest.FixtureRequest type: filesystem multiple: range: 1-3 - ''') + """, + ) request.addfinalizer(harness.cleanup) - stor_ids = harness.add_storage("test", count=2) + stor_ids = harness.add_storage('test', count=2) harness.begin_with_initial_hooks() assert len(harness.charm.observed_events) == 2 assert isinstance(harness.charm.observed_events[0], ops.StorageAttachedEvent) @@ -1779,7 +1996,7 @@ def test_remove_storage_after_harness_begin(self, request: pytest.FixtureRequest assert len(harness.charm.observed_events) == 3 assert isinstance(harness.charm.observed_events[2], ops.StorageDetachingEvent) - attached_storage_ids = harness._backend.storage_list("test") + attached_storage_ids = harness._backend.storage_list('test') assert self._extract_storage_index(stor_ids[0]) in attached_storage_ids assert self._extract_storage_index(stor_ids[1]) not in attached_storage_ids @@ -1787,7 +2004,9 @@ def _extract_storage_index(self, stor_id: str): return int(stor_id.split('/')[-1]) def test_remove_detached_storage(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(StorageTester, meta=''' + harness = ops.testing.Harness( + StorageTester, + meta=""" name: test-app requires: db: @@ -1797,10 +2016,11 @@ def test_remove_detached_storage(self, request: pytest.FixtureRequest): type: filesystem multiple: range: 1-3 - ''') + """, + ) request.addfinalizer(harness.cleanup) - stor_ids = harness.add_storage("test", count=2) + stor_ids = harness.add_storage('test', count=2) harness.begin_with_initial_hooks() harness.detach_storage(stor_ids[0]) harness.remove_storage(stor_ids[0]) # Already detached, so won't fire a hook @@ -1814,10 +2034,12 @@ def test_actions_from_directory(self, request: pytest.FixtureRequest): request.addfinalizer(lambda: shutil.rmtree(tmp)) actions_filename = tmp / 'actions.yaml' with actions_filename.open('wt') as actions: - actions.write(textwrap.dedent(''' + actions.write( + textwrap.dedent(""" test: description: a dummy action - ''')) + """) + ) harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.framework.meta.actions) == ['test'] @@ -1828,7 +2050,8 @@ def test_actions_from_directory_charmcraft_yaml(self, request: pytest.FixtureReq tmp = pathlib.Path(tempfile.mkdtemp()) request.addfinalizer(lambda: 
shutil.rmtree(tmp)) charmcraft_filename = tmp / 'charmcraft.yaml' - charmcraft_filename.write_text(textwrap.dedent(''' + charmcraft_filename.write_text( + textwrap.dedent(""" type: charm bases: - build-on: @@ -1841,7 +2064,8 @@ def test_actions_from_directory_charmcraft_yaml(self, request: pytest.FixtureReq actions: test: description: a dummy action - ''')) + """) + ) harness = self._get_dummy_charm_harness(request, tmp) harness.begin() assert list(harness.framework.meta.actions) == ['test'] @@ -1861,11 +2085,13 @@ def _write_dummy_charm(self, request: pytest.FixtureRequest, tmp: pathlib.Path): charm_filename = srcdir / 'testcharm.py' with charm_filename.open('wt') as charmpy: # language=Python - charmpy.write(textwrap.dedent(''' + charmpy.write( + textwrap.dedent(""" from ops import CharmBase class MyTestingCharm(CharmBase): pass - ''')) + """) + ) orig = sys.path[:] sys.path.append(str(srcdir)) @@ -1878,13 +2104,14 @@ def cleanup(): def test_actions_passed_in(self, request: pytest.FixtureRequest): harness = ops.testing.Harness( ops.CharmBase, - meta=''' + meta=""" name: test-app - ''', - actions=''' + """, + actions=""" test-action: description: a dummy test action - ''') + """, + ) request.addfinalizer(harness.cleanup) assert list(harness.framework.meta.actions) == ['test-action'] @@ -1895,12 +2122,15 @@ def event_handler(self, evt: ops.RelationEvent): assert rel is not None and rel.app is not None rel.data[rel.app]['foo'] = 'bar' - harness = ops.testing.Harness(MyCharm, meta=''' + harness = ops.testing.Harness( + MyCharm, + meta=""" name: test-charm requires: db: interface: pgsql - ''') + """, + ) harness.begin() rel_id = harness.add_relation('db', 'postgresql') rel = harness.charm.model.get_relation('db', rel_id) @@ -1916,19 +2146,21 @@ def test_event_context_inverse(self): class MyCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) - self.framework.observe(self.on.db_relation_joined, - self._join_db) + self.framework.observe(self.on.db_relation_joined, self._join_db) def _join_db(self, event: ops.EventBase) -> None: # do things with APIs we cannot easily mock raise NotImplementedError - harness = ops.testing.Harness(MyCharm, meta=''' + harness = ops.testing.Harness( + MyCharm, + meta=""" name: test-charm requires: db: interface: pgsql - ''') + """, + ) harness.begin() def mock_join_db(event: ops.EventBase): @@ -1945,20 +2177,22 @@ def mock_join_db(event: ops.EventBase): harness.add_relation_unit(rel_id, 'remote/0') rel = harness.charm.model.get_relation('db', rel_id) assert rel is not None - assert harness.get_relation_data(rel_id, 'test-charm') == \ - {'foo': 'bar'} + assert harness.get_relation_data(rel_id, 'test-charm') == {'foo': 'bar'} # now we're outside of the hook context: assert not harness._backend._hook_is_running assert rel.data[harness.charm.app]['foo'] == 'bar' def test_relation_set_deletes(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) @@ -1971,25 +2205,30 @@ def test_relation_set_deletes(self, request: pytest.FixtureRequest): assert harness.get_relation_data(rel_id, 'test-charm/0') == {} def test_relation_set_nonstring(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" 
name: test-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_leader(False) rel_id = harness.add_relation('db', 'postgresql') for invalid_value in (1, 1.2, {}, [], set(), True, object(), type): # type: ignore with pytest.raises(ops.RelationDataError): - harness.update_relation_data(rel_id, 'test-charm/0', - {'foo': invalid_value}) # type: ignore + harness.update_relation_data(rel_id, 'test-charm/0', {'foo': invalid_value}) # type: ignore def test_set_workload_version(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: app - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() assert harness.get_workload_version() is None @@ -1997,12 +2236,15 @@ def test_set_workload_version(self, request: pytest.FixtureRequest): assert harness.get_workload_version() == '1.2.3' def test_get_backend_calls(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() # No calls to the backend yet @@ -2020,7 +2262,7 @@ def test_get_backend_calls(self, request: pytest.FixtureRequest): test_charm_unit = harness.model.get_unit('test-charm/0') assert harness._get_backend_calls(reset=True) == [ ('relation_get', 0, 'test-charm/0', False), - ('update_relation_data', 0, test_charm_unit, 'foo', 'bar') + ('update_relation_data', 0, test_charm_unit, 'foo', 'bar'), ] # add_relation_unit resets the relation_list, but doesn't trigger backend calls @@ -2034,25 +2276,28 @@ def test_get_backend_calls(self, request: pytest.FixtureRequest): ('relation_ids', 'db'), ('relation_list', rel_id), ('relation_get', 0, 'postgresql/0', False), - ('update_relation_data', 0, pgql_unit, 'foo', 'bar') + ('update_relation_data', 0, pgql_unit, 'foo', 'bar'), ] # If we check again, they are still there, but now we reset it assert harness._get_backend_calls(reset=True) == [ ('relation_ids', 'db'), ('relation_list', rel_id), ('relation_get', 0, 'postgresql/0', False), - ('update_relation_data', 0, pgql_unit, 'foo', 'bar') + ('update_relation_data', 0, pgql_unit, 'foo', 'bar'), ] # And the calls are gone assert harness._get_backend_calls() == [] def test_get_backend_calls_with_kwargs(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm requires: db: interface: pgsql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() unit = harness.charm.model.unit @@ -2065,7 +2310,9 @@ def test_get_backend_calls_with_kwargs(self, request: pytest.FixtureRequest): harness._get_backend_calls(reset=True) app.status = ops.ActiveStatus('message') assert harness._get_backend_calls() == [ - ('is_leader',), ('status_set', 'active', 'message', {'is_app': True})] + ('is_leader',), + ('status_set', 'active', 'message', {'is_app': True}), + ] def test_unit_status(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: test-app') @@ -2090,7 +2337,9 @@ def test_app_status(self, request: pytest.FixtureRequest): assert harness.model.app.status == status def test_populate_oci_resources(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = 
ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: @@ -2099,7 +2348,8 @@ def test_populate_oci_resources(self, request: pytest.FixtureRequest): image2: type: oci-image description: "Another image." - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.populate_oci_resources() path = harness.model.resources.fetch('image') @@ -2115,13 +2365,16 @@ def test_populate_oci_resources(self, request: pytest.FixtureRequest): assert path.parent.name == 'image2' def test_resource_folder_cleanup(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: oci-image description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.populate_oci_resources() path = harness.model.resources.fetch('image') @@ -2132,12 +2385,15 @@ def test_resource_folder_cleanup(self, request: pytest.FixtureRequest): assert not path.parent.parent.exists() def test_container_isdir_and_exists(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_can_connect('foo', True) @@ -2160,18 +2416,21 @@ def test_container_isdir_and_exists(self, request: pytest.FixtureRequest): assert c.exists(file_path) def test_add_oci_resource_custom(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: oci-image description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) custom = { - "registrypath": "custompath", - "username": "custom_username", - "password": "custom_password", + 'registrypath': 'custompath', + 'username': 'custom_username', + 'password': 'custom_password', } harness.add_oci_resource('image', custom) resource = harness.model.resources.fetch('image') @@ -2182,53 +2441,65 @@ def test_add_oci_resource_custom(self, request: pytest.FixtureRequest): assert contents['password'] == 'custom_password' def test_add_oci_resource_no_image(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: file description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): - harness.add_oci_resource("image") + harness.add_oci_resource('image') with pytest.raises(RuntimeError): - harness.add_oci_resource("missing-resource") + harness.add_oci_resource('missing-resource') assert len(harness._backend._resources_map) == 0 def test_add_resource_unknown(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: file description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): harness.add_resource('unknown', 'content') def test_add_resource_but_oci(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: oci-image description: "Image to deploy." 
- ''') + """, + ) request.addfinalizer(harness.cleanup) with pytest.raises(RuntimeError): harness.add_resource('image', 'content') def test_add_resource_string(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: file filename: foo.txt description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.add_resource('image', 'foo contents\n') path = harness.model.resources.fetch('image') @@ -2238,14 +2509,17 @@ def test_add_resource_string(self, request: pytest.FixtureRequest): assert f.read() == 'foo contents\n' def test_add_resource_bytes(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: file filename: foo.zip description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) raw_contents = b'\xff\xff\x00blah\n' harness.add_resource('image', raw_contents) @@ -2256,13 +2530,16 @@ def test_add_resource_bytes(self, request: pytest.FixtureRequest): assert raw_contents == f.read() def test_add_resource_unknown_filename(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: file description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.add_resource('image', 'foo contents\n') path = harness.model.resources.fetch('image') @@ -2270,9 +2547,12 @@ def test_add_resource_unknown_filename(self, request: pytest.FixtureRequest): assert path.parent.name == 'image' def test_get_pod_spec(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_leader(True) container_spec = {'container': 'spec'} @@ -2281,14 +2561,18 @@ def test_get_pod_spec(self, request: pytest.FixtureRequest): assert harness.get_pod_spec() == (container_spec, k8s_resources) def test_begin_with_initial_hooks_no_relations(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-app - ''', config=''' + """, + config=""" options: foo: description: a config option type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.update_config({'foo': 'bar'}) harness.set_leader(True) @@ -2296,56 +2580,63 @@ def test_begin_with_initial_hooks_no_relations(self, request: pytest.FixtureRequ _ = harness.charm harness.begin_with_initial_hooks() assert harness.charm is not None - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'leader-elected'}, - {'name': 'config-changed', 'data': {'foo': 'bar'}}, - {'name': 'start'}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + {'name': 'leader-elected'}, + {'name': 'config-changed', 'data': {'foo': 'bar'}}, + {'name': 'start'}, + ] def test_begin_with_initial_hooks_no_relations_not_leader( self, request: pytest.FixtureRequest, ): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-app - ''', config=''' + """, + config=""" options: foo: description: a config option type: string - ''') + """, + 
) request.addfinalizer(harness.cleanup) harness.update_config({'foo': 'bar'}) with pytest.raises(RuntimeError): _ = harness.charm harness.begin_with_initial_hooks() assert harness.charm is not None - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'leader-settings-changed'}, - {'name': 'config-changed', 'data': {'foo': 'bar'}}, - {'name': 'start'}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + {'name': 'leader-settings-changed'}, + {'name': 'config-changed', 'data': {'foo': 'bar'}}, + {'name': 'start'}, + ] def test_begin_with_initial_hooks_with_peer_relation(self, request: pytest.FixtureRequest): class PeerCharm(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('peer') - harness = ops.testing.Harness(PeerCharm, meta=''' + + harness = ops.testing.Harness( + PeerCharm, + meta=""" name: test-app peers: peer: interface: app-peer - ''', config=''' + """, + config=""" options: foo: description: a config option type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.update_config({'foo': 'bar'}) with pytest.raises(RuntimeError): @@ -2355,20 +2646,21 @@ def __init__(self, framework: ops.Framework): rel = harness.model.get_relation('peer') assert rel is not None rel_id = rel.id - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'relation-created', - 'relation': 'peer', - 'data': { - 'relation_id': rel_id, - 'unit': None, - 'app': 'test-app', - }}, - {'name': 'leader-settings-changed'}, - {'name': 'config-changed', 'data': {'foo': 'bar'}}, - {'name': 'start'}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + { + 'name': 'relation-created', + 'relation': 'peer', + 'data': { + 'relation_id': rel_id, + 'unit': None, + 'app': 'test-app', + }, + }, + {'name': 'leader-settings-changed'}, + {'name': 'config-changed', 'data': {'foo': 'bar'}}, + {'name': 'start'}, + ] # With a single unit, no peer-relation-joined is fired def test_begin_with_initial_hooks_peer_relation_pre_defined( @@ -2379,31 +2671,36 @@ class PeerCharm(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('peer') - harness = ops.testing.Harness(PeerCharm, meta=''' + + harness = ops.testing.Harness( + PeerCharm, + meta=""" name: test-app peers: peer: interface: app-peer - ''') + """, + ) request.addfinalizer(harness.cleanup) peer_rel_id = harness.add_relation('peer', 'test-app') harness.begin_with_initial_hooks() # If the peer relation is already defined by the user, we don't create the relation a # second time, but we do still fire relation-created. 
- assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'relation-created', - 'relation': 'peer', - 'data': { - 'relation_id': peer_rel_id, - 'unit': None, - 'app': 'test-app', - }}, - {'name': 'leader-settings-changed'}, - {'name': 'config-changed', 'data': {}}, - {'name': 'start'}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + { + 'name': 'relation-created', + 'relation': 'peer', + 'data': { + 'relation_id': peer_rel_id, + 'unit': None, + 'app': 'test-app', + }, + }, + {'name': 'leader-settings-changed'}, + {'name': 'config-changed', 'data': {}}, + {'name': 'start'}, + ] def test_begin_with_initial_hooks_relation_charm_with_no_relation( self, @@ -2413,80 +2710,96 @@ class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('db') - harness = ops.testing.Harness(CharmWithDB, meta=''' + + harness = ops.testing.Harness( + CharmWithDB, + meta=""" name: test-app requires: db: interface: sql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_leader() harness.begin_with_initial_hooks() - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'leader-elected'}, - {'name': 'config-changed', 'data': {}}, - {'name': 'start'}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + {'name': 'leader-elected'}, + {'name': 'config-changed', 'data': {}}, + {'name': 'start'}, + ] def test_begin_with_initial_hooks_with_one_relation(self, request: pytest.FixtureRequest): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('db') - harness = ops.testing.Harness(CharmWithDB, meta=''' + + harness = ops.testing.Harness( + CharmWithDB, + meta=""" name: test-app requires: db: interface: sql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_leader() rel_id = harness.add_relation('db', 'postgresql') harness.add_relation_unit(rel_id, 'postgresql/0') harness.update_relation_data(rel_id, 'postgresql/0', {'new': 'data'}) harness.begin_with_initial_hooks() - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'relation-created', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': None, - 'app': 'postgresql', - }}, - {'name': 'leader-elected'}, - {'name': 'config-changed', 'data': {}}, - {'name': 'start'}, - {'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/0', - 'app': 'postgresql', - }}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/0', - 'app': 'postgresql', - }}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + { + 'name': 'relation-created', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': None, + 'app': 'postgresql', + }, + }, + {'name': 'leader-elected'}, + {'name': 'config-changed', 'data': {}}, + {'name': 'start'}, + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/0', + 'app': 'postgresql', + }, + }, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/0', + 'app': 'postgresql', + }, + }, + ] def test_begin_with_initial_hooks_with_application_data(self, request: pytest.FixtureRequest): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('db') - harness 
= ops.testing.Harness(CharmWithDB, meta=''' + + harness = ops.testing.Harness( + CharmWithDB, + meta=""" name: test-app requires: db: interface: sql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_leader() rel_id = harness.add_relation('db', 'postgresql') @@ -2494,53 +2807,64 @@ def __init__(self, framework: ops.Framework): harness.update_relation_data(rel_id, 'postgresql/0', {'new': 'data'}) harness.update_relation_data(rel_id, 'postgresql', {'app': 'data'}) harness.begin_with_initial_hooks() - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'relation-created', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': None, - 'app': 'postgresql', - }}, - {'name': 'leader-elected'}, - {'name': 'config-changed', 'data': {}}, - {'name': 'start'}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': None, - 'app': 'postgresql', - }}, - {'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/0', - 'app': 'postgresql', - }}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/0', - 'app': 'postgresql', - }}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + { + 'name': 'relation-created', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': None, + 'app': 'postgresql', + }, + }, + {'name': 'leader-elected'}, + {'name': 'config-changed', 'data': {}}, + {'name': 'start'}, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': None, + 'app': 'postgresql', + }, + }, + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/0', + 'app': 'postgresql', + }, + }, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/0', + 'app': 'postgresql', + }, + }, + ] def test_begin_with_initial_hooks_with_multiple_units(self, request: pytest.FixtureRequest): class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('db') - harness = ops.testing.Harness(CharmWithDB, meta=''' + + harness = ops.testing.Harness( + CharmWithDB, + meta=""" name: test-app requires: db: interface: sql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_leader() rel_id = harness.add_relation('db', 'postgresql') @@ -2549,48 +2873,57 @@ def __init__(self, framework: ops.Framework): # We intentionally add 0 after 1 to assert that the code triggers them in order harness.add_relation_unit(rel_id, 'postgresql/0') harness.begin_with_initial_hooks() - assert harness.charm.changes == \ - [ - {'name': 'install'}, - {'name': 'relation-created', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': None, - 'app': 'postgresql', - }}, - {'name': 'leader-elected'}, - {'name': 'config-changed', 'data': {}}, - {'name': 'start'}, - {'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/0', - 'app': 'postgresql', - }}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/0', - 'app': 'postgresql', - }}, - {'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 'postgresql/1', - 'app': 'postgresql', - }}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id, - 'unit': 
'postgresql/1', - 'app': 'postgresql', - }}, - ] + assert harness.charm.changes == [ + {'name': 'install'}, + { + 'name': 'relation-created', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': None, + 'app': 'postgresql', + }, + }, + {'name': 'leader-elected'}, + {'name': 'config-changed', 'data': {}}, + {'name': 'start'}, + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/0', + 'app': 'postgresql', + }, + }, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/0', + 'app': 'postgresql', + }, + }, + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/1', + 'app': 'postgresql', + }, + }, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id, + 'unit': 'postgresql/1', + 'app': 'postgresql', + }, + }, + ] def test_begin_with_initial_hooks_multiple_relation_same_endpoint( self, @@ -2600,12 +2933,16 @@ class CharmWithDB(RelationEventCharm): def __init__(self, framework: ops.Framework): super().__init__(framework) self.observe_relation_events('db') - harness = ops.testing.Harness(CharmWithDB, meta=''' + + harness = ops.testing.Harness( + CharmWithDB, + meta=""" name: test-app requires: db: interface: sql - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.set_leader() rel_id_a = harness.add_relation('db', 'pg-a') @@ -2618,29 +2955,35 @@ def __init__(self, framework: ops.Framework): {'name': 'install'}, ] # The first events are always the same - assert changes[:len(expected_prefix)] == expected_prefix - changes = changes[len(expected_prefix):] + assert changes[: len(expected_prefix)] == expected_prefix + changes = changes[len(expected_prefix) :] # However, the order of relation-created events can be in any order expected_relation_created = [ - {'name': 'relation-created', - 'relation': 'db', - 'data': { - 'relation_id': rel_id_a, - 'unit': None, - 'app': 'pg-a', - }}, - {'name': 'relation-created', - 'relation': 'db', - 'data': { - 'relation_id': rel_id_b, - 'unit': None, - 'app': 'pg-b', - }}, + { + 'name': 'relation-created', + 'relation': 'db', + 'data': { + 'relation_id': rel_id_a, + 'unit': None, + 'app': 'pg-a', + }, + }, + { + 'name': 'relation-created', + 'relation': 'db', + 'data': { + 'relation_id': rel_id_b, + 'unit': None, + 'app': 'pg-b', + }, + }, ] if changes[:2] != expected_relation_created: # change the order - expected_relation_created = [expected_relation_created[1], - expected_relation_created[0]] + expected_relation_created = [ + expected_relation_created[1], + expected_relation_created[0], + ] assert changes[:2] == expected_relation_created changes = changes[2:] expected_middle: typing.List[typing.Dict[str, typing.Any]] = [ @@ -2648,37 +2991,45 @@ def __init__(self, framework: ops.Framework): {'name': 'config-changed', 'data': {}}, {'name': 'start'}, ] - assert changes[:len(expected_middle)] == expected_middle - changes = changes[len(expected_middle):] + assert changes[: len(expected_middle)] == expected_middle + changes = changes[len(expected_middle) :] a_first = [ - {'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'relation_id': rel_id_a, - 'unit': 'pg-a/0', - 'app': 'pg-a', - }}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id_a, - 'unit': 'pg-a/0', - 'app': 'pg-a', - }}, - {'name': 'relation-joined', - 'relation': 'db', - 'data': { - 'relation_id': rel_id_b, - 'unit': 'pg-b/0', - 
'app': 'pg-b', - }}, - {'name': 'relation-changed', - 'relation': 'db', - 'data': { - 'relation_id': rel_id_b, - 'unit': 'pg-b/0', - 'app': 'pg-b', - }}, + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'relation_id': rel_id_a, + 'unit': 'pg-a/0', + 'app': 'pg-a', + }, + }, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id_a, + 'unit': 'pg-a/0', + 'app': 'pg-a', + }, + }, + { + 'name': 'relation-joined', + 'relation': 'db', + 'data': { + 'relation_id': rel_id_b, + 'unit': 'pg-b/0', + 'app': 'pg-b', + }, + }, + { + 'name': 'relation-changed', + 'relation': 'db', + 'data': { + 'relation_id': rel_id_b, + 'unit': 'pg-b/0', + 'app': 'pg-b', + }, + }, ] if changes != a_first: b_first = [a_first[2], a_first[3], a_first[0], a_first[1]] @@ -2687,56 +3038,69 @@ def __init__(self, framework: ops.Framework): def test_begin_with_initial_hooks_unknown_status(self, request: pytest.FixtureRequest): # Verify that a charm that does not set a status in the install hook will have an # unknown status in the harness. - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-app - ''', config=''' + """, + config=""" options: foo: description: a config option type: string - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend harness.begin_with_initial_hooks() - assert backend.status_get(is_app=False) == \ - {'status': 'unknown', 'message': ''} + assert backend.status_get(is_app=False) == {'status': 'unknown', 'message': ''} - assert backend.status_get(is_app=True) == \ - {'status': 'unknown', 'message': ''} + assert backend.status_get(is_app=True) == {'status': 'unknown', 'message': ''} def test_begin_with_initial_hooks_install_sets_status(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(RecordingCharm, meta=''' + harness = ops.testing.Harness( + RecordingCharm, + meta=""" name: test-app - ''', config=''' + """, + config=""" options: set_status: description: a config option type: boolean - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend - harness.update_config(key_values={"set_status": True}) + harness.update_config(key_values={'set_status': True}) harness.begin_with_initial_hooks() - assert backend.status_get(is_app=False) == \ - {'status': 'maintenance', 'message': 'Status set on install'} + assert backend.status_get(is_app=False) == { + 'status': 'maintenance', + 'message': 'Status set on install', + } def test_get_pebble_container_plan(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_can_connect('foo', True) initial_plan = harness.get_container_pebble_plan('foo') assert initial_plan.to_yaml() == '{}\n' container = harness.model.unit.get_container('foo') - container.pebble.add_layer('test-ab', '''\ + container.pebble.add_layer( + 'test-ab', + """\ summary: test-layer description: a layer that we can use for testing services: @@ -2744,15 +3108,19 @@ def test_get_pebble_container_plan(self, request: pytest.FixtureRequest): command: /bin/echo hello from a b: command: /bin/echo hello from b - ''') - container.pebble.add_layer('test-c', '''\ + """, + ) + container.pebble.add_layer( + 'test-c', + """\ summary: test-for-c services: c: command: /bin/echo hello from c - 
''') + """, + ) plan = container.pebble.get_plan() - assert plan.to_yaml() == textwrap.dedent('''\ + assert plan.to_yaml() == textwrap.dedent("""\ services: a: command: /bin/echo hello from a @@ -2760,12 +3128,12 @@ def test_get_pebble_container_plan(self, request: pytest.FixtureRequest): command: /bin/echo hello from b c: command: /bin/echo hello from c - ''') + """) harness_plan = harness.get_container_pebble_plan('foo') assert harness_plan.to_yaml() == plan.to_yaml() def test_add_layer_with_log_targets_to_plan(self): - layer_yaml = '''\ + layer_yaml = """\ services: foo: override: replace @@ -2781,13 +3149,18 @@ def test_add_layer_with_log_targets_to_plan(self): override: replace type: loki location: https://example.com:3100/loki/api/v1/push - ''' - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'foo', "containers": {"consumer": {"type": "oci-image"}}})) + """ + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({ + 'name': 'foo', + 'containers': {'consumer': {'type': 'oci-image'}}, + }), + ) harness.begin() harness.set_can_connect('consumer', True) - container = harness.charm.unit.containers["consumer"] + container = harness.charm.unit.containers['consumer'] layer = pebble.Layer(layer_yaml) container.add_layer('foo', layer) @@ -2798,52 +3171,61 @@ def test_add_layer_with_log_targets_to_plan(self): assert plan.log_targets.get('baz') is not None def test_get_pebble_container_plan_unknown(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.set_can_connect('foo', True) with pytest.raises(KeyError): harness.get_container_pebble_plan('unknown') plan = harness.get_container_pebble_plan('foo') - assert plan.to_yaml() == "{}\n" + assert plan.to_yaml() == '{}\n' def test_container_pebble_ready(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ContainerEventCharm, meta=''' + harness = ops.testing.Harness( + ContainerEventCharm, + meta=""" name: test-app containers: foo: resource: foo-image - ''') + """, + ) request.addfinalizer(harness.cleanup) # This is a no-op if it is called before begin(), but it isn't an error harness.container_pebble_ready('foo') harness.begin() harness.charm.observe_container_events('foo') harness.container_pebble_ready('foo') - assert harness.charm.changes == \ - [ - {'name': 'pebble-ready', - 'container': 'foo', - }, - ] + assert harness.charm.changes == [ + { + 'name': 'pebble-ready', + 'container': 'foo', + }, + ] def test_get_filesystem_root(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: foo: resource: foo-image - ''') - foo_root = harness.get_filesystem_root("foo") + """, + ) + foo_root = harness.get_filesystem_root('foo') assert foo_root.exists() assert foo_root.is_dir() harness.begin() - container = harness.charm.unit.get_container("foo") + container = harness.charm.unit.get_container('foo') assert foo_root == harness.get_filesystem_root(container) def test_evaluate_status(self): @@ -2896,14 +3278,17 @@ def test_invalid_status_set(self): class TestNetwork: @pytest.fixture def harness(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm requires: db: interface: database foo: 
interface: xyz - ''') + """, + ) yield harness harness.cleanup() @@ -2944,11 +3329,14 @@ def test_add_network_all_args(self, harness: ops.testing.Harness[ops.CharmBase]) network = binding.network assert network.bind_address == ipaddress.IPv4Address('10.0.0.10') assert network.ingress_address == ipaddress.IPv4Address('10.0.0.1') - assert network.ingress_addresses == \ - [ipaddress.IPv4Address('10.0.0.1'), ipaddress.IPv4Address('10.0.0.2')] - assert network.egress_subnets == \ - [ipaddress.IPv4Network('10.0.0.0/8'), - ipaddress.IPv4Network('10.10.0.0/16')] + assert network.ingress_addresses == [ + ipaddress.IPv4Address('10.0.0.1'), + ipaddress.IPv4Address('10.0.0.2'), + ] + assert network.egress_subnets == [ + ipaddress.IPv4Network('10.0.0.0/8'), + ipaddress.IPv4Network('10.10.0.0/16'), + ] assert len(network.interfaces) == 1 interface = network.interfaces[0] assert interface.name == 'eth1' @@ -2968,8 +3356,7 @@ def test_add_network_specific_endpoint(self, harness: ops.testing.Harness[ops.Ch # Ensure binding for the other interface is still on the default value foo_binding = harness.model.get_binding('foo') assert foo_binding is not None - assert foo_binding.network.bind_address == \ - ipaddress.IPv4Address('10.0.0.1') + assert foo_binding.network.bind_address == ipaddress.IPv4Address('10.0.0.1') def test_add_network_specific_relation(self, harness: ops.testing.Harness[ops.CharmBase]): harness.add_network('10.0.0.1') @@ -2988,8 +3375,7 @@ def test_add_network_specific_relation(self, harness: ops.testing.Harness[ops.Ch # Ensure binding for the other interface is still on the default value foo_binding = harness.model.get_binding('foo') assert foo_binding is not None - assert foo_binding.network.bind_address == \ - ipaddress.IPv4Address('10.0.0.1') + assert foo_binding.network.bind_address == ipaddress.IPv4Address('10.0.0.1') def test_add_network_endpoint_fallback(self, harness: ops.testing.Harness[ops.CharmBase]): relation_id = harness.add_relation('db', 'postgresql') @@ -3046,7 +3432,8 @@ def test_add_network_endpoint_not_in_meta(self, harness: ops.testing.Harness[ops harness.add_network('35.0.0.1', endpoint='xyz') def test_add_network_relation_id_set_endpoint_not_set( - self, harness: ops.testing.Harness[ops.CharmBase]): + self, harness: ops.testing.Harness[ops.CharmBase] + ): relation_id = harness.add_relation('db', 'postgresql') with pytest.raises(TypeError): harness.add_network('35.0.0.1', relation_id=relation_id) @@ -3057,7 +3444,8 @@ def test_add_network_relation_id_incorrect(self, harness: ops.testing.Harness[op harness.add_network('35.0.0.1', endpoint='db', relation_id=relation_id + 1) def test_add_network_endpoint_and_relation_id_do_not_correspond( - self, harness: ops.testing.Harness[ops.CharmBase]): + self, harness: ops.testing.Harness[ops.CharmBase] + ): relation_id = harness.add_relation('db', 'postgresql') with pytest.raises(ops.ModelError): harness.add_network('35.0.0.1', endpoint='foo', relation_id=relation_id) @@ -3121,7 +3509,7 @@ def get_changes(self, reset: bool = True): def _on_install(self, _: ops.InstallEvent): if self.config.get('set_status'): - self.unit.status = ops.MaintenanceStatus("Status set on install") + self.unit.status = ops.MaintenanceStatus('Status set on install') self.changes.append({'name': 'install'}) def _on_start(self, _: ops.StartEvent): @@ -3167,8 +3555,9 @@ def observe_relation_events(self, relation_name: str): self.framework.observe(self.on[relation_name].relation_created, self._on_relation_created) 
self.framework.observe(self.on[relation_name].relation_joined, self._on_relation_joined) self.framework.observe(self.on[relation_name].relation_changed, self._on_relation_changed) - self.framework.observe(self.on[relation_name].relation_departed, - self._on_relation_departed) + self.framework.observe( + self.on[relation_name].relation_departed, self._on_relation_departed + ) self.framework.observe(self.on[relation_name].relation_broken, self._on_relation_broken) def _on_relation_created(self, event: ops.RelationCreatedEvent): @@ -3206,10 +3595,11 @@ def _observe_relation_event(self, event_name: str, event: ops.RelationEvent): } if self.record_relation_data_on_events: - recording["data"].update({'relation_data': { - str(x.name): dict(event.relation.data[x]) - for x in event.relation.data - }}) + recording['data'].update({ + 'relation_data': { + str(x.name): dict(event.relation.data[x]) for x in event.relation.data + } + }) self.changes.append(recording) @@ -3219,8 +3609,9 @@ class ContainerEventCharm(RecordingCharm): def observe_container_events(self, container_name: str): self.framework.observe(self.on[container_name].pebble_ready, self._on_pebble_ready) - self.framework.observe(self.on[container_name].pebble_custom_notice, - self._on_pebble_custom_notice) + self.framework.observe( + self.on[container_name].pebble_custom_notice, self._on_pebble_custom_notice + ) def _on_pebble_ready(self, event: ops.PebbleReadyEvent): self.changes.append({ @@ -3229,8 +3620,11 @@ def _on_pebble_ready(self, event: ops.PebbleReadyEvent): }) def _on_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): - type_str = (event.notice.type.value if isinstance(event.notice.type, pebble.NoticeType) - else event.notice.type) + type_str = ( + event.notice.type.value + if isinstance(event.notice.type, pebble.NoticeType) + else event.notice.type + ) self.changes.append({ 'name': 'pebble-custom-notice', 'container': event.workload.name, @@ -3253,11 +3647,13 @@ def get_public_methods(obj: object): class TestTestingModelBackend: - def test_conforms_to_model_backend(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: app - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend mb_methods = get_public_methods(_ModelBackend) @@ -3265,44 +3661,52 @@ def test_conforms_to_model_backend(self, request: pytest.FixtureRequest): assert mb_methods == backend_methods def test_model_uuid_is_uuid_v4(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend assert uuid.UUID(backend.model_uuid).version == 4 def test_status_set_get_unit(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: app - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend backend.status_set('blocked', 'message', is_app=False) - assert backend.status_get(is_app=False) == \ - {'status': 'blocked', 'message': 'message'} - assert backend.status_get(is_app=True) == \ - {'status': 'unknown', 'message': ''} + assert backend.status_get(is_app=False) == {'status': 'blocked', 'message': 'message'} + assert backend.status_get(is_app=True) == {'status': 'unknown', 'message': ''} def 
test_status_set_get_app(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: app - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend backend.status_set('blocked', 'message', is_app=True) - assert backend.status_get(is_app=True) == \ - {'status': 'blocked', 'message': 'message'} - assert backend.status_get(is_app=False) == \ - {'status': 'maintenance', 'message': ''} + assert backend.status_get(is_app=True) == {'status': 'blocked', 'message': 'message'} + assert backend.status_get(is_app=False) == {'status': 'maintenance', 'message': ''} def test_relation_ids_unknown_relation(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm provides: db: interface: mydb - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend # With no relations added, we just get an empty list for the interface @@ -3312,62 +3716,79 @@ def test_relation_ids_unknown_relation(self, request: pytest.FixtureRequest): backend.relation_ids('unknown') def test_relation_get_unknown_relation_id(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend with pytest.raises(ops.RelationNotFoundError): backend.relation_get(1234, 'unit/0', False) def test_relation_list_unknown_relation_id(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend with pytest.raises(ops.RelationNotFoundError): backend.relation_list(1234) def test_lazy_resource_directory(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: oci-image description: "Image to deploy." - ''') + """, + ) request.addfinalizer(harness.cleanup) harness.populate_oci_resources() backend = harness._backend assert backend._resource_dir is None path = backend.resource_get('image') assert backend._resource_dir is not None - assert str(path).startswith(str(backend._resource_dir.name)), \ - f'expected {path} to be a subdirectory of {backend._resource_dir.name}' + assert str(path).startswith( + str(backend._resource_dir.name) + ), f'expected {path} to be a subdirectory of {backend._resource_dir.name}' def test_resource_get_no_resource(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app resources: image: type: file description: "Image to deploy." 
- ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend with pytest.raises(ops.ModelError) as excinfo: backend.resource_get('foo') - assert "units/unit-test-app-0/resources/foo: resource#test-app/foo not found" in \ - str(excinfo.value) + assert 'units/unit-test-app-0/resources/foo: resource#test-app/foo not found' in str( + excinfo.value + ) def test_relation_remote_app_name(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-charm requires: db: interface: foo - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend @@ -3382,9 +3803,12 @@ def test_relation_remote_app_name(self, request: pytest.FixtureRequest): assert backend.relation_remote_app_name(7) is None def test_get_pebble_methods(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app - ''') + """, + ) request.addfinalizer(harness.cleanup) backend = harness._backend @@ -3404,9 +3828,12 @@ def _reboot_now(self, event: ops.InstallEvent): def _reboot(self, event: ops.RemoveEvent): self.unit.reboot() - harness = ops.testing.Harness(RebootingCharm, meta=''' + harness = ops.testing.Harness( + RebootingCharm, + meta=""" name: test-app - ''') + """, + ) request.addfinalizer(harness.cleanup) assert harness.reboot_count == 0 backend = harness._backend @@ -3427,11 +3854,14 @@ def _reboot(self, event: ops.RemoveEvent): class TestTestingPebbleClient: @pytest.fixture def client(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: mycontainer: {} - ''') + """, + ) backend = harness._backend client = backend.get_pebble('/charm/containers/mycontainer/pebble.socket') harness.set_can_connect('mycontainer', True) @@ -3448,7 +3878,9 @@ def test_add_layer(self, client: _TestingPebbleClient): plan = client.get_plan() assert isinstance(plan, pebble.Plan) assert plan.to_yaml() == '{}\n' - client.add_layer('foo', pebble.Layer('''\ + client.add_layer( + 'foo', + pebble.Layer("""\ summary: Foo description: | A longer description about Foo @@ -3462,10 +3894,12 @@ def test_add_layer(self, client: _TestingPebbleClient): command: '/bin/echo hello' environment: KEY: VALUE - ''')) + """), + ) plan = client.get_plan() # The YAML should be normalized - assert textwrap.dedent('''\ + assert ( + textwrap.dedent("""\ services: serv: command: /bin/echo hello @@ -3477,13 +3911,17 @@ def test_add_layer(self, client: _TestingPebbleClient): override: replace startup: enabled summary: Serv - ''') == plan.to_yaml() + """) + == plan.to_yaml() + ) def test_add_layer_merge(self, client: _TestingPebbleClient): plan = client.get_plan() assert isinstance(plan, pebble.Plan) assert plan.to_yaml() == '{}\n' - client.add_layer('foo', pebble.Layer('''\ + client.add_layer( + 'foo', + pebble.Layer("""\ summary: Foo description: | A longer description about Foo @@ -3514,10 +3952,12 @@ def test_add_layer_merge(self, client: _TestingPebbleClient): backoff-delay: 1 backoff-factor: 2 backoff-limit: 1 - ''')) + """), + ) plan = client.get_plan() # The YAML should be normalized - assert textwrap.dedent('''\ + assert ( + textwrap.dedent("""\ services: serv: after: @@ -3546,9 +3986,13 @@ def test_add_layer_merge(self, client: _TestingPebbleClient): summary: Serv user: user1 user-id: userID1 - ''') == plan.to_yaml() + """) + 
== plan.to_yaml() + ) - client.add_layer('foo', pebble.Layer('''\ + client.add_layer( + 'foo', + pebble.Layer("""\ summary: Foo description: | A longer description about Foo @@ -3583,10 +4027,13 @@ def test_add_layer_merge(self, client: _TestingPebbleClient): backoff-delay: 2 backoff-factor: 3 backoff-limit: 2 - '''), combine=True) + """), + combine=True, + ) plan = client.get_plan() # The YAML should be normalized - assert textwrap.dedent('''\ + assert ( + textwrap.dedent("""\ services: serv: after: @@ -3622,13 +4069,15 @@ def test_add_layer_merge(self, client: _TestingPebbleClient): summary: Serv user: user2 user-id: userID2 - ''') == plan.to_yaml() + """) + == plan.to_yaml() + ) def test_add_layer_not_combined(self, client: _TestingPebbleClient): plan = client.get_plan() assert isinstance(plan, pebble.Plan) assert plan.to_yaml() == '{}\n' - service = textwrap.dedent('''\ + service = textwrap.dedent("""\ summary: Foo description: | A longer description about Foo @@ -3642,7 +4091,7 @@ def test_add_layer_not_combined(self, client: _TestingPebbleClient): command: '/bin/echo hello' environment: KEY: VALUE - ''') + """) client.add_layer('foo', pebble.Layer(service)) # TODO: jam 2021-04-19 We should have a clearer error type for this case. The actual # pebble raises an HTTP exception. See https://github.com/canonical/operator/issues/514 @@ -3652,7 +4101,9 @@ def test_add_layer_not_combined(self, client: _TestingPebbleClient): client.add_layer('foo', pebble.Layer(service)) def test_add_layer_three_services(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3660,8 +4111,11 @@ def test_add_layer_three_services(self, client: _TestingPebbleClient): startup: enabled override: replace command: '/bin/echo foo' - ''') - client.add_layer('bar', '''\ + """, + ) + client.add_layer( + 'bar', + """\ summary: bar services: bar: @@ -3669,8 +4123,11 @@ def test_add_layer_three_services(self, client: _TestingPebbleClient): startup: enabled override: replace command: '/bin/echo bar' - ''') - client.add_layer('baz', '''\ + """, + ) + client.add_layer( + 'baz', + """\ summary: baz services: baz: @@ -3678,10 +4135,12 @@ def test_add_layer_three_services(self, client: _TestingPebbleClient): startup: enabled override: replace command: '/bin/echo baz' - ''') + """, + ) plan = client.get_plan() # Alphabetical services, and the YAML should be normalized - assert textwrap.dedent('''\ + assert ( + textwrap.dedent("""\ services: bar: command: /bin/echo bar @@ -3698,30 +4157,41 @@ def test_add_layer_three_services(self, client: _TestingPebbleClient): override: replace startup: enabled summary: Foo - ''') == plan.to_yaml() + """) + == plan.to_yaml() + ) def test_add_layer_combine_no_override(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: summary: Foo command: '/bin/echo foo' - ''') + """, + ) # TODO: jam 2021-04-19 Pebble currently raises a HTTP Error 500 Internal Service Error # if you don't supply an override directive. That needs to be fixed and this test # should be updated. 
https://github.com/canonical/operator/issues/514 with pytest.raises(RuntimeError): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: summary: Foo command: '/bin/echo foo' - ''', combine=True) + """, + combine=True, + ) def test_add_layer_combine_override_replace(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: bar: @@ -3730,15 +4200,21 @@ def test_add_layer_combine_override_replace(self, client: _TestingPebbleClient): foo: summary: Foo command: '/bin/echo foo' - ''') - client.add_layer('foo', '''\ + """, + ) + client.add_layer( + 'foo', + """\ summary: foo services: foo: command: '/bin/echo foo new' override: replace - ''', combine=True) - assert textwrap.dedent('''\ + """, + combine=True, + ) + assert ( + textwrap.dedent("""\ services: bar: command: /bin/echo bar @@ -3746,10 +4222,14 @@ def test_add_layer_combine_override_replace(self, client: _TestingPebbleClient): foo: command: /bin/echo foo new override: replace - ''') == client.get_plan().to_yaml() + """) + == client.get_plan().to_yaml() + ) def test_add_layer_combine_override_merge(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: bar: @@ -3758,16 +4238,22 @@ def test_add_layer_combine_override_merge(self, client: _TestingPebbleClient): foo: summary: Foo command: '/bin/echo foo' - ''') - client.add_layer('foo', '''\ + """, + ) + client.add_layer( + 'foo', + """\ summary: foo services: foo: summary: Foo command: '/bin/echo foob' override: merge - ''', combine=True) - assert textwrap.dedent('''\ + """, + combine=True, + ) + assert ( + textwrap.dedent("""\ services: bar: command: /bin/echo bar @@ -3776,10 +4262,14 @@ def test_add_layer_combine_override_merge(self, client: _TestingPebbleClient): command: /bin/echo foob override: merge summary: Foo - ''') == client.get_plan().to_yaml() + """) + == client.get_plan().to_yaml() + ) def test_add_layer_combine_override_unknown(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: bar: @@ -3788,23 +4278,30 @@ def test_add_layer_combine_override_unknown(self, client: _TestingPebbleClient): foo: summary: Foo command: '/bin/echo foo' - ''') + """, + ) with pytest.raises(RuntimeError): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: summary: Foo command: '/bin/echo foob' override: blah - ''', combine=True) + """, + combine=True, + ) def test_get_services_none(self, client: _TestingPebbleClient): service_info = client.get_services() assert service_info == [] def test_get_services_not_started(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3814,7 +4311,8 @@ def test_get_services_not_started(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) infos = client.get_services() assert len(infos) == 2 bar_info = infos[0] @@ -3830,7 +4328,9 @@ def test_get_services_not_started(self, client: _TestingPebbleClient): assert not foo_info.is_running() def test_get_services_autostart(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3840,7 +4340,8 @@ def test_get_services_autostart(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) 
client.autostart_services() infos = client.get_services() assert len(infos) == 2 @@ -3857,7 +4358,9 @@ def test_get_services_autostart(self, client: _TestingPebbleClient): assert foo_info.is_running() def test_get_services_start_stop(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3867,7 +4370,8 @@ def test_get_services_start_stop(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) client.start_services(['bar']) infos = client.get_services() assert len(infos) == 2 @@ -3889,7 +4393,9 @@ def test_get_services_start_stop(self, client: _TestingPebbleClient): assert bar_info.current == pebble.ServiceStatus.INACTIVE def test_get_services_bad_request(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3899,14 +4405,17 @@ def test_get_services_bad_request(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) # It is a common mistake to pass just a name vs a list of names, so catch it with a # TypeError with pytest.raises(TypeError): client.get_services('foo') def test_get_services_subset(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3916,7 +4425,8 @@ def test_get_services_subset(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) infos = client.get_services(['foo']) assert len(infos) == 1 foo_info = infos[0] @@ -3925,7 +4435,9 @@ def test_get_services_subset(self, client: _TestingPebbleClient): assert foo_info.current == pebble.ServiceStatus.INACTIVE def test_get_services_unknown(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -3935,7 +4447,8 @@ def test_get_services_unknown(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) # This doesn't seem to be an error at the moment. # pebble_cli.py service just returns an empty list # pebble service unknown says "No matching services" (but exits 0) @@ -3960,14 +4473,17 @@ def test_stop_service_str(self, client: _TestingPebbleClient): client.stop_services('unknown') def test_mixed_start_service(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: summary: Foo startup: enabled command: '/bin/echo foo' - ''') + """, + ) # TODO: jam 2021-04-20 better error type with pytest.raises(RuntimeError): client.start_services(['foo', 'unknown']) @@ -3980,14 +4496,17 @@ def test_mixed_start_service(self, client: _TestingPebbleClient): assert foo_info.current == pebble.ServiceStatus.INACTIVE def test_stop_services_unknown(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: summary: Foo startup: enabled command: '/bin/echo foo' - ''') + """, + ) client.autostart_services() # TODO: jam 2021-04-20 better error type with pytest.raises(RuntimeError): @@ -4003,7 +4522,9 @@ def test_stop_services_unknown(self, client: _TestingPebbleClient): def test_start_started_service(self, client: _TestingPebbleClient): # Pebble maintains idempotency even if you start a service # which is already started. 
- client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -4013,7 +4534,8 @@ def test_start_started_service(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) client.autostart_services() # Foo is now started, but Bar is not client.start_services(['bar', 'foo']) @@ -4033,7 +4555,9 @@ def test_start_started_service(self, client: _TestingPebbleClient): def test_stop_stopped_service(self, client: _TestingPebbleClient): # Pebble maintains idempotency even if you stop a service # which is already stopped. - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -4043,7 +4567,8 @@ def test_stop_stopped_service(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) client.autostart_services() # Foo is now started, but Bar is not client.stop_services(['foo', 'bar']) @@ -4060,9 +4585,11 @@ def test_stop_stopped_service(self, client: _TestingPebbleClient): assert foo_info.startup == pebble.ServiceStartup.ENABLED assert foo_info.current == pebble.ServiceStatus.INACTIVE - @ unittest.skipUnless(is_linux, 'Pebble runs on Linux') + @unittest.skipUnless(is_linux, 'Pebble runs on Linux') def test_send_signal(self, client: _TestingPebbleClient): - client.add_layer('foo', '''\ + client.add_layer( + 'foo', + """\ summary: foo services: foo: @@ -4072,32 +4599,39 @@ def test_send_signal(self, client: _TestingPebbleClient): bar: summary: Bar command: '/bin/echo bar' - ''') + """, + ) client.autostart_services() # Foo is now started, but Bar is not # Send a valid signal to a running service - client.send_signal("SIGINT", ("foo",)) + client.send_signal('SIGINT', ('foo',)) # Send a valid signal but omit service name with pytest.raises(TypeError): - client.send_signal("SIGINT", tuple()) + client.send_signal('SIGINT', tuple()) # Send an invalid signal to a running service with pytest.raises(pebble.APIError): - client.send_signal("sigint", ("foo",)) + client.send_signal('sigint', ('foo',)) # Send a valid signal to a stopped service with pytest.raises(pebble.APIError): - client.send_signal("SIGINT", ("bar",)) + client.send_signal('SIGINT', ('bar',)) # Send a valid signal to a non-existing service with pytest.raises(pebble.APIError): - client.send_signal("SIGINT", ("baz",)) + client.send_signal('SIGINT', ('baz',)) # Send a valid signal to a multiple services, one of which is not running with pytest.raises(pebble.APIError): - client.send_signal("SIGINT", ("foo", "bar",)) + client.send_signal( + 'SIGINT', + ( + 'foo', + 'bar', + ), + ) PebbleClientType = typing.Union[_TestingPebbleClient, pebble.Client] @@ -4115,9 +4649,10 @@ def test_push_and_pull_bytes( self._test_push_and_pull_data( pebble_dir, client, - original_data=b"\x00\x01\x02\x03\x04", + original_data=b'\x00\x01\x02\x03\x04', encoding=None, - stream_class=io.BytesIO) + stream_class=io.BytesIO, + ) def test_push_and_pull_non_utf8_data( self, @@ -4129,7 +4664,8 @@ def test_push_and_pull_non_utf8_data( client, original_data='日本語', # "Japanese" in Japanese encoding='sjis', - stream_class=io.StringIO) + stream_class=io.StringIO, + ) def _test_push_and_pull_data( self, @@ -4141,10 +4677,10 @@ def _test_push_and_pull_data( ): # We separate out the calls to make it clearer to type checkers what's happening. 
if encoding is None: - client.push(f"{pebble_dir}/test", original_data) + client.push(f'{pebble_dir}/test', original_data) else: - client.push(f"{pebble_dir}/test", original_data, encoding=encoding) - with client.pull(f"{pebble_dir}/test", encoding=encoding) as infile: + client.push(f'{pebble_dir}/test', original_data, encoding=encoding) + with client.pull(f'{pebble_dir}/test', encoding=encoding) as infile: received_data = infile.read() assert original_data == received_data @@ -4152,12 +4688,12 @@ def _test_push_and_pull_data( if encoding is None: stream_class = typing.cast(typing.Type[io.BytesIO], stream_class) small_file = stream_class(typing.cast(bytes, original_data)) - client.push(f"{pebble_dir}/test", small_file) + client.push(f'{pebble_dir}/test', small_file) else: stream_class = typing.cast(typing.Type[io.StringIO], stream_class) small_file = stream_class(typing.cast(str, original_data)) - client.push(f"{pebble_dir}/test", small_file, encoding=encoding) - with client.pull(f"{pebble_dir}/test", encoding=encoding) as infile: + client.push(f'{pebble_dir}/test', small_file, encoding=encoding) + with client.pull(f'{pebble_dir}/test', encoding=encoding) as infile: received_data = infile.read() assert original_data == received_data @@ -4167,8 +4703,8 @@ def test_push_bytes_ignore_encoding( client: PebbleClientType, ): # push() encoding param should be ignored if source is bytes - client.push(f"{pebble_dir}/test", b'\x00\x01', encoding='utf-8') - with client.pull(f"{pebble_dir}/test", encoding=None) as infile: + client.push(f'{pebble_dir}/test', b'\x00\x01', encoding='utf-8') + with client.pull(f'{pebble_dir}/test', encoding=None) as infile: received_data = infile.read() assert received_data == b'\x00\x01' @@ -4178,8 +4714,8 @@ def test_push_bytesio_ignore_encoding( client: PebbleClientType, ): # push() encoding param should be ignored if source is binary stream - client.push(f"{pebble_dir}/test", io.BytesIO(b'\x00\x01'), encoding='utf-8') - with client.pull(f"{pebble_dir}/test", encoding=None) as infile: + client.push(f'{pebble_dir}/test', io.BytesIO(b'\x00\x01'), encoding='utf-8') + with client.pull(f'{pebble_dir}/test', encoding=None) as infile: received_data = infile.read() assert received_data == b'\x00\x01' @@ -4194,8 +4730,8 @@ def test_push_and_pull_larger_file( data_size = 1024 * 1024 original_data = os.urandom(data_size) - client.push(f"{pebble_dir}/test", original_data) - with client.pull(f"{pebble_dir}/test", encoding=None) as infile: + client.push(f'{pebble_dir}/test', original_data) + with client.pull(f'{pebble_dir}/test', encoding=None) as infile: received_data = infile.read() assert original_data == received_data @@ -4207,10 +4743,10 @@ def test_push_to_non_existent_subdir( data = 'data' with pytest.raises(pebble.PathError) as excinfo: - client.push(f"{pebble_dir}/nonexistent_dir/test", data, make_dirs=False) + client.push(f'{pebble_dir}/nonexistent_dir/test', data, make_dirs=False) assert excinfo.value.kind == 'not-found' - client.push(f"{pebble_dir}/nonexistent_dir/test", data, make_dirs=True) + client.push(f'{pebble_dir}/nonexistent_dir/test', data, make_dirs=True) def test_push_as_child_of_file_raises_error( self, @@ -4218,9 +4754,9 @@ def test_push_as_child_of_file_raises_error( client: PebbleClientType, ): data = 'data' - client.push(f"{pebble_dir}/file", data) + client.push(f'{pebble_dir}/file', data) with pytest.raises(pebble.PathError) as excinfo: - client.push(f"{pebble_dir}/file/file", data) + client.push(f'{pebble_dir}/file/file', data) assert excinfo.value.kind 
== 'generic-file-error' def test_push_with_permission_mask( @@ -4229,16 +4765,16 @@ def test_push_with_permission_mask( client: PebbleClientType, ): data = 'data' - client.push(f"{pebble_dir}/file", data, permissions=0o600) - client.push(f"{pebble_dir}/file", data, permissions=0o777) + client.push(f'{pebble_dir}/file', data, permissions=0o600) + client.push(f'{pebble_dir}/file', data, permissions=0o777) # If permissions are outside of the range 0o000 through 0o777, an exception should be # raised. for bad_permission in ( 0o1000, # Exceeds 0o777 - -1, # Less than 0o000 + -1, # Less than 0o000 ): with pytest.raises(pebble.PathError) as excinfo: - client.push(f"{pebble_dir}/file", data, permissions=bad_permission) + client.push(f'{pebble_dir}/file', data, permissions=bad_permission) assert excinfo.value.kind == 'generic-file-error' def test_push_files_and_list( @@ -4249,20 +4785,19 @@ def test_push_files_and_list( data = 'data' # Let's push the first file with a bunch of details. We'll check on this later. - client.push( - f"{pebble_dir}/file1", data, - permissions=0o620) + client.push(f'{pebble_dir}/file1', data, permissions=0o620) # Do a quick push with defaults for the other files. - client.push(f"{pebble_dir}/file2", data) - client.push(f"{pebble_dir}/file3", data) + client.push(f'{pebble_dir}/file2', data) + client.push(f'{pebble_dir}/file3', data) - files = client.list_files(f"{pebble_dir}/") - assert {file.path for file in files} == \ - {pebble_dir + file for file in ('/file1', '/file2', '/file3')} + files = client.list_files(f'{pebble_dir}/') + assert {file.path for file in files} == { + pebble_dir + file for file in ('/file1', '/file2', '/file3') + } # Let's pull the first file again and check its details - file = [f for f in files if f.path == f"{pebble_dir}/file1"][0] + file = [f for f in files if f.path == f'{pebble_dir}/file1'][0] assert file.name == 'file1' assert file.type == pebble.FileType.FILE assert file.size == 4 @@ -4276,9 +4811,9 @@ def test_push_and_list_file( client: PebbleClientType, ): data = 'data' - client.push(f"{pebble_dir}/file", data) - files = client.list_files(f"{pebble_dir}/") - assert {file.path for file in files} == {f"{pebble_dir}/file"} + client.push(f'{pebble_dir}/file', data) + files = client.list_files(f'{pebble_dir}/') + assert {file.path for file in files} == {f'{pebble_dir}/file'} def test_push_file_with_relative_path_fails( self, @@ -4293,31 +4828,30 @@ def test_pull_not_found( client: PebbleClientType, ): with pytest.raises(pebble.PathError) as excinfo: - client.pull("/not/found") - assert excinfo.value.kind == "not-found" - assert "/not/found" in excinfo.value.message + client.pull('/not/found') + assert excinfo.value.kind == 'not-found' + assert '/not/found' in excinfo.value.message def test_pull_directory( self, pebble_dir: str, client: PebbleClientType, ): - client.make_dir(f"{pebble_dir}/subdir") + client.make_dir(f'{pebble_dir}/subdir') with pytest.raises(pebble.PathError) as excinfo: - client.pull(f"{pebble_dir}/subdir") - assert excinfo.value.kind == "generic-file-error" - assert f"{pebble_dir}/subdir" in excinfo.value.message + client.pull(f'{pebble_dir}/subdir') + assert excinfo.value.kind == 'generic-file-error' + assert f'{pebble_dir}/subdir' in excinfo.value.message def test_list_files_not_found_raises( self, client: PebbleClientType, ): with pytest.raises(pebble.APIError) as excinfo: - client.list_files("/not/existing/file/") + client.list_files('/not/existing/file/') assert excinfo.value.code == 404 assert excinfo.value.status == 
'Not Found' - assert excinfo.value.message == 'stat /not/existing/file/: no ' \ - 'such file or directory' + assert excinfo.value.message == 'stat /not/existing/file/: no ' 'such file or directory' def test_list_directory_object_itself( self, @@ -4334,8 +4868,8 @@ def test_list_directory_object_itself( assert dir_.type == pebble.FileType.DIRECTORY # Test with subdirs - client.make_dir(f"{pebble_dir}/subdir") - files = client.list_files(f"{pebble_dir}/subdir", itself=True) + client.make_dir(f'{pebble_dir}/subdir') + files = client.list_files(f'{pebble_dir}/subdir', itself=True) assert len(files) == 1 dir_ = files[0] assert dir_.name == 'subdir' @@ -4356,21 +4890,25 @@ def test_push_files_and_list_by_pattern( '/backup_file.gz', ): client.push(pebble_dir + filename, data) - files = client.list_files(f"{pebble_dir}/", pattern='file*.gz') - assert {file.path for file in files} == \ - {pebble_dir + file for file in ('/file1.gz', '/file2.tar.gz')} + files = client.list_files(f'{pebble_dir}/', pattern='file*.gz') + assert {file.path for file in files} == { + pebble_dir + file for file in ('/file1.gz', '/file2.tar.gz') + } def test_make_directory( self, pebble_dir: str, client: PebbleClientType, ): - client.make_dir(f"{pebble_dir}/subdir") - assert client.list_files(f"{pebble_dir}/", pattern='subdir')[0].path == \ - f"{pebble_dir}/subdir" - client.make_dir(f"{pebble_dir}/subdir/subdir") - assert client.list_files(f"{pebble_dir}/subdir", pattern='subdir')[0].path == \ - f"{pebble_dir}/subdir/subdir" + client.make_dir(f'{pebble_dir}/subdir') + assert ( + client.list_files(f'{pebble_dir}/', pattern='subdir')[0].path == f'{pebble_dir}/subdir' + ) + client.make_dir(f'{pebble_dir}/subdir/subdir') + assert ( + client.list_files(f'{pebble_dir}/subdir', pattern='subdir')[0].path + == f'{pebble_dir}/subdir/subdir' + ) def test_make_directory_recursively( self, @@ -4378,12 +4916,14 @@ def test_make_directory_recursively( client: PebbleClientType, ): with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{pebble_dir}/subdir/subdir", make_parents=False) + client.make_dir(f'{pebble_dir}/subdir/subdir', make_parents=False) assert excinfo.value.kind == 'not-found' - client.make_dir(f"{pebble_dir}/subdir/subdir", make_parents=True) - assert client.list_files(f"{pebble_dir}/subdir", pattern='subdir')[0].path == \ - f"{pebble_dir}/subdir/subdir" + client.make_dir(f'{pebble_dir}/subdir/subdir', make_parents=True) + assert ( + client.list_files(f'{pebble_dir}/subdir', pattern='subdir')[0].path + == f'{pebble_dir}/subdir/subdir' + ) def test_make_directory_with_relative_path_fails( self, @@ -4398,16 +4938,16 @@ def test_make_subdir_of_file_fails( pebble_dir: str, client: PebbleClientType, ): - client.push(f"{pebble_dir}/file", 'data') + client.push(f'{pebble_dir}/file', 'data') # Direct child case with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{pebble_dir}/file/subdir") + client.make_dir(f'{pebble_dir}/file/subdir') assert excinfo.value.kind == 'generic-file-error' # Recursive creation case, in case its flow is different with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{pebble_dir}/file/subdir/subdir", make_parents=True) + client.make_dir(f'{pebble_dir}/file/subdir/subdir', make_parents=True) assert excinfo.value.kind == 'generic-file-error' def test_make_dir_with_permission_mask( @@ -4415,21 +4955,21 @@ def test_make_dir_with_permission_mask( pebble_dir: str, client: PebbleClientType, ): - client.make_dir(f"{pebble_dir}/dir1", permissions=0o700) - 
client.make_dir(f"{pebble_dir}/dir2", permissions=0o777) + client.make_dir(f'{pebble_dir}/dir1', permissions=0o700) + client.make_dir(f'{pebble_dir}/dir2', permissions=0o777) - files = client.list_files(f"{pebble_dir}/", pattern='dir*') - assert [f for f in files if f.path == f"{pebble_dir}/dir1"][0].permissions == 0o700 - assert [f for f in files if f.path == f"{pebble_dir}/dir2"][0].permissions == 0o777 + files = client.list_files(f'{pebble_dir}/', pattern='dir*') + assert [f for f in files if f.path == f'{pebble_dir}/dir1'][0].permissions == 0o700 + assert [f for f in files if f.path == f'{pebble_dir}/dir2'][0].permissions == 0o777 # If permissions are outside of the range 0o000 through 0o777, an exception should be # raised. for i, bad_permission in enumerate(( 0o1000, # Exceeds 0o777 - -1, # Less than 0o000 + -1, # Less than 0o000 )): with pytest.raises(pebble.PathError) as excinfo: - client.make_dir(f"{pebble_dir}/dir3_{i}", permissions=bad_permission) + client.make_dir(f'{pebble_dir}/dir3_{i}', permissions=bad_permission) assert excinfo.value.kind == 'generic-file-error' def test_remove_path( @@ -4437,32 +4977,32 @@ def test_remove_path( pebble_dir: str, client: PebbleClientType, ): - client.push(f"{pebble_dir}/file", '') - client.make_dir(f"{pebble_dir}/dir/subdir", make_parents=True) - client.push(f"{pebble_dir}/dir/subdir/file1", '') - client.push(f"{pebble_dir}/dir/subdir/file2", '') - client.push(f"{pebble_dir}/dir/subdir/file3", '') - client.make_dir(f"{pebble_dir}/empty_dir") + client.push(f'{pebble_dir}/file', '') + client.make_dir(f'{pebble_dir}/dir/subdir', make_parents=True) + client.push(f'{pebble_dir}/dir/subdir/file1', '') + client.push(f'{pebble_dir}/dir/subdir/file2', '') + client.push(f'{pebble_dir}/dir/subdir/file3', '') + client.make_dir(f'{pebble_dir}/empty_dir') - client.remove_path(f"{pebble_dir}/file") + client.remove_path(f'{pebble_dir}/file') - client.remove_path(f"{pebble_dir}/empty_dir") + client.remove_path(f'{pebble_dir}/empty_dir') # Remove non-empty directory, recursive=False: error with pytest.raises(pebble.PathError) as excinfo: - client.remove_path(f"{pebble_dir}/dir", recursive=False) + client.remove_path(f'{pebble_dir}/dir', recursive=False) assert excinfo.value.kind == 'generic-file-error' # Remove non-empty directory, recursive=True: succeeds (and removes child objects) - client.remove_path(f"{pebble_dir}/dir", recursive=True) + client.remove_path(f'{pebble_dir}/dir', recursive=True) # Remove non-existent path, recursive=False: error with pytest.raises(pebble.PathError) as excinfo: - client.remove_path(f"{pebble_dir}/dir/does/not/exist/asdf", recursive=False) + client.remove_path(f'{pebble_dir}/dir/does/not/exist/asdf', recursive=False) assert excinfo.value.kind == 'not-found' # Remove non-existent path, recursive=True: succeeds - client.remove_path(f"{pebble_dir}/dir/does/not/exist/asdf", recursive=True) + client.remove_path(f'{pebble_dir}/dir/does/not/exist/asdf', recursive=True) # Other notes: # * Parent directories created via push(make_dirs=True) default to root:root ownership @@ -4483,11 +5023,14 @@ class _MakedirArgs(typing.TypedDict): class TestPebbleStorageAPIsUsingMocks(PebbleStorageAPIsTestMixin): @pytest.fixture def client(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: mycontainer: {} - ''') + """, + ) backend = harness._backend client = backend.get_pebble('/charm/containers/mycontainer/pebble.socket') 
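# A short sketch of the push/list_files/make_dir/remove_path round trip that the
# storage-API tests above cover, using a Harness-backed container; the container
# name and file paths are arbitrary examples.
import ops
import ops.testing

harness = ops.testing.Harness(ops.CharmBase, meta="""
name: demo
containers:
  workload: {}
""")
harness.begin()
harness.set_can_connect('workload', True)
c = harness.charm.unit.get_container('workload')
c.make_dir('/data/subdir', make_parents=True, permissions=0o700)
c.push('/data/subdir/config.yaml', 'key: value', make_dirs=True)
assert [f.path for f in c.list_files('/data/subdir')] == ['/data/subdir/config.yaml']
assert c.pull('/data/subdir/config.yaml').read() == 'key: value'
c.remove_path('/data', recursive=True)  # recursive=True also removes the children
harness.cleanup()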
harness.set_can_connect('mycontainer', True) @@ -4501,7 +5044,9 @@ def pebble_dir(self, client: PebbleClientType): return pebble_dir def test_container_storage_mounts(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: c1: @@ -4521,7 +5066,8 @@ def test_container_storage_mounts(self, request: pytest.FixtureRequest): type: filesystem store2: type: filesystem - ''') + """, + ) request.addfinalizer(harness.cleanup) store_id = harness.add_storage('store1')[0] @@ -4582,41 +5128,21 @@ def test_push_with_ownership( data = 'data' user, group = self._select_testing_user_group() cases: typing.List[_MakedirArgs] = [ + {'user_id': user.pw_uid, 'user': None, 'group_id': group.gr_gid, 'group': None}, + {'user_id': None, 'user': user.pw_name, 'group_id': None, 'group': group.gr_name}, + {'user_id': None, 'user': user.pw_name, 'group_id': group.gr_gid, 'group': None}, + {'user_id': user.pw_uid, 'user': None, 'group_id': None, 'group': group.gr_name}, { - "user_id": user.pw_uid, - "user": None, - "group_id": group.gr_gid, - "group": None - }, - { - "user_id": None, - "user": user.pw_name, - "group_id": None, - "group": group.gr_name - }, - { - "user_id": None, - "user": user.pw_name, - "group_id": group.gr_gid, - "group": None + 'user_id': user.pw_uid, + 'user': user.pw_name, + 'group_id': group.gr_gid, + 'group': group.gr_name, }, - { - "user_id": user.pw_uid, - "user": None, - "group_id": None, - "group": group.gr_name - }, - { - "user_id": user.pw_uid, - "user": user.pw_name, - "group_id": group.gr_gid, - "group": group.gr_name - } ] for idx, case in enumerate(cases): - client.push(f"{pebble_dir}/file{idx}", data, **case) - file_ = client.list_files(f"{pebble_dir}/file{idx}")[0] - assert file_.path == f"{pebble_dir}/file{idx}" + client.push(f'{pebble_dir}/file{idx}', data, **case) + file_ = client.list_files(f'{pebble_dir}/file{idx}')[0] + assert file_.path == f'{pebble_dir}/file{idx}' def test_make_dir_with_ownership( self, @@ -4625,44 +5151,24 @@ def test_make_dir_with_ownership( ): user, group = self._select_testing_user_group() cases: typing.List[_MakedirArgs] = [ + {'user_id': user.pw_uid, 'user': None, 'group_id': group.gr_gid, 'group': None}, + {'user_id': None, 'user': user.pw_name, 'group_id': None, 'group': group.gr_name}, + {'user_id': None, 'user': user.pw_name, 'group_id': group.gr_gid, 'group': None}, + {'user_id': user.pw_uid, 'user': None, 'group_id': None, 'group': group.gr_name}, { - "user_id": user.pw_uid, - "user": None, - "group_id": group.gr_gid, - "group": None - }, - { - "user_id": None, - "user": user.pw_name, - "group_id": None, - "group": group.gr_name - }, - { - "user_id": None, - "user": user.pw_name, - "group_id": group.gr_gid, - "group": None + 'user_id': user.pw_uid, + 'user': user.pw_name, + 'group_id': group.gr_gid, + 'group': group.gr_name, }, - { - "user_id": user.pw_uid, - "user": None, - "group_id": None, - "group": group.gr_name - }, - { - "user_id": user.pw_uid, - "user": user.pw_name, - "group_id": group.gr_gid, - "group": group.gr_name - } ] for idx, case in enumerate(cases): - client.make_dir(f"{pebble_dir}/dir{idx}", **case) - dir_ = client.list_files(f"{pebble_dir}/dir{idx}", itself=True)[0] - assert dir_.path == f"{pebble_dir}/dir{idx}" + client.make_dir(f'{pebble_dir}/dir{idx}', **case) + dir_ = client.list_files(f'{pebble_dir}/dir{idx}', itself=True)[0] + assert dir_.path == f'{pebble_dir}/dir{idx}' - @patch("grp.getgrgid") - 
@patch("pwd.getpwuid") + @patch('grp.getgrgid') + @patch('pwd.getpwuid') def test_list_files_unnamed( self, getpwuid: MagicMock, @@ -4673,8 +5179,8 @@ def test_list_files_unnamed( getpwuid.side_effect = KeyError getgrgid.side_effect = KeyError data = 'data' - client.push(f"{pebble_dir}/file", data) - files = client.list_files(f"{pebble_dir}/") + client.push(f'{pebble_dir}/file', data) + files = client.list_files(f'{pebble_dir}/') assert len(files) == 1 assert files[0].user is None assert files[0].group is None @@ -4683,7 +5189,9 @@ def test_list_files_unnamed( class TestFilesystem: @pytest.fixture def harness(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test containers: test-container: @@ -4693,87 +5201,88 @@ def harness(self): storage: test-storage: type: filesystem - ''') + """, + ) harness.begin() - harness.set_can_connect("test-container", True) + harness.set_can_connect('test-container', True) yield harness harness.cleanup() @pytest.fixture def container_fs_root(self, harness: ops.testing.Harness[ops.CharmBase]): - return harness.get_filesystem_root("test-container") + return harness.get_filesystem_root('test-container') @pytest.fixture def container(self, harness: ops.testing.Harness[ops.CharmBase]): - return harness.charm.unit.get_container("test-container") + return harness.charm.unit.get_container('test-container') def test_push(self, container: ops.Container, container_fs_root: pathlib.Path): - container.push("/foo", source="foo") - assert (container_fs_root / "foo").is_file() - assert (container_fs_root / "foo").read_text() == "foo" + container.push('/foo', source='foo') + assert (container_fs_root / 'foo').is_file() + assert (container_fs_root / 'foo').read_text() == 'foo' def test_push_create_parent(self, container: ops.Container, container_fs_root: pathlib.Path): - container.push("/foo/bar", source="bar", make_dirs=True) - assert (container_fs_root / "foo").is_dir() - assert (container_fs_root / "foo" / "bar").read_text() == "bar" + container.push('/foo/bar', source='bar', make_dirs=True) + assert (container_fs_root / 'foo').is_dir() + assert (container_fs_root / 'foo' / 'bar').read_text() == 'bar' def test_push_path(self, container: ops.Container, container_fs_root: pathlib.Path): with tempfile.TemporaryDirectory() as temp: tempdir = pathlib.Path(temp) - (tempdir / "foo/bar").mkdir(parents=True) - (tempdir / "foo/test").write_text("test") - (tempdir / "foo/bar/foobar").write_text("foobar") - (tempdir / "foo/baz").mkdir(parents=True) - container.push_path(tempdir / "foo", "/tmp") # noqa: S108 - - assert (container_fs_root / "tmp").is_dir() - assert (container_fs_root / "tmp/foo").is_dir() - assert (container_fs_root / "tmp/foo/bar").is_dir() - assert (container_fs_root / "tmp/foo/baz").is_dir() - assert (container_fs_root / "tmp/foo/test").read_text() == "test" - assert (container_fs_root / "tmp/foo/bar/foobar").read_text() == "foobar" + (tempdir / 'foo/bar').mkdir(parents=True) + (tempdir / 'foo/test').write_text('test') + (tempdir / 'foo/bar/foobar').write_text('foobar') + (tempdir / 'foo/baz').mkdir(parents=True) + container.push_path(tempdir / 'foo', '/tmp') # noqa: S108 + + assert (container_fs_root / 'tmp').is_dir() + assert (container_fs_root / 'tmp/foo').is_dir() + assert (container_fs_root / 'tmp/foo/bar').is_dir() + assert (container_fs_root / 'tmp/foo/baz').is_dir() + assert (container_fs_root / 'tmp/foo/test').read_text() == 'test' + assert (container_fs_root / 
'tmp/foo/bar/foobar').read_text() == 'foobar' def test_make_dir(self, container: ops.Container, container_fs_root: pathlib.Path): - container.make_dir("/tmp") # noqa: S108 - assert (container_fs_root / "tmp").is_dir() - container.make_dir("/foo/bar/foobar", make_parents=True) - assert (container_fs_root / "foo/bar/foobar").is_dir() + container.make_dir('/tmp') # noqa: S108 + assert (container_fs_root / 'tmp').is_dir() + container.make_dir('/foo/bar/foobar', make_parents=True) + assert (container_fs_root / 'foo/bar/foobar').is_dir() def test_pull(self, container: ops.Container, container_fs_root: pathlib.Path): - (container_fs_root / "foo").write_text("foo") - assert container.pull("/foo").read() == "foo" + (container_fs_root / 'foo').write_text('foo') + assert container.pull('/foo').read() == 'foo' def test_pull_path(self, container: ops.Container, container_fs_root: pathlib.Path): - (container_fs_root / "foo").mkdir() - (container_fs_root / "foo/bar").write_text("bar") - (container_fs_root / "foobar").mkdir() - (container_fs_root / "test").write_text("test") + (container_fs_root / 'foo').mkdir() + (container_fs_root / 'foo/bar').write_text('bar') + (container_fs_root / 'foobar').mkdir() + (container_fs_root / 'test').write_text('test') with tempfile.TemporaryDirectory() as temp: tempdir = pathlib.Path(temp) - container.pull_path("/", tempdir) - assert (tempdir / "foo").is_dir() - assert (tempdir / "foo/bar").read_text() == "bar" - assert (tempdir / "foobar").is_dir() - assert (tempdir / "test").read_text() == "test" + container.pull_path('/', tempdir) + assert (tempdir / 'foo').is_dir() + assert (tempdir / 'foo/bar').read_text() == 'bar' + assert (tempdir / 'foobar').is_dir() + assert (tempdir / 'test').read_text() == 'test' def test_list_files(self, container: ops.Container, container_fs_root: pathlib.Path): - (container_fs_root / "foo").mkdir() - assert container.list_files("/foo") == [] - assert len(container.list_files("/")) == 1 - file_info = container.list_files("/")[0] - assert file_info.path == "/foo" + (container_fs_root / 'foo').mkdir() + assert container.list_files('/foo') == [] + assert len(container.list_files('/')) == 1 + file_info = container.list_files('/')[0] + assert file_info.path == '/foo' assert file_info.type == FileType.DIRECTORY - assert container.list_files("/foo", itself=True)[0].path == "/foo" - (container_fs_root / "foo/bar").write_text("foobar") - assert len(container.list_files("/foo")) == 1 - assert len(container.list_files("/foo", pattern="*ar")) == 1 - assert len(container.list_files("/foo", pattern="*oo")) == 0 - file_info = container.list_files("/foo")[0] - assert file_info.path == "/foo/bar" + assert container.list_files('/foo', itself=True)[0].path == '/foo' + (container_fs_root / 'foo/bar').write_text('foobar') + assert len(container.list_files('/foo')) == 1 + assert len(container.list_files('/foo', pattern='*ar')) == 1 + assert len(container.list_files('/foo', pattern='*oo')) == 0 + file_info = container.list_files('/foo')[0] + assert file_info.path == '/foo/bar' assert file_info.type == FileType.FILE - root_info = container.list_files("/", itself=True)[0] - assert root_info.path == "/" - assert root_info.name == "/" + root_info = container.list_files('/', itself=True)[0] + assert root_info.path == '/' + assert root_info.name == '/' def test_storage_mount( self, @@ -4781,14 +5290,14 @@ def test_storage_mount( container: ops.Container, container_fs_root: pathlib.Path, ): - storage_id = harness.add_storage("test-storage", 1, attach=True)[0] - assert 
(container_fs_root / "mounts/foo").exists() - (container_fs_root / "mounts/foo/bar").write_text("foobar") - assert container.pull("/mounts/foo/bar").read() == "foobar" + storage_id = harness.add_storage('test-storage', 1, attach=True)[0] + assert (container_fs_root / 'mounts/foo').exists() + (container_fs_root / 'mounts/foo/bar').write_text('foobar') + assert container.pull('/mounts/foo/bar').read() == 'foobar' harness.detach_storage(storage_id) - assert not (container_fs_root / "mounts/foo/bar").is_file() + assert not (container_fs_root / 'mounts/foo/bar').is_file() harness.attach_storage(storage_id) - assert (container_fs_root / "mounts/foo/bar").read_text(), "foobar" + assert (container_fs_root / 'mounts/foo/bar').read_text(), 'foobar' def _make_storage_attach_harness( self, @@ -4807,7 +5316,7 @@ def _on_attach(self, event: ops.StorageAttachedEvent): self.locations.append(event.storage.location) if meta is None: - meta = ''' + meta = """ name: test containers: test-container: @@ -4817,7 +5326,7 @@ def _on_attach(self, event: ops.StorageAttachedEvent): storage: test-storage: type: filesystem - ''' + """ harness = ops.testing.Harness(MyCharm, meta=meta) request.addfinalizer(harness.cleanup) return harness @@ -4851,20 +5360,20 @@ def test_storage_add_with_later_attach(self, request: pytest.FixtureRequest): assert harness.charm.attached.count('test-storage/0') == 1 def test_storage_machine_charm_metadata(self, request: pytest.FixtureRequest): - meta = ''' + meta = """ name: test storage: test-storage: type: filesystem mount: /mounts/foo - ''' + """ harness = self._make_storage_attach_harness(request, meta) harness.begin() harness.add_storage('test-storage', attach=True) assert 'test-storage/0' in harness.charm.attached def test_storage_multiple_storage_instances(self, request: pytest.FixtureRequest): - meta = ''' + meta = """ name: test storage: test-storage: @@ -4872,7 +5381,7 @@ def test_storage_multiple_storage_instances(self, request: pytest.FixtureRequest mount: /mounts/foo multiple: range: 2-4 - ''' + """ harness = self._make_storage_attach_harness(request, meta) harness.begin() harness.add_storage('test-storage', 2, attach=True) @@ -4880,15 +5389,20 @@ def test_storage_multiple_storage_instances(self, request: pytest.FixtureRequest assert harness.charm.locations[0] != harness.charm.locations[1] harness.add_storage('test-storage', 2, attach=True) assert harness.charm.attached == [ - 'test-storage/0', 'test-storage/1', 'test-storage/2', 'test-storage/3'] + 'test-storage/0', + 'test-storage/1', + 'test-storage/2', + 'test-storage/3', + ] assert len(set(harness.charm.locations)) == 4 class TestSecrets: def test_add_model_secret_by_app_name_str(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4900,9 +5414,10 @@ def test_add_model_secret_by_app_name_str(self, request: pytest.FixtureRequest): assert secret.get_content() == {'password': 'hunter2'} def test_add_model_secret_by_app_instance(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = 
ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4915,9 +5430,10 @@ def test_add_model_secret_by_app_instance(self, request: pytest.FixtureRequest): assert secret.get_content() == {'password': 'hunter3'} def test_add_model_secret_by_unit_instance(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -4930,9 +5446,10 @@ def test_add_model_secret_by_unit_instance(self, request: pytest.FixtureRequest) assert secret.get_content() == {'password': 'hunter4'} def test_get_secret_as_owner(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) harness.begin() # App secret. @@ -4952,7 +5469,7 @@ def test_get_secret_and_refresh(self, request: pytest.FixtureRequest): harness.begin() harness.set_leader(True) secret = harness.charm.app.add_secret({'password': 'hunter6'}) - secret.set_content({"password": "hunter7"}) + secret.set_content({'password': 'hunter7'}) retrieved_secret = harness.model.get_secret(id=secret.id) assert retrieved_secret.id == secret.id assert retrieved_secret.get_content() == {'password': 'hunter6'} @@ -4966,7 +5483,7 @@ def test_get_secret_removed(self, request: pytest.FixtureRequest): harness.begin() harness.set_leader(True) secret = harness.charm.app.add_secret({'password': 'hunter8'}) - secret.set_content({"password": "hunter9"}) + secret.set_content({'password': 'hunter9'}) secret.remove_revision(secret.get_info().revision) with pytest.raises(ops.SecretNotFoundError): harness.model.get_secret(id=secret.id) @@ -4975,13 +5492,13 @@ def test_get_secret_by_label(self, request: pytest.FixtureRequest): harness = ops.testing.Harness(ops.CharmBase, meta='name: webapp') request.addfinalizer(harness.cleanup) harness.begin() - secret_id = harness.charm.app.add_secret({'password': 'hunter9'}, label="my-pass").id - secret = harness.model.get_secret(label="my-pass") - assert secret.label == "my-pass" + secret_id = harness.charm.app.add_secret({'password': 'hunter9'}, label='my-pass').id + secret = harness.model.get_secret(label='my-pass') + assert secret.label == 'my-pass' assert secret.get_content() == {'password': 'hunter9'} - secret = harness.model.get_secret(id=secret_id, label="other-name") + secret = harness.model.get_secret(id=secret_id, label='other-name') assert secret.get_content() == {'password': 'hunter9'} - secret = harness.model.get_secret(label="other-name") + secret = harness.model.get_secret(label='other-name') assert secret.get_content() == {'password': 'hunter9'} def test_add_model_secret_invalid_content(self, request: pytest.FixtureRequest): @@ -4992,9 +5509,10 @@ def test_add_model_secret_invalid_content(self, request: pytest.FixtureRequest): 
harness.add_model_secret('database', {'x': 'y'}) # key too short def test_set_secret_content(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + EventRecorder, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -5040,9 +5558,10 @@ def test_set_secret_content_invalid_content(self, request: pytest.FixtureRequest harness.set_secret_content(secret_id, {'x': 'y'}) def test_grant_secret_and_revoke_secret(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -5058,9 +5577,10 @@ def test_grant_secret_and_revoke_secret(self, request: pytest.FixtureRequest): harness.model.get_secret(id=secret_id) def test_grant_secret_wrong_app(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -5071,9 +5591,10 @@ def test_grant_secret_wrong_app(self, request: pytest.FixtureRequest): harness.model.get_secret(id=secret_id) def test_grant_secret_wrong_unit(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'webapp', 'requires': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'database') harness.add_relation_unit(relation_id, 'database/0') @@ -5092,9 +5613,10 @@ def test_grant_secret_no_relation(self, request: pytest.FixtureRequest): harness.grant_secret(secret_id, 'webapp') def test_get_secret_grants(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'database', 'provides': {'db': {'interface': 'pgsql'}}} - )) + harness = ops.testing.Harness( + ops.CharmBase, + meta=yaml.safe_dump({'name': 'database', 'provides': {'db': {'interface': 'pgsql'}}}), + ) request.addfinalizer(harness.cleanup) relation_id = harness.add_relation('db', 'webapp') @@ -5225,12 +5747,12 @@ def test_secret_permissions_unit(self, request: pytest.FixtureRequest): harness.begin() # The charm can always manage a local unit secret. 
- secret_id = harness.charm.unit.add_secret({"password": "1234"}).id + secret_id = harness.charm.unit.add_secret({'password': '1234'}).id secret = harness.charm.model.get_secret(id=secret_id) - assert secret.get_content() == {"password": "1234"} + assert secret.get_content() == {'password': '1234'} info = secret.get_info() assert info.id == secret_id - secret.set_content({"password": "5678"}) + secret.set_content({'password': '5678'}) secret.remove_all_revisions() def test_secret_permissions_leader(self, request: pytest.FixtureRequest): @@ -5240,12 +5762,12 @@ def test_secret_permissions_leader(self, request: pytest.FixtureRequest): # The leader can manage an application secret. harness.set_leader(True) - secret_id = harness.charm.app.add_secret({"password": "1234"}).id + secret_id = harness.charm.app.add_secret({'password': '1234'}).id secret = harness.charm.model.get_secret(id=secret_id) - assert secret.get_content() == {"password": "1234"} + assert secret.get_content() == {'password': '1234'} info = secret.get_info() assert info.id == secret_id - secret.set_content({"password": "5678"}) + secret.set_content({'password': '5678'}) secret.remove_all_revisions() def test_secret_permissions_nonleader(self, request: pytest.FixtureRequest): @@ -5255,20 +5777,18 @@ def test_secret_permissions_nonleader(self, request: pytest.FixtureRequest): # Non-leaders can only view an application secret. harness.set_leader(False) - secret_id = harness.charm.app.add_secret({"password": "1234"}).id + secret_id = harness.charm.app.add_secret({'password': '1234'}).id secret = harness.charm.model.get_secret(id=secret_id) - assert secret.get_content() == {"password": "1234"} + assert secret.get_content() == {'password': '1234'} with pytest.raises(ops.model.SecretNotFoundError): secret.get_info() with pytest.raises(ops.model.SecretNotFoundError): - secret.set_content({"password": "5678"}) + secret.set_content({'password': '5678'}) with pytest.raises(ops.model.SecretNotFoundError): secret.remove_all_revisions() def test_add_user_secret(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp'} - )) + harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump({'name': 'webapp'})) request.addfinalizer(harness.cleanup) harness.begin() @@ -5281,9 +5801,7 @@ def test_add_user_secret(self, request: pytest.FixtureRequest): assert secret.get_content() == secret_content def test_get_user_secret_without_grant(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp'} - )) + harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump({'name': 'webapp'})) request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'password': 'foo'}) @@ -5291,9 +5809,7 @@ def test_get_user_secret_without_grant(self, request: pytest.FixtureRequest): harness.model.get_secret(id=secret_id) def test_revoke_user_secret(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump( - {'name': 'webapp'} - )) + harness = ops.testing.Harness(ops.CharmBase, meta=yaml.safe_dump({'name': 'webapp'})) request.addfinalizer(harness.cleanup) harness.begin() @@ -5305,9 +5821,7 @@ def test_revoke_user_secret(self, request: pytest.FixtureRequest): harness.model.get_secret(id=secret_id) def test_set_user_secret_content(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump( - {'name': 'webapp'} 
- )) + harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump({'name': 'webapp'})) request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'password': 'foo'}) @@ -5319,9 +5833,7 @@ def test_set_user_secret_content(self, request: pytest.FixtureRequest): assert secret.get_content(refresh=True) == {'password': 'bar'} def test_get_user_secret_info(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump( - {'name': 'webapp'} - )) + harness = ops.testing.Harness(EventRecorder, meta=yaml.safe_dump({'name': 'webapp'})) request.addfinalizer(harness.cleanup) harness.begin() secret_id = harness.add_user_secret({'password': 'foo'}) @@ -5402,101 +5914,104 @@ def test_errors(self, request: pytest.FixtureRequest): class TestHandleExec: @pytest.fixture def harness(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test containers: test-container: - ''') + """, + ) harness.begin() - harness.set_can_connect("test-container", True) + harness.set_can_connect('test-container', True) yield harness harness.cleanup() @pytest.fixture def container(self, harness: ops.testing.Harness[ops.CharmBase]): - return harness.charm.unit.get_container("test-container") + return harness.charm.unit.get_container('test-container') def test_register_handler( self, harness: ops.testing.Harness[ops.CharmBase], container: ops.Container, ): - harness.handle_exec(container, ["foo"], result="foo") - harness.handle_exec(container, ["foo", "bar", "foobar"], result="foobar2") - harness.handle_exec(container, ["foo", "bar"], result="foobar") + harness.handle_exec(container, ['foo'], result='foo') + harness.handle_exec(container, ['foo', 'bar', 'foobar'], result='foobar2') + harness.handle_exec(container, ['foo', 'bar'], result='foobar') - stdout, _ = container.exec(["foo", "bar", "foobar", "--help"]).wait_output() - assert stdout == "foobar2" + stdout, _ = container.exec(['foo', 'bar', 'foobar', '--help']).wait_output() + assert stdout == 'foobar2' - stdout, _ = container.exec(["foo", "bar", "--help"]).wait_output() - assert stdout == "foobar" + stdout, _ = container.exec(['foo', 'bar', '--help']).wait_output() + assert stdout == 'foobar' - stdout, _ = container.exec(["foo", "bar"]).wait_output() - assert stdout == "foobar" + stdout, _ = container.exec(['foo', 'bar']).wait_output() + assert stdout == 'foobar' - stdout, _ = container.exec(["foo", "--help"]).wait_output() - assert stdout == "foo" + stdout, _ = container.exec(['foo', '--help']).wait_output() + assert stdout == 'foo' def test_re_register_handler( self, harness: ops.testing.Harness[ops.CharmBase], container: ops.Container, ): - harness.handle_exec(container, ["foo", "bar"], result="foobar") - harness.handle_exec(container, ["foo"], result="foo") + harness.handle_exec(container, ['foo', 'bar'], result='foobar') + harness.handle_exec(container, ['foo'], result='foo') - stdout, _ = container.exec(["foo", "bar"]).wait_output() - assert stdout == "foobar" + stdout, _ = container.exec(['foo', 'bar']).wait_output() + assert stdout == 'foobar' - harness.handle_exec(container, ["foo", "bar"], result="hello") - stdout, _ = container.exec(["foo", "bar"]).wait_output() - assert stdout == "hello" + harness.handle_exec(container, ['foo', 'bar'], result='hello') + stdout, _ = container.exec(['foo', 'bar']).wait_output() + assert stdout == 'hello' - harness.handle_exec(container.name, ["foo"], result="hello2") - stdout, _ 
= container.exec(["foo"]).wait_output() - assert stdout == "hello2" + harness.handle_exec(container.name, ['foo'], result='hello2') + stdout, _ = container.exec(['foo']).wait_output() + assert stdout == 'hello2' with pytest.raises(pebble.APIError): - container.exec(["abc"]).wait() + container.exec(['abc']).wait() def test_register_match_all_prefix( self, harness: ops.testing.Harness[ops.CharmBase], container: ops.Container, ): - harness.handle_exec(container, [], result="hello") + harness.handle_exec(container, [], result='hello') - stdout, _ = container.exec(["foo", "bar"]).wait_output() - assert stdout == "hello" + stdout, _ = container.exec(['foo', 'bar']).wait_output() + assert stdout == 'hello' - stdout, _ = container.exec(["ls"]).wait_output() - assert stdout == "hello" + stdout, _ = container.exec(['ls']).wait_output() + assert stdout == 'hello' def test_register_with_result( self, harness: ops.testing.Harness[ops.CharmBase], container: ops.Container, ): - harness.handle_exec(container, ["foo"], result=10) + harness.handle_exec(container, ['foo'], result=10) with pytest.raises(pebble.ExecError) as excinfo: - container.exec(["foo"]).wait() + container.exec(['foo']).wait() assert excinfo.value.exit_code == 10 - harness.handle_exec(container, ["foo"], result="hello") - stdout, stderr = container.exec(["foo"]).wait_output() - assert stdout == "hello" - assert stderr == "" + harness.handle_exec(container, ['foo'], result='hello') + stdout, stderr = container.exec(['foo']).wait_output() + assert stdout == 'hello' + assert stderr == '' with pytest.raises(ValueError): - container.exec(["foo"], encoding=None).wait_output() + container.exec(['foo'], encoding=None).wait_output() - harness.handle_exec(container, ["foo"], result=b"hello2") - stdout, stderr = container.exec(["foo"], encoding=None).wait_output() - assert stdout == b"hello2" - assert stderr == b"" - stdout, stderr = container.exec(["foo"]).wait_output() - assert stdout == "hello2" - assert stderr == "" + harness.handle_exec(container, ['foo'], result=b'hello2') + stdout, stderr = container.exec(['foo'], encoding=None).wait_output() + assert stdout == b'hello2' + assert stderr == b'' + stdout, stderr = container.exec(['foo']).wait_output() + assert stdout == 'hello2' + assert stderr == '' def test_register_with_handler( self, @@ -5510,37 +6025,37 @@ def handler(args: ops.testing.ExecArgs): args_history.append(args) return return_value - harness.handle_exec(container, ["foo"], handler=handler) + harness.handle_exec(container, ['foo'], handler=handler) - container.exec(["foo", "bar"]).wait() + container.exec(['foo', 'bar']).wait() assert len(args_history) == 1 - assert args_history[-1].command == ["foo", "bar"] + assert args_history[-1].command == ['foo', 'bar'] return_value = ExecResult(exit_code=1) with pytest.raises(pebble.ExecError): - container.exec(["foo", "bar"]).wait() + container.exec(['foo', 'bar']).wait() - return_value = ExecResult(stdout="hello", stderr="error") - stdout, stderr = container.exec(["foo"]).wait_output() - assert stdout == "hello" - assert stderr == "error" + return_value = ExecResult(stdout='hello', stderr='error') + stdout, stderr = container.exec(['foo']).wait_output() + assert stdout == 'hello' + assert stderr == 'error' assert len(args_history) == 3 - container.exec(["foo"], environment={"bar": "foobar"}).wait_output() - assert args_history[-1].environment == {"bar": "foobar"} + container.exec(['foo'], environment={'bar': 'foobar'}).wait_output() + assert args_history[-1].environment == {'bar': 
'foobar'} - return_value = ExecResult(stdout=b"hello") - stdout, _ = container.exec(["foo"], encoding=None).wait_output() + return_value = ExecResult(stdout=b'hello') + stdout, _ = container.exec(['foo'], encoding=None).wait_output() assert args_history[-1].encoding is None - assert stdout == b"hello" + assert stdout == b'hello' - container.exec(["foo"], working_dir="/test").wait_output() - assert args_history[-1].working_dir == "/test" + container.exec(['foo'], working_dir='/test').wait_output() + assert args_history[-1].working_dir == '/test' - container.exec(["foo"], user="foo", user_id=1, group="bar", group_id=2).wait() - assert args_history[-1].user == "foo" + container.exec(['foo'], user='foo', user_id=1, group='bar', group_id=2).wait() + assert args_history[-1].user == 'foo' assert args_history[-1].user_id == 1 - assert args_history[-1].group == "bar" + assert args_history[-1].group == 'bar' assert args_history[-1].group_id == 2 def test_exec_timeout( @@ -5553,24 +6068,24 @@ def handler(_: ops.testing.ExecArgs): harness.handle_exec(container, [], handler=handler) with pytest.raises(TimeoutError): - container.exec(["ls"], timeout=1).wait() + container.exec(['ls'], timeout=1).wait() with pytest.raises(RuntimeError): - container.exec(["ls"]).wait() + container.exec(['ls']).wait() def test_combined_error( self, harness: ops.testing.Harness[ops.CharmBase], container: ops.Container, ): - return_value = ExecResult(stdout="foobar") + return_value = ExecResult(stdout='foobar') harness.handle_exec(container, [], handler=lambda _: return_value) - stdout, stderr = container.exec(["ls"], combine_stderr=True).wait_output() - assert stdout == "foobar" - assert stderr == "" + stdout, stderr = container.exec(['ls'], combine_stderr=True).wait_output() + assert stdout == 'foobar' + assert stderr == '' - return_value = ExecResult(stdout="foobar", stderr="error") + return_value = ExecResult(stdout='foobar', stderr='error') with pytest.raises(ValueError): - container.exec(["ls"], combine_stderr=True).wait_output() + container.exec(['ls'], combine_stderr=True).wait_output() def test_exec_stdin( self, @@ -5583,11 +6098,11 @@ def handler(args: ops.testing.ExecArgs): args_history.append(args) harness.handle_exec(container, [], handler=handler) - proc = container.exec(["ls"], stdin="test") + proc = container.exec(['ls'], stdin='test') assert proc.stdin is None - assert args_history[-1].stdin == "test" + assert args_history[-1].stdin == 'test' - proc = container.exec(["ls"]) + proc = container.exec(['ls']) assert proc.stdin is not None assert args_history[-1].stdin is None @@ -5596,49 +6111,45 @@ def test_exec_stdout_stderr( harness: ops.testing.Harness[ops.CharmBase], container: ops.Container, ): - harness.handle_exec( - container, [], result=ExecResult( - stdout="output", stderr="error")) + harness.handle_exec(container, [], result=ExecResult(stdout='output', stderr='error')) stdout = io.StringIO() stderr = io.StringIO() - proc = container.exec(["ls"], stderr=stderr, stdout=stdout) + proc = container.exec(['ls'], stderr=stderr, stdout=stdout) assert proc.stdout is None assert proc.stderr is None proc.wait() - assert stdout.getvalue() == "output" - assert stderr.getvalue() == "error" + assert stdout.getvalue() == 'output' + assert stderr.getvalue() == 'error' - proc = container.exec(["ls"]) + proc = container.exec(['ls']) assert proc.stdout is not None # Not assertIsNotNone to help type checkers. assert proc.stderr is not None # Not assertIsNotNone to help type checkers. 
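# A minimal sketch of stubbing Container.exec with Harness.handle_exec, as the tests
# above do: register a result for a command prefix, then any exec matching that
# prefix returns it. The command name here is an arbitrary example.
import ops
import ops.testing
from ops.testing import ExecResult

harness = ops.testing.Harness(ops.CharmBase, meta="""
name: demo
containers:
  workload: {}
""")
harness.begin()
harness.set_can_connect('workload', True)
container = harness.charm.unit.get_container('workload')
harness.handle_exec(container, ['update-ca-certificates'], result=ExecResult(stdout='done\n'))
stdout, stderr = container.exec(['update-ca-certificates', '--fresh']).wait_output()
assert stdout == 'done\n'
assert stderr == ''
harness.cleanup()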
proc.wait() - assert proc.stdout.read() == "output" - assert proc.stderr.read() == "error" + assert proc.stdout.read() == 'output' + assert proc.stderr.read() == 'error' - harness.handle_exec( - container, [], result=ExecResult( - stdout=b"output", stderr=b"error")) + harness.handle_exec(container, [], result=ExecResult(stdout=b'output', stderr=b'error')) stdout = io.StringIO() stderr = io.StringIO() - proc = container.exec(["ls"], stderr=stderr, stdout=stdout) - assert stdout.getvalue() == "output" - assert stderr.getvalue() == "error" - proc = container.exec(["ls"]) + proc = container.exec(['ls'], stderr=stderr, stdout=stdout) + assert stdout.getvalue() == 'output' + assert stderr.getvalue() == 'error' + proc = container.exec(['ls']) assert proc.stdout is not None # Not assertIsNotNone to help type checkers. assert proc.stderr is not None # Not assertIsNotNone to help type checkers. - assert proc.stdout.read() == "output" - assert proc.stderr.read() == "error" + assert proc.stdout.read() == 'output' + assert proc.stderr.read() == 'error' stdout = io.BytesIO() stderr = io.BytesIO() - proc = container.exec(["ls"], stderr=stderr, stdout=stdout, encoding=None) - assert stdout.getvalue() == b"output" - assert stderr.getvalue() == b"error" - proc = container.exec(["ls"], encoding=None) + proc = container.exec(['ls'], stderr=stderr, stdout=stdout, encoding=None) + assert stdout.getvalue() == b'output' + assert stderr.getvalue() == b'error' + proc = container.exec(['ls'], encoding=None) assert proc.stdout is not None # Not assertIsNotNone to help type checkers. assert proc.stderr is not None # Not assertIsNotNone to help type checkers. - assert proc.stdout.read() == b"output" - assert proc.stderr.read() == b"error" + assert proc.stdout.read() == b'output' + assert proc.stderr.read() == b'error' def test_exec_service_context( self, @@ -5646,50 +6157,52 @@ def test_exec_service_context( container: ops.Container, ): service: ops.pebble.ServiceDict = { - "command": "test", - "working-dir": "/tmp", # noqa: S108 - "user": "foo", - "user-id": 1, - "group": "bar", - "group-id": 2, - "environment": {"foo": "bar", "foobar": "barfoo"} + 'command': 'test', + 'working-dir': '/tmp', # noqa: S108 + 'user': 'foo', + 'user-id': 1, + 'group': 'bar', + 'group-id': 2, + 'environment': {'foo': 'bar', 'foobar': 'barfoo'}, } layer: ops.pebble.LayerDict = { - 'summary': "", - 'description': "", - 'services': { - "test": service}} - container.add_layer(label="test", layer=ops.pebble.Layer(layer)) + 'summary': '', + 'description': '', + 'services': {'test': service}, + } + container.add_layer(label='test', layer=ops.pebble.Layer(layer)) args_history: typing.List[ops.testing.ExecArgs] = [] def handler(args: ops.testing.ExecArgs): args_history.append(args) - os.environ["JUJU_VERSION"] = "3.2.1" - harness.handle_exec(container, ["ls"], handler=handler) + os.environ['JUJU_VERSION'] = '3.2.1' + harness.handle_exec(container, ['ls'], handler=handler) - container.exec(["ls"], service_context="test").wait() - assert args_history[-1].working_dir == "/tmp" # noqa: S108 - assert args_history[-1].user == "foo" + container.exec(['ls'], service_context='test').wait() + assert args_history[-1].working_dir == '/tmp' # noqa: S108 + assert args_history[-1].user == 'foo' assert args_history[-1].user_id == 1 - assert args_history[-1].group == "bar" + assert args_history[-1].group == 'bar' assert args_history[-1].group_id == 2 - assert args_history[-1].environment == {"foo": "bar", "foobar": "barfoo"} - - container.exec(["ls"], - 
service_context="test", - working_dir="/test", - user="test", - user_id=3, - group="test_group", - group_id=4, - environment={"foo": "hello"}).wait() - assert args_history[-1].working_dir == "/test" - assert args_history[-1].user == "test" + assert args_history[-1].environment == {'foo': 'bar', 'foobar': 'barfoo'} + + container.exec( + ['ls'], + service_context='test', + working_dir='/test', + user='test', + user_id=3, + group='test_group', + group_id=4, + environment={'foo': 'hello'}, + ).wait() + assert args_history[-1].working_dir == '/test' + assert args_history[-1].user == 'test' assert args_history[-1].user_id == 3 - assert args_history[-1].group == "test_group" + assert args_history[-1].group == 'test_group' assert args_history[-1].group_id == 4 - assert args_history[-1].environment == {"foo": "hello", "foobar": "barfoo"} + assert args_history[-1].environment == {'foo': 'hello', 'foobar': 'barfoo'} class TestActions: @@ -5700,7 +6213,6 @@ def action_results(self): @pytest.fixture def harness(self, action_results: typing.Dict[str, typing.Any]): - class ActionCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) @@ -5708,7 +6220,8 @@ def __init__(self, framework: ops.Framework): self.framework.observe(self.on.fail_action, self._on_fail_action) self.framework.observe(self.on.results_action, self._on_results_action) self.framework.observe( - self.on.log_and_results_action, self._on_log_and_results_action) + self.on.log_and_results_action, self._on_log_and_results_action + ) self.simple_was_called = False def _on_simple_action(self, event: ops.ActionEvent): @@ -5717,27 +6230,30 @@ def _on_simple_action(self, event: ops.ActionEvent): assert isinstance(event.id, str) def _on_fail_action(self, event: ops.ActionEvent): - event.fail("this will be ignored") - event.log("some progress") + event.fail('this will be ignored') + event.log('some progress') if event.params.get('empty-failure-message'): event.fail() else: - event.fail("something went wrong") - event.log("more progress") + event.fail('something went wrong') + event.log('more progress') event.set_results(action_results) def _on_log_and_results_action(self, event: ops.ActionEvent): - event.log("Step 1") - event.set_results({"result1": event.params["foo"]}) - event.log("Step 2") - event.set_results({"result2": event.params.get("bar")}) + event.log('Step 1') + event.set_results({'result1': event.params['foo']}) + event.log('Step 2') + event.set_results({'result2': event.params.get('bar')}) def _on_results_action(self, event: ops.ActionEvent): event.set_results(action_results) - harness = ops.testing.Harness(ActionCharm, meta=''' + harness = ops.testing.Harness( + ActionCharm, + meta=""" name: test - ''', actions=''' + """, + actions=""" simple: description: lorem ipsum fail: @@ -5759,28 +6275,32 @@ def _on_results_action(self, event: ops.ActionEvent): type: integer results: description: incididunt ut labore - ''') + """, + ) harness.begin() yield harness harness.cleanup() def test_before_begin(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test - ''') + """, + ) with pytest.raises(RuntimeError): - harness.run_action("fail") + harness.run_action('fail') def test_invalid_action(self, harness: ops.testing.Harness[ops.CharmBase]): # This action isn't in the metadata at all. 
with pytest.raises(RuntimeError): - harness.run_action("another-action") + harness.run_action('another-action') # Also check that we're not exposing the action with the dash to underscore replacement. with pytest.raises(RuntimeError): - harness.run_action("log_and_results") + harness.run_action('log_and_results') def test_run_action(self, harness: ops.testing.Harness[ops.CharmBase]): - out = harness.run_action("simple") + out = harness.run_action('simple') assert out.logs == [] assert out.results == {} assert harness.charm.simple_was_called # type: ignore @@ -5796,78 +6316,85 @@ def test_fail_action( action_results: typing.Dict[str, typing.Any], harness: ops.testing.Harness[ops.CharmBase], ): - action_results["partial"] = "foo" + action_results['partial'] = 'foo' with pytest.raises(ops.testing.ActionFailed) as excinfo: - harness.run_action("fail") + harness.run_action('fail') - assert excinfo.value.message == "something went wrong" - assert excinfo.value.output.logs == ["some progress", "more progress"] - assert excinfo.value.output.results == {"partial": "foo"} + assert excinfo.value.message == 'something went wrong' + assert excinfo.value.output.logs == ['some progress', 'more progress'] + assert excinfo.value.output.results == {'partial': 'foo'} def test_required_param(self, harness: ops.testing.Harness[ops.CharmBase]): with pytest.raises(RuntimeError): - harness.run_action("unobserved-param-tester") + harness.run_action('unobserved-param-tester') with pytest.raises(RuntimeError): - harness.run_action("unobserved-param-tester", {"bar": "baz"}) - harness.run_action("unobserved-param-tester", {"foo": "baz"}) - harness.run_action("unobserved-param-tester", {"foo": "baz", "bar": "qux"}) + harness.run_action('unobserved-param-tester', {'bar': 'baz'}) + harness.run_action('unobserved-param-tester', {'foo': 'baz'}) + harness.run_action('unobserved-param-tester', {'foo': 'baz', 'bar': 'qux'}) def test_additional_params(self, harness: ops.testing.Harness[ops.CharmBase]): - harness.run_action("simple", {"foo": "bar"}) + harness.run_action('simple', {'foo': 'bar'}) with pytest.raises(ops.ModelError): - harness.run_action("unobserved-param-tester", {"foo": "bar", "qux": "baz"}) - harness.run_action("simple", { - "string": "hello", - "number": 28.8, - "object": {"a": {"b": "c"}}, - "array": [1, 2, 3], - "boolean": True, - "null": None}) + harness.run_action('unobserved-param-tester', {'foo': 'bar', 'qux': 'baz'}) + harness.run_action( + 'simple', + { + 'string': 'hello', + 'number': 28.8, + 'object': {'a': {'b': 'c'}}, + 'array': [1, 2, 3], + 'boolean': True, + 'null': None, + }, + ) def test_logs_and_results(self, harness: ops.testing.Harness[ops.CharmBase]): - out = harness.run_action("log-and-results") - assert out.logs == ["Step 1", "Step 2"] - assert out.results == {"result1": "foo-default", "result2": None} - out = harness.run_action("log-and-results", {"foo": "baz", "bar": 28}) - assert out.results == {"result1": "baz", "result2": 28} - - @pytest.mark.parametrize('prohibited_key', [ - "stdout", "stdout-encoding", "stderr", "stderr-encoding" - ]) + out = harness.run_action('log-and-results') + assert out.logs == ['Step 1', 'Step 2'] + assert out.results == {'result1': 'foo-default', 'result2': None} + out = harness.run_action('log-and-results', {'foo': 'baz', 'bar': 28}) + assert out.results == {'result1': 'baz', 'result2': 28} + + @pytest.mark.parametrize( + 'prohibited_key', ['stdout', 'stdout-encoding', 'stderr', 'stderr-encoding'] + ) def test_bad_results( self, action_results: 
typing.Dict[str, typing.Any], harness: ops.testing.Harness[ops.CharmBase], prohibited_key: str, ): - action_results["a"] = {"b": 1} - action_results["a.b"] = 2 + action_results['a'] = {'b': 1} + action_results['a.b'] = 2 with pytest.raises(ValueError): - harness.run_action("results") + harness.run_action('results') # There are some result key names we cannot use. action_results.clear() - action_results[prohibited_key] = "foo" + action_results[prohibited_key] = 'foo' with pytest.raises(ops.ModelError): - harness.run_action("results") + harness.run_action('results') # There are some additional rules around what result keys are valid. action_results.clear() - action_results["A"] = "foo" + action_results['A'] = 'foo' with pytest.raises(ValueError): - harness.run_action("results") + harness.run_action('results') class TestNotify: def test_notify_basics(self, request: pytest.FixtureRequest): - harness = ops.testing.Harness(ContainerEventCharm, meta=""" + harness = ops.testing.Harness( + ContainerEventCharm, + meta=""" name: notifier containers: foo: resource: foo-image bar: resource: foo-image - """) + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_container_events('foo') @@ -5890,59 +6417,71 @@ def test_notify_basics(self, request: pytest.FixtureRequest): assert id3 != '' assert id3 != id2 - expected_changes = [{ - 'name': 'pebble-custom-notice', - 'container': 'foo', - 'notice_id': id1a, - 'notice_type': 'custom', - 'notice_key': 'example.com/n1', - }, { - 'name': 'pebble-custom-notice', - 'container': 'foo', - 'notice_id': id2, - 'notice_type': 'custom', - 'notice_key': 'foo.com/n2', - }, { - 'name': 'pebble-custom-notice', - 'container': 'bar', - 'notice_id': id3, - 'notice_type': 'custom', - 'notice_key': 'example.com/n1', - }, { - 'name': 'pebble-custom-notice', - 'container': 'foo', - 'notice_id': id1a, - 'notice_type': 'custom', - 'notice_key': 'example.com/n1', - }] + expected_changes = [ + { + 'name': 'pebble-custom-notice', + 'container': 'foo', + 'notice_id': id1a, + 'notice_type': 'custom', + 'notice_key': 'example.com/n1', + }, + { + 'name': 'pebble-custom-notice', + 'container': 'foo', + 'notice_id': id2, + 'notice_type': 'custom', + 'notice_key': 'foo.com/n2', + }, + { + 'name': 'pebble-custom-notice', + 'container': 'bar', + 'notice_id': id3, + 'notice_type': 'custom', + 'notice_key': 'example.com/n1', + }, + { + 'name': 'pebble-custom-notice', + 'container': 'foo', + 'notice_id': id1a, + 'notice_type': 'custom', + 'notice_key': 'example.com/n1', + }, + ] assert harness.charm.changes == expected_changes def test_notify_no_repeat(self, request: pytest.FixtureRequest): """Ensure event doesn't get triggered when notice occurs but doesn't repeat.""" - harness = ops.testing.Harness(ContainerEventCharm, meta=""" + harness = ops.testing.Harness( + ContainerEventCharm, + meta=""" name: notifier containers: foo: resource: foo-image - """) + """, + ) request.addfinalizer(harness.cleanup) harness.begin() harness.charm.observe_container_events('foo') - id1a = harness.pebble_notify('foo', 'example.com/n1', - repeat_after=datetime.timedelta(days=1)) - id1b = harness.pebble_notify('foo', 'example.com/n1', - repeat_after=datetime.timedelta(days=1)) + id1a = harness.pebble_notify( + 'foo', 'example.com/n1', repeat_after=datetime.timedelta(days=1) + ) + id1b = harness.pebble_notify( + 'foo', 'example.com/n1', repeat_after=datetime.timedelta(days=1) + ) assert id1a == id1b - expected_changes = [{ - 'name': 'pebble-custom-notice', - 'container': 'foo', - 
'notice_id': id1a, - 'notice_type': 'custom', - 'notice_key': 'example.com/n1', - }] + expected_changes = [ + { + 'name': 'pebble-custom-notice', + 'container': 'foo', + 'notice_id': id1a, + 'notice_type': 'custom', + 'notice_key': 'example.com/n1', + } + ] assert harness.charm.changes == expected_changes def test_notify_no_begin(self, request: pytest.FixtureRequest): @@ -5951,19 +6490,23 @@ def test_notify_no_begin(self, request: pytest.FixtureRequest): class TestCharm(ops.CharmBase): def __init__(self, framework: ops.Framework): super().__init__(framework) - self.framework.observe(self.on['c1'].pebble_custom_notice, - self._on_pebble_custom_notice) + self.framework.observe( + self.on['c1'].pebble_custom_notice, self._on_pebble_custom_notice + ) def _on_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent): nonlocal num_notices num_notices += 1 - harness = ops.testing.Harness(TestCharm, meta=""" + harness = ops.testing.Harness( + TestCharm, + meta=""" name: notifier containers: c1: resource: c1-image - """) + """, + ) request.addfinalizer(harness.cleanup) id = harness.pebble_notify('c1', 'example.com/n1') @@ -6041,11 +6584,14 @@ def test_get_notices(self, client: PebbleClientType): class TestNotices(PebbleNoticesMixin): @pytest.fixture def client(self): - harness = ops.testing.Harness(ops.CharmBase, meta=''' + harness = ops.testing.Harness( + ops.CharmBase, + meta=""" name: test-app containers: mycontainer: {} - ''') + """, + ) backend = harness._backend client = backend.get_pebble('/charm/containers/mycontainer/pebble.socket') harness.set_can_connect('mycontainer', True) @@ -6071,11 +6617,7 @@ def _on_start(self, event: ops.StartEvent): 'endpoint': 'https://127.0.0.1:8443', 'credential': { 'auth-type': 'certificate', - 'attrs': { - 'client-cert': 'foo', - 'client-key': 'bar', - 'server-cert': 'baz' - }, + 'attrs': {'client-cert': 'foo', 'client-key': 'bar', 'server-cert': 'baz'}, }, } harness.set_cloud_spec(ops.CloudSpec.from_dict(cloud_spec_dict)) diff --git a/test/test_timeconv.py b/test/test_timeconv.py index ec7ba3c14..ad4b71983 100644 --- a/test/test_timeconv.py +++ b/test/test_timeconv.py @@ -23,37 +23,47 @@ def test_parse_rfc3339(): nzdt = datetime.timezone(datetime.timedelta(hours=13)) utc = datetime.timezone.utc - assert timeconv.parse_rfc3339('2020-12-25T13:45:50+13:00') == \ - datetime.datetime(2020, 12, 25, 13, 45, 50, 0, tzinfo=nzdt) + assert timeconv.parse_rfc3339('2020-12-25T13:45:50+13:00') == datetime.datetime( + 2020, 12, 25, 13, 45, 50, 0, tzinfo=nzdt + ) - assert timeconv.parse_rfc3339('2020-12-25T13:45:50.123456789+13:00') == \ - datetime.datetime(2020, 12, 25, 13, 45, 50, 123457, tzinfo=nzdt) + assert timeconv.parse_rfc3339('2020-12-25T13:45:50.123456789+13:00') == datetime.datetime( + 2020, 12, 25, 13, 45, 50, 123457, tzinfo=nzdt + ) - assert timeconv.parse_rfc3339('2021-02-10T04:36:22Z') == \ - datetime.datetime(2021, 2, 10, 4, 36, 22, 0, tzinfo=utc) + assert timeconv.parse_rfc3339('2021-02-10T04:36:22Z') == datetime.datetime( + 2021, 2, 10, 4, 36, 22, 0, tzinfo=utc + ) - assert timeconv.parse_rfc3339('2021-02-10t04:36:22z') == \ - datetime.datetime(2021, 2, 10, 4, 36, 22, 0, tzinfo=utc) + assert timeconv.parse_rfc3339('2021-02-10t04:36:22z') == datetime.datetime( + 2021, 2, 10, 4, 36, 22, 0, tzinfo=utc + ) - assert timeconv.parse_rfc3339('2021-02-10T04:36:22.118970777Z') == \ - datetime.datetime(2021, 2, 10, 4, 36, 22, 118971, tzinfo=utc) + assert timeconv.parse_rfc3339('2021-02-10T04:36:22.118970777Z') == datetime.datetime( + 2021, 2, 10, 4, 36, 22, 
118971, tzinfo=utc + ) - assert timeconv.parse_rfc3339('2020-12-25T13:45:50.123456789+00:00') == \ - datetime.datetime(2020, 12, 25, 13, 45, 50, 123457, tzinfo=utc) + assert timeconv.parse_rfc3339('2020-12-25T13:45:50.123456789+00:00') == datetime.datetime( + 2020, 12, 25, 13, 45, 50, 123457, tzinfo=utc + ) - assert timeconv.parse_rfc3339('2006-08-28T13:20:00.9999999Z') == \ - datetime.datetime(2006, 8, 28, 13, 20, 0, 999999, tzinfo=utc) + assert timeconv.parse_rfc3339('2006-08-28T13:20:00.9999999Z') == datetime.datetime( + 2006, 8, 28, 13, 20, 0, 999999, tzinfo=utc + ) - assert timeconv.parse_rfc3339('2006-12-31T23:59:59.9999999Z') == \ - datetime.datetime(2006, 12, 31, 23, 59, 59, 999999, tzinfo=utc) + assert timeconv.parse_rfc3339('2006-12-31T23:59:59.9999999Z') == datetime.datetime( + 2006, 12, 31, 23, 59, 59, 999999, tzinfo=utc + ) tzinfo = datetime.timezone(datetime.timedelta(hours=-11, minutes=-30)) - assert timeconv.parse_rfc3339('2020-12-25T13:45:50.123456789-11:30') == \ - datetime.datetime(2020, 12, 25, 13, 45, 50, 123457, tzinfo=tzinfo) + assert timeconv.parse_rfc3339('2020-12-25T13:45:50.123456789-11:30') == datetime.datetime( + 2020, 12, 25, 13, 45, 50, 123457, tzinfo=tzinfo + ) tzinfo = datetime.timezone(datetime.timedelta(hours=4)) - assert timeconv.parse_rfc3339('2000-01-02T03:04:05.006000+04:00') == \ - datetime.datetime(2000, 1, 2, 3, 4, 5, 6000, tzinfo=tzinfo) + assert timeconv.parse_rfc3339('2000-01-02T03:04:05.006000+04:00') == datetime.datetime( + 2000, 1, 2, 3, 4, 5, 6000, tzinfo=tzinfo + ) with pytest.raises(ValueError): timeconv.parse_rfc3339('') @@ -71,81 +81,85 @@ def test_parse_rfc3339(): timeconv.parse_rfc3339('2021-02-10T04:36:22.118970777-99:99') -@pytest.mark.parametrize("input,expected", [ - # Test cases taken from Go's time.ParseDuration tests - # simple - ('0', datetime.timedelta(seconds=0)), - ('5s', datetime.timedelta(seconds=5)), - ('30s', datetime.timedelta(seconds=30)), - ('1478s', datetime.timedelta(seconds=1478)), - # sign - ('-5s', datetime.timedelta(seconds=-5)), - ('+5s', datetime.timedelta(seconds=5)), - ('-0', datetime.timedelta(seconds=0)), - ('+0', datetime.timedelta(seconds=0)), - # decimal - ('5.0s', datetime.timedelta(seconds=5)), - ('5.6s', datetime.timedelta(seconds=5.6)), - ('5.s', datetime.timedelta(seconds=5)), - ('.5s', datetime.timedelta(seconds=0.5)), - ('1.0s', datetime.timedelta(seconds=1)), - ('1.00s', datetime.timedelta(seconds=1)), - ('1.004s', datetime.timedelta(seconds=1.004)), - ('1.0040s', datetime.timedelta(seconds=1.004)), - ('100.00100s', datetime.timedelta(seconds=100.001)), - # different units - ('10ns', datetime.timedelta(seconds=0.000_000_010)), - ('11us', datetime.timedelta(seconds=0.000_011)), - ('12µs', datetime.timedelta(seconds=0.000_012)), # U+00B5 # noqa: RUF001 - ('12μs', datetime.timedelta(seconds=0.000_012)), # U+03BC - ('13ms', datetime.timedelta(seconds=0.013)), - ('14s', datetime.timedelta(seconds=14)), - ('15m', datetime.timedelta(seconds=15 * 60)), - ('16h', datetime.timedelta(seconds=16 * 60 * 60)), - # composite durations - ('3h30m', datetime.timedelta(seconds=3 * 60 * 60 + 30 * 60)), - ('10.5s4m', datetime.timedelta(seconds=4 * 60 + 10.5)), - ('-2m3.4s', datetime.timedelta(seconds=-(2 * 60 + 3.4))), - ('1h2m3s4ms5us6ns', datetime.timedelta(seconds=1 * 60 * 60 + 2 * 60 + 3.004_005_006)), - ('39h9m14.425s', datetime.timedelta(seconds=39 * 60 * 60 + 9 * 60 + 14.425)), - # large value - ('52763797000ns', datetime.timedelta(seconds=52.763_797_000)), - # more than 9 digits after decimal point, see 
https://golang.org/issue/6617 - ('0.3333333333333333333h', datetime.timedelta(seconds=20 * 60)), - # huge string; issue 15011. - ('0.100000000000000000000h', datetime.timedelta(seconds=6 * 60)), - # This value tests the first overflow check in leadingFraction. - ('0.830103483285477580700h', datetime.timedelta(seconds=49 * 60 + 48.372_539_827)), - # Test precision handling - ('7200000h1us', datetime.timedelta(hours=7_200_000, microseconds=1)) -]) +@pytest.mark.parametrize( + 'input,expected', + [ + # Test cases taken from Go's time.ParseDuration tests + # simple + ('0', datetime.timedelta(seconds=0)), + ('5s', datetime.timedelta(seconds=5)), + ('30s', datetime.timedelta(seconds=30)), + ('1478s', datetime.timedelta(seconds=1478)), + # sign + ('-5s', datetime.timedelta(seconds=-5)), + ('+5s', datetime.timedelta(seconds=5)), + ('-0', datetime.timedelta(seconds=0)), + ('+0', datetime.timedelta(seconds=0)), + # decimal + ('5.0s', datetime.timedelta(seconds=5)), + ('5.6s', datetime.timedelta(seconds=5.6)), + ('5.s', datetime.timedelta(seconds=5)), + ('.5s', datetime.timedelta(seconds=0.5)), + ('1.0s', datetime.timedelta(seconds=1)), + ('1.00s', datetime.timedelta(seconds=1)), + ('1.004s', datetime.timedelta(seconds=1.004)), + ('1.0040s', datetime.timedelta(seconds=1.004)), + ('100.00100s', datetime.timedelta(seconds=100.001)), + # different units + ('10ns', datetime.timedelta(seconds=0.000_000_010)), + ('11us', datetime.timedelta(seconds=0.000_011)), + ('12µs', datetime.timedelta(seconds=0.000_012)), # U+00B5 # noqa: RUF001 + ('12μs', datetime.timedelta(seconds=0.000_012)), # U+03BC + ('13ms', datetime.timedelta(seconds=0.013)), + ('14s', datetime.timedelta(seconds=14)), + ('15m', datetime.timedelta(seconds=15 * 60)), + ('16h', datetime.timedelta(seconds=16 * 60 * 60)), + # composite durations + ('3h30m', datetime.timedelta(seconds=3 * 60 * 60 + 30 * 60)), + ('10.5s4m', datetime.timedelta(seconds=4 * 60 + 10.5)), + ('-2m3.4s', datetime.timedelta(seconds=-(2 * 60 + 3.4))), + ('1h2m3s4ms5us6ns', datetime.timedelta(seconds=1 * 60 * 60 + 2 * 60 + 3.004_005_006)), + ('39h9m14.425s', datetime.timedelta(seconds=39 * 60 * 60 + 9 * 60 + 14.425)), + # large value + ('52763797000ns', datetime.timedelta(seconds=52.763_797_000)), + # more than 9 digits after decimal point, see https://golang.org/issue/6617 + ('0.3333333333333333333h', datetime.timedelta(seconds=20 * 60)), + # huge string; issue 15011. + ('0.100000000000000000000h', datetime.timedelta(seconds=6 * 60)), + # This value tests the first overflow check in leadingFraction. 
+ ('0.830103483285477580700h', datetime.timedelta(seconds=49 * 60 + 48.372_539_827)), + # Test precision handling + ('7200000h1us', datetime.timedelta(hours=7_200_000, microseconds=1)), + ], +) def test_parse_duration(input: str, expected: datetime.timedelta): output = timeconv.parse_duration(input) - assert output == expected, \ - f'parse_duration({input!r}): expected {expected!r}, got {output!r}' - - -@pytest.mark.parametrize("input", [ - # Test cases taken from Go's time.ParseDuration tests - '', - '3', - '-', - 's', - '.', - '-.', - '.s', - '+.s', - '1d', - '\x85\x85', - '\xffff', - 'hello \xffff world', - - # Additional cases - 'X3h', - '3hY', - 'X3hY', - '3.4.5s', -]) + assert output == expected, f'parse_duration({input!r}): expected {expected!r}, got {output!r}' + + +@pytest.mark.parametrize( + 'input', + [ + # Test cases taken from Go's time.ParseDuration tests + '', + '3', + '-', + 's', + '.', + '-.', + '.s', + '+.s', + '1d', + '\x85\x85', + '\xffff', + 'hello \xffff world', + # Additional cases + 'X3h', + '3hY', + 'X3hY', + '3.4.5s', + ], +) def test_parse_duration_errors(input: str): with pytest.raises(ValueError): timeconv.parse_duration(input) diff --git a/tox.ini b/tox.ini index 933b7b2eb..7caa76f9c 100644 --- a/tox.ini +++ b/tox.ini @@ -40,16 +40,14 @@ commands = [testenv:fmt] description = Apply coding style standards to code deps = - autopep8~=1.6 - isort~=5.13 + ruff==0.4.5 commands = - isort {[vars]all_path} --multi-line=3 --line-length=99 --split-on-trailing-comma - autopep8 --in-place {[vars]all_path} + ruff format --preview [testenv:lint] description = Check code against coding style standards deps = - ruff~=0.3.5 + ruff==0.4.5 commands = ruff check --preview