diff --git a/.github/workflows/tox.yaml b/.github/workflows/tox.yaml
index 9379c9f94..efb9e5698 100644
--- a/.github/workflows/tox.yaml
+++ b/.github/workflows/tox.yaml
@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.6', '3.7', '3.8', '3.9', '3.10']
+        python-version: ['3.8', '3.9', '3.10']
     steps:
     - uses: actions/checkout@v1
@@ -35,21 +35,11 @@ jobs:
       fail-fast: false
       matrix:
         juju_channel:
-          - latest/stable
-          - 2.9/stable
-          - 2.8/stable
+          - 3.1/stable
         bundle:
           - first
          - second
          - third
-        exclude:
-          # disable 'first' and 'second' bundles for juju 2.8 since 'magpie'
-          # is not a promulgated charm in the charmstore, only on charmhub
-          # which 2.8 can't talk to.
-          - juju_channel: 2.8/stable
-            bundle: first
-          - juju_channel: 2.8/stable
-            bundle: second
     env:
       TEST_ZAZA_BUG_LP1987332: "on"  # http://pad.lv/1987332
     needs: build
@@ -60,19 +50,21 @@ jobs:
         set -euxo pipefail
         python -m pip install --upgrade pip
         pip install tox tox-gh-actions
-        sudo snap install --channel ${{ matrix.juju_channel }} --classic juju
+        sudo snap install --channel ${{ matrix.juju_channel }} juju
         sudo snap install --classic juju-crashdump
         sudo lxd init --auto
         # This is a throw-away CI environment, do not do this at home
         sudo chmod 666 /var/snap/lxd/common/lxd/unix.socket
         # until Juju provides stable IPv6-support we unfortunately need this
         lxc network set lxdbr0 ipv6.address none
+        sudo iptables -F FORWARD
+        sudo iptables -P FORWARD ACCEPT
         # pull images
         lxc image copy --alias juju/bionic/amd64 --copy-aliases ubuntu-daily:bionic local:
         lxc image copy --alias juju/focal/amd64 --copy-aliases ubuntu-daily:focal local:
         lxc image copy --alias juju/jammy/amd64 --copy-aliases ubuntu-daily:jammy local:
         lxc image list
-        juju bootstrap --no-gui localhost
+        juju bootstrap localhost
     - name: Functional test
       run: |
         set -euxo pipefail
diff --git a/requirements.txt b/requirements.txt
index faffe82cc..c4c46c09a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,7 @@
 pyparsing<3.0.0  # pin for aodhclient which is held for py35
 async_generator
 kubernetes<18.0.0; python_version < '3.6'  # pined, as juju uses kubernetes
-# pinned until 3.0 regressions are handled: https://github.com/openstack-charmers/zaza/issues/545
-juju<3.0
+juju<3.2
 juju_wait
 PyYAML>=3.0
 pbr==5.6.0
diff --git a/setup.py b/setup.py
index 87445e411..00e19cdfb 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
     'hvac<0.7.0',
     'jinja2',
-    'juju<3.0',
+    'juju<3.2',
     'juju-wait',
     'PyYAML',
     'tenacity',
diff --git a/test-requirements.txt b/test-requirements.txt
index a4a067b23..e93a02978 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -17,5 +17,5 @@ keystoneauth1
 oslo.config
 python-novaclient
 tenacity
-# pinned until 3.0 regressions are handled: https://github.com/openstack-charmers/zaza/issues/545
-juju<3.0
+# Fix upper version to ensure compatibility with Juju 3.1
+juju<3.2
diff --git a/unit_tests/test_zaza_model.py b/unit_tests/test_zaza_model.py
index 0c8cf962e..8fa8f3316 100644
--- a/unit_tests/test_zaza_model.py
+++ b/unit_tests/test_zaza_model.py
@@ -134,6 +134,9 @@ async def _add_unit(count=1, to=None):
         async def _destroy_unit(*unitnames):
             return
 
+        async def _scale(scale=None, scale_change=None):
+            return
+
         def _is_leader(leader):
             async def _inner_is_leader():
                 return leader
@@ -142,6 +145,7 @@ async def _inner_is_leader():
         self.run_action = mock.MagicMock()
         self.run_action.wait.side_effect = _wait
         self.action = mock.MagicMock()
+        self.action.wait.side_effect = _wait
         self.action.data = {
             'model-uuid': '1a035018-71ff-473e-8aab-d1a8d6b6cda7',
             'id': 'e26ffb69-6626-4e93-8840-07f7e041e99d',
@@ -155,6 +159,9 @@ async def _inner_is_leader():
             'enqueued': '2018-04-11T23:13:42Z',
             'started': '2018-04-11T23:13:42Z',
             'completed': '2018-04-11T23:13:43Z'}
+        self.action.results = {
+            'return-code': '0', 'stderr': '', 'stdout': 'RESULT'
+        }
 
         self.machine3 = mock.MagicMock(status='active')
         self.machine7 = mock.MagicMock(status='active')
@@ -209,6 +216,7 @@ def fail_on_use():
         _units.destroy_relation.side_effect = _destroy_relation
         _units.add_unit.side_effect = _add_unit
         _units.destroy_unit.side_effect = _destroy_unit
+        _units.scale.side_effect = _scale
 
         self.mymodel = mock.MagicMock()
         self.mymodel.applications = {
@@ -493,7 +501,9 @@ def test_block_until_auto_reconnect_model_disconnected_sync(self):
         with mock.patch.object(zaza, 'RUN_LIBJUJU_IN_THREAD', new=False):
             model.sync_wrapper(self._wrapper)()
         self.Model_mock.disconnect.assert_has_calls([mock.call()])
-        self.Model_mock.connect_model.has_calls([mock.call('modelname')])
+        self.Model_mock.connect_model.assert_has_calls(
+            [mock.call('testmodel')]
+        )
 
     def test_block_until_auto_reconnect_model_disconnected_async(self):
         self._mocks_for_block_until_auto_reconnect_model(
@@ -506,7 +516,9 @@ async def _async_true():
         with mock.patch.object(zaza, 'RUN_LIBJUJU_IN_THREAD', new=False):
             model.sync_wrapper(self._wrapper)()
         self.Model_mock.disconnect.assert_has_calls([mock.call()])
-        self.Model_mock.connect_model.has_calls([mock.call('modelname')])
+        self.Model_mock.connect_model.assert_has_calls(
+            [mock.call('testmodel')]
+        )
 
     def test_block_until_auto_reconnect_model_blocks_till_true(self):
         self._mocks_for_block_until_auto_reconnect_model(True, True)
@@ -775,7 +787,47 @@ def test_run_on_unit(self):
                          expected)
         self.unit1.run.assert_called_once_with(cmd, timeout=None)
 
+    def test_run_on_unit_juju2_x(self):
+        del self.action.results
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        expected = {
+            'Code': '0',
+            'Stderr': '',
+            'Stdout': 'RESULT',
+            'stderr': '',
+            'stdout': 'RESULT'}
+        self.cmd = cmd = 'somecommand someargument'
+        self.patch_object(model, 'Model')
+        self.patch_object(model, 'get_unit_from_name')
+        self.get_unit_from_name.return_value = self.unit1
+        self.Model.return_value = self.Model_mock
+        self.assertEqual(model.run_on_unit('app/2', cmd),
+                         expected)
+        self.unit1.run.assert_called_once_with(cmd, timeout=None)
+
     def test_run_on_unit_lc_keys(self):
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.action.results = {
+            'return-code': '0',
+            'stdout': 'RESULT',
+            'stderr': 'some error'}
+        expected = {
+            'Code': '0',
+            'Stderr': 'some error',
+            'Stdout': 'RESULT',
+            'stderr': 'some error',
+            'stdout': 'RESULT'}
+        self.cmd = cmd = 'somecommand someargument'
+        self.patch_object(model, 'Model')
+        self.patch_object(model, 'get_unit_from_name')
+        self.get_unit_from_name.return_value = self.unit1
+        self.Model.return_value = self.Model_mock
+        self.assertEqual(model.run_on_unit('app/2', cmd),
+                         expected)
+        self.unit1.run.assert_called_once_with(cmd, timeout=None)
+
+    def test_run_on_unit_lc_keys_juju2_x(self):
+        del self.action.results
         self.patch_object(model, 'get_juju_model', return_value='mname')
         self.action.data['results'] = {
             'Code': '0',
@@ -797,6 +849,25 @@ def test_run_on_unit_lc_keys(self):
         self.unit1.run.assert_called_once_with(cmd, timeout=None)
 
     def test_run_on_unit_missing_stderr(self):
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        expected = {
+            'Code': '0',
+            'Stderr': '',
+            'Stdout': 'RESULT',
+            'stderr': '',
+            'stdout': 'RESULT'}
+        self.action.results = {'return-code': '0', 'stdout': 'RESULT'}
+        self.cmd = cmd = 'somecommand someargument'
+        self.patch_object(model, 'Model')
+        self.patch_object(model, 'get_unit_from_name')
+        self.get_unit_from_name.return_value = self.unit1
+        self.Model.return_value = self.Model_mock
+        self.assertEqual(model.run_on_unit('app/2', cmd),
+                         expected)
+        self.unit1.run.assert_called_once_with(cmd, timeout=None)
+
+    def test_run_on_unit_missing_stderr_juju2_x(self):
+        del self.action.results
         self.patch_object(model, 'get_juju_model', return_value='mname')
         expected = {
             'Code': '0',
@@ -829,6 +900,22 @@ def test_run_on_leader(self):
                          expected)
         self.unit2.run.assert_called_once_with(cmd, timeout=None)
 
+    def test_run_on_leader_juju2_x(self):
+        del self.action.results
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        expected = {
+            'Code': '0',
+            'Stderr': '',
+            'Stdout': 'RESULT',
+            'stderr': '',
+            'stdout': 'RESULT'}
+        self.cmd = cmd = 'somecommand someargument'
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.assertEqual(model.run_on_leader('app', cmd),
+                         expected)
+        self.unit2.run.assert_called_once_with(cmd, timeout=None)
+
     def test_get_relation_id(self):
         self.patch_object(model, 'get_juju_model', return_value='mname')
         self.patch_object(model, 'Model')
@@ -891,6 +978,36 @@ def test_destroy_unit_wait(self):
         self.async_block_until_unit_count.assert_called_once_with(
             'app', 1, model_name=None)
 
+    def test_scale_out(self):
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        model.scale('app', scale=3)
+        self.mymodel.applications['app'].scale.assert_called_once_with(
+            scale=3, scale_change=None)
+
+    def test_scale_wait(self):
+        self.patch_object(model, 'async_block_until_unit_count')
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        model.scale('app', scale=3, wait=True)
+        self.mymodel.applications['app'].scale.assert_called_once_with(
+            scale=3, scale_change=None)
+        self.async_block_until_unit_count.assert_called_once_with(
+            'app', 3, model_name=None)
+
+    def test_scale_back_wait(self):
+        self.patch_object(model, 'async_block_until_unit_count')
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        model.scale('app', scale_change=-1, wait=True)
+        self.mymodel.applications['app'].scale.assert_called_once_with(
+            scale=None, scale_change=-1)
+        self.async_block_until_unit_count.assert_called_once_with(
+            'app', 1, model_name=None)
+
     def test_get_relation_id_interface(self):
         self.patch_object(model, 'get_juju_model', return_value='mname')
         self.patch_object(model, 'Model')
@@ -1005,7 +1122,7 @@ async def _async_get_unit_from_name(x, *args):
             return units[x]
 
         self.async_get_unit_from_name.side_effect = _async_get_unit_from_name
-        self.run_action.status = 'completed'
+        self.run_action.data = {'status': 'completed'}
         model.run_action_on_units(
             ['app/1', 'app/2'],
             'backup',
@@ -1023,7 +1140,7 @@ def test_run_action_on_units_timeout(self):
         self.Model.return_value = self.Model_mock
         self.patch_object(model, 'get_unit_from_name')
         self.get_unit_from_name.return_value = self.unit1
-        self.run_action.status = 'running'
+        self.run_action.data = {'status': 'running'}
         with self.assertRaises(AsyncTimeoutError):
             model.run_action_on_units(
                 ['app/2'],
@@ -1041,7 +1158,7 @@ async def _fake_get_action_output(_):
         self.Model.return_value = self.Model_mock
         self.patch_object(model, 'get_unit_from_name')
         self.get_unit_from_name.return_value = self.unit1
-        self.run_action.status = 'failed'
+        self.run_action.data = {'status': 'failed'}
         with self.assertRaises(model.ActionFailed):
             model.run_action_on_units(
                 ['app/2'],
@@ -1498,6 +1615,25 @@ def test_get_current_model(self):
         self.assertEqual(model.get_current_model(), self.model_name)
 
     def test_file_contents_success(self):
+        self.action.results = {
+            'return-code': '0',
+            'stderr': '',
+            'stdout': 'somestring'
+        }
+
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        contents = model.file_contents(
+            'app/2',
+            '/tmp/src/myfile.txt',
+            timeout=0.1)
+        self.unit1.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt', timeout=0.1)
+        self.assertEqual('somestring', contents)
+
+    def test_file_contents_success_juju2_x(self):
+        del self.action.results
         self.action.data = {
             'results': {'Code': '0', 'Stderr': '', 'Stdout': 'somestring'}
         }
@@ -1514,6 +1650,23 @@ def test_file_contents_success(self):
         self.assertEqual('somestring', contents)
 
     def test_file_contents_fault(self):
+        self.action.results = {
+            'return-code': '0',
+            'stderr': 'fault',
+            'stdout': ''
+        }
+
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        with self.assertRaises(model.RemoteFileError) as ctxtmgr:
+            model.file_contents('app/2', '/tmp/src/myfile.txt', timeout=0.1)
+        self.unit1.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt', timeout=0.1)
+        self.assertEqual(ctxtmgr.exception.args, ('fault',))
+
+    def test_file_contents_fault_juju2_x(self):
+        del self.action.results
         self.action.data = {
             'results': {'Code': '0', 'Stderr': 'fault', 'Stdout': ''}
         }
@@ -1528,6 +1681,33 @@ def test_file_contents_fault(self):
         self.assertEqual(ctxtmgr.exception.args, ('fault',))
 
     def test_block_until_file_has_contents(self):
+        self.action.results = {
+            'return-code': '0',
+            'stderr': '',
+            'stdout': 'somestring'
+        }
+
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.patch("builtins.open",
+                   new_callable=mock.mock_open(),
+                   name="_open")
+        _fileobj = mock.MagicMock()
+        _fileobj.__enter__().read.return_value = "somestring"
+        self._open.return_value = _fileobj
+        model.block_until_file_has_contents(
+            'app',
+            '/tmp/src/myfile.txt',
+            'somestring',
+            timeout=0.1)
+        self.unit1.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt')
+        self.unit2.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt')
+
+    def test_block_until_file_has_contents_juju2_x(self):
+        del self.action.results
         self.action.data = {
             'results': {'Code': '0', 'Stderr': '', 'Stdout': 'somestring'}
         }
@@ -1552,6 +1732,29 @@ def test_block_until_file_has_contents(self):
             'cat /tmp/src/myfile.txt')
 
     def test_block_until_file_has_no_contents(self):
+        self.action.results = {'return-code': '0', 'stderr': ''}
+
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.patch("builtins.open",
+                   new_callable=mock.mock_open(),
+                   name="_open")
+        _fileobj = mock.MagicMock()
+        _fileobj.__enter__().read.return_value = ""
+        self._open.return_value = _fileobj
+        model.block_until_file_has_contents(
+            'app',
+            '/tmp/src/myfile.txt',
+            '',
+            timeout=0.1)
+        self.unit1.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt')
+        self.unit2.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt')
+
+    def test_block_until_file_has_no_contents_juju2_x(self):
+        del self.action.results
         self.action.data = {
             'results': {'Code': '0', 'Stderr': ''}
         }
@@ -1597,6 +1800,19 @@ def test_block_until_file_missing(self):
         self.patch_object(model, 'Model')
         self.Model.return_value = self.Model_mock
         self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.action.results = {'stdout': "1"}
+        model.block_until_file_missing(
+            'app',
+            '/tmp/src/myfile.txt',
+            timeout=0.1)
+        self.unit1.run.assert_called_once_with(
+            'test -e "/tmp/src/myfile.txt"; echo $?')
+
+    def test_block_until_file_missing_juju2_x(self):
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        del self.action.results
         self.action.data['results']['Stdout'] = "1"
         model.block_until_file_missing(
             'app',
@@ -1617,6 +1833,27 @@ def test_block_until_file_missing_isnt_missing(self):
             timeout=0.1)
 
     def test_block_until_file_matches_re(self):
+        self.action.results = {
+            'return-code': '0',
+            'stderr': '',
+            'stdout': 'somestring'
+        }
+
+        self.patch_object(model, 'Model')
+        self.Model.return_value = self.Model_mock
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        model.block_until_file_matches_re(
+            'app',
+            '/tmp/src/myfile.txt',
+            's.*string',
+            timeout=0.1)
+        self.unit1.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt')
+        self.unit2.run.assert_called_once_with(
+            'cat /tmp/src/myfile.txt')
+
+    def test_block_until_file_matches_re_juju2_x(self):
+        del self.action.results
         self.action.data = {
             'results': {'Code': '0', 'Stderr': '', 'Stdout': 'somestring'}
         }
@@ -1898,6 +2135,24 @@ async def _run_on_unit(
 
     def block_until_oslo_config_entries_match_base(self, file_contents,
                                                    expected_contents):
+        self.action.results = {
+            'return-code': '0',
+            'stderr': '',
+            'stdout': file_contents
+        }
+        self.patch_object(model, 'Model')
+        self.patch_object(model, 'get_juju_model', return_value='mname')
+        self.Model.return_value = self.Model_mock
+        model.block_until_oslo_config_entries_match(
+            'app',
+            '/tmp/src/myfile.txt',
+            expected_contents,
+            timeout=0.1)
+
+    def block_until_oslo_config_entries_match_base_juju2_x(self,
+                                                           file_contents,
+                                                           expected_contents):
+        del self.action.results
         self.action.data = {
             'results': {'Code': '0', 'Stderr': '', 'Stdout': file_contents}
         }
diff --git a/unit_tests/utilities/test_zaza_utilities_generic.py b/unit_tests/utilities/test_zaza_utilities_generic.py
index 5d3fb7759..038d53e0b 100644
--- a/unit_tests/utilities/test_zaza_utilities_generic.py
+++ b/unit_tests/utilities/test_zaza_utilities_generic.py
@@ -254,7 +254,7 @@ def test_series_upgrade(self):
             _unit, _machine_num, origin=_origin, to_series=_to_series,
             from_series=_from_series, workaround_script=_workaround_script,
             files=_files)
-        self.block_until_all_units_idle.called_with()
+        self.block_until_all_units_idle.assert_called_with()
         self.prepare_series_upgrade.assert_called_once_with(
             _machine_num, to_series=_to_series)
         self.wrap_do_release_upgrade.assert_called_once_with(
diff --git a/zaza/charm_lifecycle/deploy.py b/zaza/charm_lifecycle/deploy.py
index dc2d7b301..5ea880be7 100755
--- a/zaza/charm_lifecycle/deploy.py
+++ b/zaza/charm_lifecycle/deploy.py
@@ -339,7 +339,7 @@ def deploy_bundle(bundle, model, model_ctxt=None, force=False, trust=False):
     if trust:
         cmd.append('--trust')
     bundle_out = None
-    with tempfile.TemporaryDirectory() as tmpdirname:
+    with tempfile.TemporaryDirectory(dir=os.environ['HOME']) as tmpdirname:
         # Bundle templates should only exist in the bundle directory so
         # explicitly set the Jinja2 load path.
         bundle_template = get_template(
diff --git a/zaza/model.py b/zaza/model.py
index e5539e15e..002edbc1a 100644
--- a/zaza/model.py
+++ b/zaza/model.py
@@ -545,6 +545,9 @@ def _normalise_action_results(results):
                 results[old_key] = results[key]
             elif results.get(old_key) and not results.get(key):
                 results[key] = results[old_key]
+        if 'return-code' in results:
+            results['Code'] = str(results.get('return-code'))
+            del results['return-code']
         return results
     else:
         return {}
@@ -567,7 +570,12 @@ async def async_run_on_unit(unit_name, command, model_name=None, timeout=None):
     model = await get_model(model_name)
     unit = await async_get_unit_from_name(unit_name, model)
     action = await unit.run(command, timeout=timeout)
-    results = action.data.get('results')
+    await action.wait()
+    results = None
+    try:
+        results = action.results
+    except (AttributeError, KeyError):
+        results = action.data.get('results')
     return _normalise_action_results(results)
 
 run_on_unit = sync_wrapper(async_run_on_unit)
@@ -593,7 +601,12 @@ async def async_run_on_leader(application_name, command, model_name=None,
         is_leader = await unit.is_leader_from_status()
         if is_leader:
             action = await unit.run(command, timeout=timeout)
-            results = action.data.get('results')
+            await action.wait()
+            results = None
+            try:
+                results = action.results
+            except (AttributeError, KeyError):
+                results = action.data.get('results')
             return _normalise_action_results(results)
 
 run_on_leader = sync_wrapper(async_run_on_leader)
@@ -1071,6 +1084,32 @@ def __init__(self, action, output=None):
         super(ActionFailed, self).__init__(message)
 
 
+def _normalise_action_object(action_obj):
+    """Put run action results in a consistent format.
+
+    Prior to libjuju 3.x, action status and results are
+    in obj.data['status'] and obj.data['results'].
+    From libjuju 3.x, status and results are modified
+    to obj._status and obj.results.
+    This function normalises the status to
+    obj.data['status'] and results to obj.data['results'].
+
+    :param action_obj: action object
+    :type action_obj: juju.action.Action
+    :returns: Updated action object
+    :rtype: juju.action.Action
+    """
+    try:
+        # libjuju 3.x
+        action_obj.data['status'] = action_obj._status
+        action_obj.data['results'] = action_obj.results
+    except (AttributeError, KeyError):
+        # libjuju 2.x format, no changes needed
+        pass
+
+    return action_obj
+
+
 async def async_run_action(unit_name, action_name, model_name=None,
                            action_params=None, raise_on_failure=False):
     """Run action on given unit.
@@ -1096,6 +1135,7 @@ async def async_run_action(unit_name, action_name, model_name=None,
     unit = await async_get_unit_from_name(unit_name, model)
     action_obj = await unit.run_action(action_name, **action_params)
     await action_obj.wait()
+    action_obj = _normalise_action_object(action_obj)
     if raise_on_failure and action_obj.status != 'completed':
         try:
             output = await model.get_action_output(action_obj.id)
@@ -1136,6 +1176,7 @@ async def async_run_action_on_leader(application_name, action_name,
             action_obj = await unit.run_action(action_name, **action_params)
             await action_obj.wait()
+            action_obj = _normalise_action_object(action_obj)
             if raise_on_failure and action_obj.status != 'completed':
                 try:
                     output = await model.get_action_output(action_obj.id)
@@ -1183,14 +1224,14 @@ async def async_run_action_on_units(units, action_name, action_params=None,
 
     async def _check_actions():
         for action_obj in actions:
-            if action_obj.status in ['running', 'pending']:
+            if action_obj.data['status'] in ['running', 'pending']:
                 return False
         return True
 
     await async_block_until(_check_actions, timeout=timeout)
 
     for action_obj in actions:
-        if raise_on_failure and action_obj.status != 'completed':
+        if raise_on_failure and action_obj.data['status'] != 'completed':
             try:
                 output = await model.get_action_output(action_obj.id)
             except KeyError:
@@ -2049,7 +2090,13 @@ async def _check_file():
         for unit in units:
             try:
                 output = await unit.run('cat {}'.format(remote_file))
-                contents = output.data.get('results').get('Stdout', '')
+                await output.wait()
+                results = {}
+                try:
+                    results = output.results
+                except (AttributeError, KeyError):
+                    results = output.data.get('results', {})
+                contents = results.get('Stdout', results.get('stdout', ''))
                 if inspect.iscoroutinefunction(check_function):
                     if not await check_function(contents):
                         return False
@@ -2184,7 +2231,15 @@ async def _check_for_file(model):
         for unit in units:
             try:
                 output = await unit.run('test -e "{}"; echo $?'.format(path))
-                contents = output.data.get('results')['Stdout']
+                await output.wait()
+                output_result = {}
+                try:
+                    output_result = output.results
+                except (AttributeError, KeyError):
+                    output_result = output.data.get('results', {})
+                contents = output_result.get(
+                    'Stdout', output_result.get('stdout', '')
+                )
                 results.append("1" in contents)
             # libjuju throws a generic error for connection failure. So we
             # cannot differentiate between a connectivity issue and a
@@ -2725,6 +2780,40 @@ async def async_destroy_unit(application_name, *unit_names, model_name=None,
 destroy_unit = sync_wrapper(async_destroy_unit)
 
 
+async def async_scale(application_name, scale=None, scale_change=None,
+                      wait=False, model_name=None):
+    """
+    Set or adjust the scale of this (K8s) application.
+
+    :param application_name: Name of application to scale
+    :type application_name: str
+    :param scale: Scale to which to set this application.
+    :type scale: int
+    :param scale_change: Amount by which to adjust the scale of this
+        application (can be positive or negative).
+    :type scale_change: int
+    :param wait: Whether to wait for the unit change to appear in juju
+        status
+    :type wait: bool
+    :param model_name: Name of model to operate on.
+    :type model_name: str
+    """
+    model = await get_model(model_name)
+    app = model.applications[application_name]
+    await app.scale(scale=scale, scale_change=scale_change)
+    if wait:
+        if scale:
+            target_count = scale
+        else:
+            target_count = len(app.units) + scale_change
+        await async_block_until_unit_count(
+            application_name,
+            target_count,
+            model_name=model_name)
+
+scale = sync_wrapper(async_scale)
+
+
 def set_model_constraints(constraints, model_name=None):
     """
     Set constraints on a model.
diff --git a/zaza/utilities/deployment_env.py b/zaza/utilities/deployment_env.py
index 543051c33..f08dc63cf 100644
--- a/zaza/utilities/deployment_env.py
+++ b/zaza/utilities/deployment_env.py
@@ -46,7 +46,7 @@
 MODEL_DEFAULTS = {
     # Model defaults from charm-test-infra
     #   https://jujucharms.com/docs/2.1/models-config
-    'default-series': 'xenial',
+    'default-series': 'focal',
     'image-stream': 'daily',
     'test-mode': 'true',
     'transmit-vendor-metrics': 'false',
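
Illustrative note, separate from the patch above: the run_on_unit tests pin down the idea that an action result should end up carrying both the libjuju 3.x keys ('return-code', 'stdout', 'stderr') and the legacy 2.x spellings ('Code', 'Stdout', 'Stderr'). The following is a minimal standalone sketch of that key normalisation; the helper name is hypothetical and the body only approximates what zaza.model._normalise_action_results does.

# Illustrative sketch only -- mirrors the result normalisation the tests
# above expect from zaza.model.run_on_unit() once libjuju 3.x results
# ('return-code'/'stdout'/'stderr') are folded back into the legacy
# 'Code'/'Stdout'/'Stderr' spellings. Helper name is hypothetical.
def normalise_results_sketch(results):
    """Return a dict carrying both the Juju 2.x and 3.x result key spellings."""
    if not results:
        return {}
    # Make sure every expected key exists, defaulting to an empty string.
    for key in ('Code', 'Stderr', 'Stdout', 'stderr', 'stdout'):
        results.setdefault(key, '')
    # Mirror the lowercase 3.x keys onto the capitalised 2.x keys (and back).
    for new, old in (('stdout', 'Stdout'), ('stderr', 'Stderr')):
        if results[new] and not results[old]:
            results[old] = results[new]
        elif results[old] and not results[new]:
            results[new] = results[old]
    # libjuju 3.x reports the exit status under 'return-code'.
    if 'return-code' in results:
        results['Code'] = str(results.pop('return-code'))
    return results


if __name__ == '__main__':
    juju3_style = {'return-code': '0', 'stderr': '', 'stdout': 'RESULT'}
    print(normalise_results_sketch(juju3_style))
    # Prints a dict with 'Code': '0' and both 'Stdout'/'stdout': 'RESULT',
    # matching the `expected` dicts used in the run_on_unit tests above.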