diff --git a/.github/workflows/issues.yaml b/.github/workflows/issues.yaml deleted file mode 100644 index 6f2dfca2..00000000 --- a/.github/workflows/issues.yaml +++ /dev/null @@ -1,14 +0,0 @@ -name: Issues Sync -on: - issues: - types: [opened] -jobs: - sync-issues: - runs-on: ubuntu-latest - steps: - - uses: alex-page/github-project-automation-plus@v0.8.1 - with: - project: 'Automation and Tooling' - column: 'To Do' - repo-token: ${{ secrets.PROJ_MNG }} - diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml deleted file mode 100644 index d21afffb..00000000 --- a/.github/workflows/lint.yaml +++ /dev/null @@ -1,36 +0,0 @@ -name: Linter -on: - pull_request: - branches: - - main - workflow_dispatch: - -jobs: - lint: - name: Lint bash scripts - runs-on: ubuntu-latest - steps: - - name: Install tools - run: sudo apt install --no-install-recommends --yes python3-bashate - - name: Get sources - uses: actions/checkout@v3 - - name: Lint bash scripts - run: | - bashate --ignore E006 --verbose openstack/tools/create-microceph-vm.sh - bashate --ignore E006 --verbose tools/*.sh tools/juju-lnav - bashate --ignore E006 --verbose openstack/novarc - bashate --ignore E006 --verbose common/ch_channel_map/*.sh - - check-commit-message: - name: Check Commit Message - runs-on: ubuntu-latest - steps: - - name: Get sources - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - name: Check Commit Message - run: | - ./tools/lint-git-messages.sh \ - ${{ github.event.pull_request.base.sha }} \ - ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/run-tests.yaml b/.github/workflows/run-tests.yaml new file mode 100644 index 00000000..20ae97bb --- /dev/null +++ b/.github/workflows/run-tests.yaml @@ -0,0 +1,47 @@ +# This is a templated file and must be kept up-to-date with the original +# from upstream at https://github.com/canonical/se-tooling-ci-common. 
+name: Run Tests +on: + - push + - pull_request + - workflow_dispatch + +jobs: + test: + strategy: + matrix: + python-version: ['3.8', '3.10', '3.12'] + os: [ubuntu-24.04, ubuntu-22.04, ubuntu-20.04] + exclude: + - os: ubuntu-20.04 + python-version: '3.10' + - os: ubuntu-20.04 + python-version: '3.12' + - os: ubuntu-22.04 + python-version: '3.8' + - os: ubuntu-22.04 + python-version: '3.12' + - os: ubuntu-24.04 + python-version: '3.8' + - os: ubuntu-24.04 + python-version: '3.10' + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r gh-test-requirements.txt + - name: Run pylint + run: tox -e pylint + if: matrix.python-version == '3.10' + - name: Run pep8 + run: tox -e pep8 + if: matrix.python-version == '3.10' + - name: Run bashate + run: tox -e bashate + if: matrix.python-version == '3.10' diff --git a/.gitignore b/.gitignore index 496748aa..1d804f8f 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ b bundles ssl/*/ microk8s/dockercreds +__pycache__ diff --git a/.module_defaults/cos b/.module_defaults/cos new file mode 120000 index 00000000..6377437c --- /dev/null +++ b/.module_defaults/cos @@ -0,0 +1 @@ +../cos/module_defaults \ No newline at end of file diff --git a/common/ceph_release_info b/common/ceph_release_info index 59904eac..8fde4cc4 100644 --- a/common/ceph_release_info +++ b/common/ceph_release_info @@ -13,5 +13,6 @@ export -A APP_RELEASE_NAMES=( [firefly]=icehouse [octopus]=ussuri [pacific]=wallaby # xena and wallaby both have pacific [quincy]=yoga - [reef]=bobcat ) # bobcat and caracal both have reef + [reef]=bobcat + [squid]=caracal ) diff --git a/common/ch_channel_map/any_series b/common/ch_channel_map/any_series index dd672f57..b2595b76 100644 --- a/common/ch_channel_map/any_series +++ 
b/common/ch_channel_map/any_series @@ -12,5 +12,9 @@ for c in ${CEPH_CHARMS[@]}; do CHARM_CHANNEL[$c]=$ceph_release/edge done +for c in ${IAM_CHARMS[@]}; do + CHARM_CHANNEL[$c]=latest/edge +done + CHARM_CHANNEL[pacemaker-remote]=${series}/edge CHARM_CHANNEL[microk8s]=1.28/stable diff --git a/common/ch_channel_map/jammy-caracal b/common/ch_channel_map/jammy-caracal new file mode 100644 index 00000000..1dc47f8a --- /dev/null +++ b/common/ch_channel_map/jammy-caracal @@ -0,0 +1,8 @@ +# These are charmhub mappings for charms that are outside of the main large +# groupings like openstack and ceph etc. + +# Versions are based on https://docs.openstack.org/charm-guide/latest/project/charm-delivery.html + +CHARM_CHANNEL[ovn-central]=24.03/candidate +CHARM_CHANNEL[ovn-chassis]=24.03/candidate +CHARM_CHANNEL[ovn-dedicated-chassis]=24.03/candidate diff --git a/common/charm_lists b/common/charm_lists index aa19f83f..97fd13d2 100644 --- a/common/charm_lists +++ b/common/charm_lists @@ -88,3 +88,14 @@ prometheus ro zookeeper ) + +declare -a IAM_CHARMS=( +hydra +identity-platform-login-ui-operator +kratos +kratos-external-idp-integrator +oathkeeper +postgresql-k8s +self-signed-certificates +traefik-k8s +) diff --git a/common/generate-bundle.sh b/common/generate-bundle.sh index 4722cfe1..48f4e78f 100755 --- a/common/generate-bundle.sh +++ b/common/generate-bundle.sh @@ -6,6 +6,7 @@ MOD_DIR=$(realpath $(dirname $0)) . $MOD_DIR/pipeline/02configure . 
$MOD_DIR/pipeline/03build # Ensure no unrendered variables -out="`grep -r __ $MOD_DIR/b/${MASTER_OPTS[BUNDLE_NAME]} --exclude=config --exclude-dir=p| egrep -v '^.*#'`" || exit 0 +out="`grep -r __ $MOD_DIR/b/${MASTER_OPTS[BUNDLE_NAME]} --exclude=config \ + --exclude-dir=p| egrep -v '^.*#'`" || exit 0 echo -e "ERROR: there are unrendered variables in your bundle:\n$out" exit 1 diff --git a/common/generate_bundle_base b/common/generate_bundle_base index 66040dce..e2e6e633 100644 --- a/common/generate_bundle_base +++ b/common/generate_bundle_base @@ -9,13 +9,17 @@ declare -a overlay_opts=() # Catch any changes/additions to master opts update_master_opts ${MOD_PASSTHROUGH_OPTS[@]} +type=$(get_cloud_type) vip_start=${MASTER_OPTS[VIP_ADDR_START]} -if [[ -z $vip_start ]] && [[ -e ~/novarc ]]; then +if [[ -z $vip_start ]] && [[ -e ~/novarc ]] && [[ $type = openstack ]]; then # prodstack - cidr=$(source ~/novarc; openstack subnet show subnet_${OS_USERNAME}-psd -c cidr -f value 2>/dev/null) + cidr=$(source ~/novarc; openstack subnet show subnet_${OS_USERNAME}-psd \ + -c cidr -f value 2>/dev/null) if [[ -z $cidr ]]; then # stsstack - cidr=$(source ~/novarc; openstack subnet show ${OS_USERNAME}_admin_subnet -c cidr -f value 2>/dev/null) + cidr=$(source ~/novarc; openstack subnet show \ + ${OS_USERNAME}_admin_subnet \ + -c cidr -f value 2>/dev/null) if [[ -n $cidr ]]; then vip_start=$(echo $cidr| sed -r 's/([0-9]+\.[0-9]+).+/\1/g').150.0 fi @@ -24,7 +28,9 @@ if [[ -z $vip_start ]] && [[ -e ~/novarc ]]; then # last 20 addresses for vips which is prone to collisions but # we have no alternative currently. net_end=$(awk -F'.' 
'/HostMax/{print $NF}' <<<$(ipcalc -b $cidr)) - vip_start=$(echo $cidr| sed -r 's/([0-9]+\.[0-9]+\.[0-9]+).+/\1/g').$((net_end - 19)) + vip_start=$(echo $cidr| + sed -r 's/([0-9]+\.[0-9]+\.[0-9]+).+/\1/g').$((net_end - + 19)) fi fi VIP_START_PREFIX=${vip_start%\.*} @@ -42,6 +48,12 @@ if [ -n "${MASTER_OPTS[MODEL_CONFIG]}" ]; then juju model-config ${MASTER_OPTS[MODEL_CONFIG]} fi +if [ -n "${MASTER_OPTS[MODEL_CONSTRAINTS]}" ]; then + juju set-model-constraints ${MASTER_OPTS[MODEL_CONSTRAINTS]} +elif ! ${MASTER_OPTS[HYPERCONVERGED_DEPLOYMENT]}; then + juju set-model-constraints root-disk-source=volume root-disk=20G +fi + if has_opt --list; then state_root=`get_bundle_state_root` if [ -d "$state_root" ]; then @@ -61,7 +73,8 @@ finish () { local target - echo "${MOD_DIR}/generate-bundle.sh ${CACHED_STDIN[@]}" > ${bundles_dir}/generate-command + echo "${MOD_DIR}/generate-bundle.sh ${CACHED_STDIN[@]}" > \ + ${bundles_dir}/generate-command if has_opt --replay; then target=${bundles_dir}/command echo -e "INFO: replaying last known command (from $target)\n" @@ -125,7 +138,9 @@ if ((${#MOD_OVERLAYS[@]})); then unset subdir fi cp $MOD_DIR/overlays/$overlay $bundles_dir/o/${subdir:-""} - ((${#overlay_opts[@]}==0)) && overlay_opts+=("") # left padding + if ((${#overlay_opts[@]}==0)); then + overlay_opts+=("") # left padding + fi overlay_opts+=( --overlay $bundles_dir/o/$overlay ) overlay_list+=( "$bundles_dir/o/$overlay" ) render $bundles_dir/o/$overlay @@ -154,20 +169,27 @@ aggregate_mysql_interface_parts $bundles_dir/o # generate bundle list base_bundle=$bundles_dir/`basename $bundle` overlay_list+=( $base_bundle ) -readarray -t application_list < <("$MOD_DIR/../tools/juju-bundle-applications.py" ${overlay_list[@]}) +readarray -t \ + application_list < <("$MOD_DIR/../tools/juju-bundle-applications.py" \ + ${overlay_list[@]}) # Generate placement overlay for use with MAAS provider if ${MASTER_OPTS[HYPERCONVERGED_DEPLOYMENT]}; then - cp 
$MOD_DIR/overlays/unit_placement/header.yaml.template $bundles_dir/unit-placement.yaml + cp $MOD_DIR/overlays/unit_placement/header.yaml.template \ + $bundles_dir/unit-placement.yaml if [[ $MOD_NAME = openstack ]]; then # these two represent total machines since e.g. ceph is deployed on compute but compute and gateway are never colocated - num_placement_machines=$((${MOD_PARAMS[__NUM_NEUTRON_GATEWAY_UNITS__]}+${MOD_PARAMS[__NUM_COMPUTE_UNITS__]})) + num_placement_machines=$((${MOD_PARAMS[__NUM_NEUTRON_GATEWAY_UNITS__]}+ + ${MOD_PARAMS[__NUM_COMPUTE_UNITS__]})) elif [[ $MOD_NAME = kubernetes ]]; then - num_placement_machines=$((${MOD_PARAMS[__NUM_K8S_CONTROL_PLANE_UNITS__]}+${MOD_PARAMS[__NUM_K8S_WORKER_UNITS__]})) - elif [[ $MOD_NAME = ceph ]]; then + num_placement_machines=\ + $((${MOD_PARAMS[__NUM_K8S_CONTROL_PLANE_UNITS__]}+ + ${MOD_PARAMS[__NUM_K8S_WORKER_UNITS__]})) + elif [[ $MOD_NAME = ceph ]] || [[ $MOD_NAME = cos ]]; then num_placement_machines=$((${MOD_PARAMS[__NUM_CEPH_OSD_UNITS__]})) else - echo "ERROR: module '$MOD_NAME' does not yet have support for hyperconverged mode" 1>&2 + echo -n "ERROR: module '$MOD_NAME'" 1>&2 + echo " does not yet have support for hyperconverged mode" 1>&2 exit 1 fi # detect all apps used and generate placement info by doing: @@ -175,33 +197,41 @@ if ${MASTER_OPTS[HYPERCONVERGED_DEPLOYMENT]}; then # * search unit_placement template with same name for app in ${application_list[@]}; do # filter juju keywords - [[ $app == "options" ]] || [[ $app == "to" ]] || [[ $app == "storage" ]] && continue + [[ $app =~ ^(options|to|storage)$ ]] && continue app_placement=${app}.yaml t=$MOD_DIR/overlays/unit_placement/${app_placement}.template [ -r "$t" ] || continue # load template cp $t $PLACEMENT_OVERLAYS_DIR/$app_placement # apply all renderers - render_placement_units_lxd $PLACEMENT_OVERLAYS_DIR/$app_placement $num_placement_machines - render_placement_units_metal $PLACEMENT_OVERLAYS_DIR/$app_placement $num_placement_machines + 
render_placement_units_lxd $PLACEMENT_OVERLAYS_DIR/$app_placement \ + $num_placement_machines + render_placement_units_metal $PLACEMENT_OVERLAYS_DIR/$app_placement \ + $num_placement_machines render $PLACEMENT_OVERLAYS_DIR/$app_placement # add to master placement overlay - cat $PLACEMENT_OVERLAYS_DIR/$app_placement >> $bundles_dir/unit-placement.yaml + cat $PLACEMENT_OVERLAYS_DIR/$app_placement >> \ + $bundles_dir/unit-placement.yaml done # finally render master machine list - render_placement_machines $bundles_dir/unit-placement.yaml $num_placement_machines + render_placement_machines $bundles_dir/unit-placement.yaml \ + $num_placement_machines render $bundles_dir/unit-placement.yaml # and add to list of overlays overlay_opts+=( --overlay $bundles_dir/unit-placement.yaml ) # add default binding to all applications bindings="\ bindings:\n '': ${MASTER_OPTS[DEFAULT_BINDING]}" - find $bundles_dir -name \*.yaml| xargs -l sed -i -r "/^\s+charm:.+/a$bindings" + find $bundles_dir -name \*.yaml|\ + xargs -l sed -i -r "/^\s+charm:.+/a$bindings" fi -((${#overlay_opts[@]})) && overlay_opts+=("") # right padding +if ((${#overlay_opts[@]})); then + overlay_opts+=("") # right padding +fi -echo -e "juju deploy${JUJU_DEPLOY_OPTS} ${base_bundle}${overlay_opts[@]:- }\n " > ${bundles_dir}/command +echo -e "juju deploy${JUJU_DEPLOY_OPTS} \ + ${base_bundle}${overlay_opts[@]:- }\n " > ${bundles_dir}/command finish for f in $INTERNAL_BUNDLE_CONFIG; do diff --git a/common/helpers b/common/helpers index a9efcae1..d3b7aa80 100644 --- a/common/helpers +++ b/common/helpers @@ -5,6 +5,7 @@ declare -A MASTER_OPTS=( [DEFAULT_BINDING]= [HYPERCONVERGED_DEPLOYMENT]=false [MODEL_CONFIG]='test-mode=true' + [MODEL_CONSTRAINTS]='' [BUNDLE_NAME]='' [CLOUD_NAME]='' [TARGET_RELEASE_NAME]='' @@ -42,6 +43,13 @@ list_overlays () } | sort -u } +get_cloud_type () +{ + local cloud=`juju show-model| sed -rn 's/.+cloud:\s*(.+).*/\1/p'| uniq` + local type=`juju show-cloud $cloud| sed -rn 's/^type:\s*(.+).*/\1/p'| 
uniq` + echo "$type" +} + _usage () { cat << EOF USAGE: `basename $0` OPTIONS [OVERLAYS] [MODULE_OPTS] @@ -181,6 +189,14 @@ filter_master_opts () fi shift ;; + --model-constraints) + if [ -z "${MASTER_OPTS[MODEL_CONSTRAINTS]}" ]; then + MASTER_OPTS[MODEL_CONSTRAINTS]="$2" + else + MASTER_OPTS[MODEL_CONSTRAINTS]+=" $2" + fi + shift + ;; --name|-n) # give bundle set a name and store under named dir MASTER_OPTS[BUNDLE_NAME]="${2%%:*}" @@ -782,8 +798,7 @@ ensure_model # Establish what cloud provider is in use and if it's MAAS, use unit placement. # NOTE: this has to be done AFTER the model has been created and we have # switched context. -cloud=`juju show-model| sed -rn 's/.+cloud:\s*(.+).*/\1/p'| uniq` -type=`juju show-cloud $cloud| sed -rn 's/^type:\s*(.+).*/\1/p'| uniq` +type=$(get_cloud_type) if [[ $type = maas ]]; then echo "INFO: maas provider detected - enabling hyperconverged deployment" MASTER_OPTS[HYPERCONVERGED_DEPLOYMENT]=true diff --git a/common/render.d/all b/common/render.d/all index c446453a..5b87e554 100644 --- a/common/render.d/all +++ b/common/render.d/all @@ -45,7 +45,8 @@ render_mod_params () if ((${#MOD_PARAMS[@]})); then for p in ${!MOD_PARAMS[@]}; do - echo -n "-e 's,$p,${MOD_PARAMS[$p]},g' " >> $config_renderer + # Escape any comma characters in the value, otherwise it breaks + echo -n "-e 's,$p,${MOD_PARAMS[$p]//,/\\,},g' " >> $config_renderer echo "${p}: \"${MOD_PARAMS[$p]}\"" >> $INTERNAL_BUNDLE_CONFIG done fi diff --git a/cos/common b/cos/common new file mode 120000 index 00000000..60d3b0a6 --- /dev/null +++ b/cos/common @@ -0,0 +1 @@ +../common \ No newline at end of file diff --git a/cos/configure b/cos/configure new file mode 100755 index 00000000..6266d2e3 --- /dev/null +++ b/cos/configure @@ -0,0 +1,48 @@ +#!/bin/bash -x +COS_MODEL=cos +SCRIPT_DIR=$(realpath $(dirname $0)) + +juju_run_cmd="juju run" +if (( $(juju --version | awk -F. 
{'print $1'}) > 2 )); then + juju_run_cmd="juju exec" +fi + +which kubectl || sudo snap install kubectl --classic +mkdir -p ~/.kube + +if $(juju list-models| egrep -q "^${COS_MODEL}\*"); then + echo "WARNING: currently in '$COS_MODEL' context - switch to microk8s model to re-run microk8s config" +else + mk8s_unit=$(juju status| sed -nr 's,(^microk8s/[[:digit:]]+)\*.*,\1,p') + $juju_run_cmd --unit $mk8s_unit microk8s.config > ~/.kube/config + $juju_run_cmd --unit $mk8s_unit -- 'IPADDR=$( ip r get 2.2.2.2| sed -rn "s/.+src ([0-9\.]+) .+/\1/p"); microk8s enable metallb:$IPADDR-$IPADDR' +fi + +kubectl get pods -A + +if ! $(juju list-clouds| egrep -q "^microk8s-cos"); then + KUBECONFIG=~/.kube/config juju add-k8s microk8s-cos --cluster-name=microk8s-cluster --client --controller ${OS_PROJECT_NAME/_/-} --storage=ceph-xfs +fi +if ! $(juju list-models| egrep -q "^${COS_MODEL}"); then + juju add-model $COS_MODEL microk8s-cos + juju deploy cos-lite --overlay ${SCRIPT_DIR}/overlays/cos/cos-lite-offers.yaml --trust +else + echo "INFO: model '$COS_MODEL' already exists - skipping deploy" + juju switch $COS_MODEL +fi + +juju wait-for application grafana + +set +x +echo "INFO: COS should now be reachable at the following endpoints:" +juju run traefik/0 show-proxied-endpoints --format=yaml| yq '."traefik/0".results."proxied-endpoints"' | jq + +GRAFANA_PASSWORD=$(juju run grafana/leader get-admin-password --model cos 2>/dev/null| sed -rn 's/admin-password:\s+(.+)/\1/p') +GRAFANA_USER=$(juju config grafana admin_user) +echo "Grafana login info: ${GRAFANA_USER}/$GRAFANA_PASSWORD" + +echo "INFO: run the following to consume COS from your microk8s model:" +echo "juju switch " +for offer in $(juju list-offers| tail -n+2| awk '{print $1}'); do + echo "juju consume ${COS_MODEL}.$offer" +done diff --git a/cos/cos.yaml.template b/cos/cos.yaml.template new file mode 100644 index 00000000..ca4d00ea --- /dev/null +++ b/cos/cos.yaml.template @@ -0,0 +1,17 @@ +series: __SERIES__ +machines: + 
'0': + constraints: __MACHINE1_CONSTRAINTS__ + series: __SERIES__ +applications: + microk8s: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__microk8s + num_units: 1 + expose: true + options: + containerd_http_proxy: __CONTAINERD_PROXY__ + containerd_https_proxy: __CONTAINERD_PROXY__ + containerd_no_proxy: __CONTAINERD_NO_PROXY__ + to: + - 0 + diff --git a/cos/generate-bundle.sh b/cos/generate-bundle.sh new file mode 120000 index 00000000..394558ee --- /dev/null +++ b/cos/generate-bundle.sh @@ -0,0 +1 @@ +common/generate-bundle.sh \ No newline at end of file diff --git a/cos/module_defaults b/cos/module_defaults new file mode 100644 index 00000000..8a37d73f --- /dev/null +++ b/cos/module_defaults @@ -0,0 +1,18 @@ +# This file must contain defaults for all variables used in bundles/overlays. +# They are used to render to final product in the event they are not provided +# elsewhere. It is inserted into the global context at the start of the +# pipeline. +# +# You can check that none are missing by running lint/check_var_defaults.sh +# + +MOD_PARAMS[__MICROK8S_CHANNEL__]="latest/edge" +MOD_PARAMS[__CONTAINERD_PROXY__]='' +MOD_PARAMS[__CONTAINERD_NO_PROXY__]='127.0.0.1,localhost,::1,10.149.0.0/16,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16' +MOD_PARAMS[__NUM_MICROK8S_UNITS__]=1 +MOD_PARAMS[__NUM_COS_PROXY_UNITS__]=1 +MOD_PARAMS[__NUM_CEPH_MON_UNITS__]=3 +MOD_PARAMS[__NUM_CEPH_OSD_UNITS__]=3 +MOD_PARAMS[__MACHINE1_CONSTRAINTS__]='mem=8G' +MOD_PARAMS[__MACHINE2_CONSTRAINTS__]='mem=8G' +MOD_PARAMS[__MACHINE3_CONSTRAINTS__]='mem=8G' diff --git a/cos/overlays b/cos/overlays new file mode 120000 index 00000000..0d44a21c --- /dev/null +++ b/cos/overlays @@ -0,0 +1 @@ +../overlays \ No newline at end of file diff --git a/cos/pipeline/00setup b/cos/pipeline/00setup new file mode 100644 index 00000000..fecb3e76 --- /dev/null +++ b/cos/pipeline/00setup @@ -0,0 +1,21 @@ +#!/bin/bash + +# Globals +export MOD_NAME=cos +export MOD_BASE_TEMPLATE=cos.yaml.template +export 
MOD_SSL_STATE_DIR=${MOD_NAME} +[ -n "${MASTER_OPTS[BUNDLE_NAME]}" ] && \ + MOD_SSL_STATE_DIR="${MOD_SSL_STATE_DIR}-${MASTER_OPTS[BUNDLE_NAME]}" + +# opts that 02configure does not recognise that get passed to the generator +export -a MOD_PASSTHROUGH_OPTS=() + +# Collection of messages to display at the end +export -A MOD_MSGS=() +# Use order 0 to ensure this is first displayed +MOD_MSGS[0_common.0]="run ./configure to initialise your deployment" + +# Array list of overlays to use with this deployment. +export -a MOD_OVERLAYS=() + +export -A MOD_PARAMS=() diff --git a/cos/pipeline/01import-config-defaults b/cos/pipeline/01import-config-defaults new file mode 100644 index 00000000..3dd4b529 --- /dev/null +++ b/cos/pipeline/01import-config-defaults @@ -0,0 +1,5 @@ +# Start with dependency defaults in case we want to override any locally +. $MOD_DIR/../.module_defaults/ceph + +# Current module imports +. $MOD_DIR/module_defaults diff --git a/cos/pipeline/02configure b/cos/pipeline/02configure new file mode 100644 index 00000000..9c874349 --- /dev/null +++ b/cos/pipeline/02configure @@ -0,0 +1,64 @@ +#!/bin/bash +# Global variables are first defined in 00setup and module +# dependencies are defined in 01import-config-defaults +# +# All overlay/bundle variables (MOD_PARAMS) defaults must go into +# the /module_defaults file. + +target=$series +[ -z "$pocket" ] || target=${target}-$pocket +target=${target}:${MOD_PARAMS[__MICROK8S_CHANNEL__]} +MOD_PASSTHROUGH_OPTS+=( --release-name $target ) + +# Automatically use proxy if in prodstack only +if $(timeout 1s getent hosts squid.internal &> /dev/null) && [ -z "${MOD_PARAMS[__CONTAINERD_PROXY__]}" ]; then + MOD_MSGS[1_proxy.0]='PROXY: squid.internal exists, setting containerd proxy to http://squid.internal:3128' + MOD_PARAMS[__CONTAINERD_PROXY__]=http://squid.internal:3128 +fi + +if ! has_opt --charmed-ceph-lxd && ! 
has_opt --microceph; then + MOD_OVERLAYS+=( "cos/charmed-ceph.yaml" ) + MOD_OVERLAYS+=( "cos/ceph-csi.yaml" ) +fi + +# Skip processing input if it includes exclusive passthrough options +! has_excl_passthrough_opt && \ +while (($# > 0)) +do + case "$1" in + --containerd-proxy) #__OPT__type: (default="" unless the hostname squid.internal resolves, then it's http://squid.internal:3128) + MOD_PARAMS[__CONTAINERD_PROXY__]=$2 + shift + ;; + --containerd-no-proxy) #__OPT__type: (default=127.0.0.1,localhost,::1,10.149.0.0/16,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16) + MOD_PARAMS[__CONTAINERD_NO_PROXY__]=$2 + shift + ;; + --microceph) + MOD_OVERLAYS+=( "cos/microceph.yaml" ) + ;; + --charmed-ceph-lxd) + MOD_OVERLAYS+=( "cos/charmed-ceph-lxd.yaml" ) + ;; + --cos-proxy) + MOD_OVERLAYS+=( "cos/cos-proxy.yaml" ) + MOD_MSGS[2_cos-proxy.0]='Once the COS deployment is complete you need to do the following:' + MOD_MSGS[2_cos-proxy.1]='juju relate grafana-dashboards:grafana-dashboard cos-proxy:downstream-grafana-dashboard' + MOD_MSGS[2_cos-proxy.2]='juju relate loki-logging:logging cos-proxy:downstream-logging' + MOD_MSGS[2_cos-proxy.3]='juju relate prometheus-scrape:metrics-endpoint cos-proxy:downstream-prometheus-scrape' + ;; + --filebeat) + MOD_OVERLAYS+=( "cos/cos-proxy-filebeat.yaml" ) + if ! has_opt --cos-proxy; then + set -- $@ --cos-proxy && cache $@ + fi + ;; + *) + echo "ERROR: invalid input '$1'" + _usage + exit 1 + ;; + esac + shift +done || true + diff --git a/cos/pipeline/03build b/cos/pipeline/03build new file mode 100644 index 00000000..62dd78f9 --- /dev/null +++ b/cos/pipeline/03build @@ -0,0 +1,5 @@ +#!/bin/bash +. 
$MOD_DIR/common/generate_bundle_base + +print_msgs + diff --git a/cos/resources/README_resources.md b/cos/resources/README_resources.md new file mode 100644 index 00000000..4261e46e --- /dev/null +++ b/cos/resources/README_resources.md @@ -0,0 +1,5 @@ +If you want to configure charm resources [1] in a bundle, put a directory in +this path with the name of the bundle/overlay they correspond to. Any files +within will then be copied into the generated bundles' path. + +[1] https://docs.jujucharms.com/using-resources-developer-guide diff --git a/gh-test-requirements.txt b/gh-test-requirements.txt new file mode 100644 index 00000000..eb28166c --- /dev/null +++ b/gh-test-requirements.txt @@ -0,0 +1 @@ +tox<4.0.0 diff --git a/identity-platform/authentik-values.yaml b/identity-platform/authentik-values.yaml new file mode 100644 index 00000000..d39baf87 --- /dev/null +++ b/identity-platform/authentik-values.yaml @@ -0,0 +1,21 @@ +authentik: + secret_key: "my-secure-secret-key" + error_reporting: + enabled: false + postgresql: + password: "my-secure-psql-password" + bootstrap_token: "my-secure-bootstrap-token" + bootstrap_password: "Passw0rd" +server: + ingress: + ingressClassName: nginx + enabled: false + hosts: + - authentik.secloud +postgresql: + enabled: true + auth: + password: "my-secure-psql-password" +redis: + enabled: true + diff --git a/identity-platform/common b/identity-platform/common new file mode 120000 index 00000000..60d3b0a6 --- /dev/null +++ b/identity-platform/common @@ -0,0 +1 @@ +../common \ No newline at end of file diff --git a/identity-platform/configure b/identity-platform/configure new file mode 100755 index 00000000..53c2d7dc --- /dev/null +++ b/identity-platform/configure @@ -0,0 +1,74 @@ +#!/bin/bash + +# Reset +if [[ "$1" == "reset" ]]; then + helm uninstall authentik -n authentik + kubectl delete pvc -n authentik data-authentik-postgresql-0 redis-data-authentik-redis-master-0 +fi + +# Check for kratos-external-idp-integrator +if [ "$(juju 
status --format json| jq -r '.applications["kratos-external-idp-integrator"].units|to_entries[]|select(.value["leader"])|.key' 2> /dev/null)" == "" ]; then + echo 'ERROR: Cannot configure OIDC without kratos-external-idp-integrator!' + exit 1 +fi + +# Install Helm +if ! snap list | grep -q helm; then + sudo snap install helm --classic +fi + +# Install authentik +kubectl get ns authentik &> /dev/null || kubectl create ns authentik +https_proxy=http://squid.internal:3128 helm repo add authentik https://charts.goauthentik.io +https_proxy=http://squid.internal:3128 helm repo update +helm install authentik authentik/authentik -f ./authentik-values.yaml -n authentik --version 2024.10.1 + +timeout=0 +echo 'Waiting for Authentik to start...' +up=0 +while [[ "$up" != 1 ]]; do + up="$(kubectl get deploy -n authentik authentik-server -o json | jq '.status.readyReplicas')" + if [[ $timeout == 600 ]]; then + echo 'ERROR: Authentik failed to start.' + exit 1 + fi + sleep 1 + ((timeout++)) +done + +# Prepare port for API calls and wait +kubectl patch svc -n authentik authentik-server -p '{"spec": {"type": "NodePort"}}' || exit 1 +AUTH_PORT=$(kubectl get svc -n authentik authentik-server -o jsonpath='{.spec.ports[].nodePort}') +AUTH_IP=$(kubectl get po -n authentik -o json | jq -r '.items[] | select(.metadata.name | test("authentik-server-")) | .status.hostIP') + +# Configure OIDC +## get default values +until [ -n "$AUTH_FLOW" ]; do AUTH_FLOW=$(curl -s -X GET -H "accept: application/json" -H "Authorization: Bearer my-secure-bootstrap-token" "http://${AUTH_IP}:${AUTH_PORT}/api/v3/flows/instances/?search=default-authentication-flow" | jq -r '.results[0].pk'); done +until [ -n "$AUTHZ_FLOW" ]; do AUTHZ_FLOW=$(curl -s -X GET -H "accept: application/json" -H "Authorization: Bearer my-secure-bootstrap-token" "http://${AUTH_IP}:${AUTH_PORT}/api/v3/flows/instances/?search=default-provider-authorization-implicit-consent" | jq -r '.results[0].pk'); done +until [ -n "$INVALID_FLOW" ]; do 
INVALID_FLOW=$(curl -s -X GET -H "accept: application/json" -H "Authorization: Bearer my-secure-bootstrap-token" "http://${AUTH_IP}:${AUTH_PORT}/api/v3/flows/instances/?search=default-invalidation-flow" | jq -r '.results[0].pk'); done +until [ -n "$SIGN_KEY" ]; do SIGN_KEY=$(curl -s -X GET -H "accept: application/json" -H "Authorization: Bearer my-secure-bootstrap-token" "http://${AUTH_IP}:${AUTH_PORT}/api/v3/crypto/certificatekeypairs/" | jq -r '.results[0].pk'); done +until [ -n "$SCOPE" ]; do SCOPE=$(curl -s -X GET -H "accept: application/json" -H "Authorization: Bearer my-secure-bootstrap-token" "http://${AUTH_IP}:${AUTH_PORT}/api/v3/propertymappings/provider/scope/?search=email" | jq -r '.results[0].pk'); done + +## create provider +curl -X POST "http://${AUTH_IP}:${AUTH_PORT}/api/v3/providers/oauth2/" -H "Authorization: Bearer my-secure-bootstrap-token" -H "accept: application/json" -H "content-type: application/json" -d "{\"name\":\"oidc-provider\",\"authentication_flow\":\"$AUTH_FLOW\",\"authorization_flow\":\"$AUTHZ_FLOW\",\"invalidation_flow\":\"$INVALID_FLOW\",\"client_type\":\"confidential\",\"client_id\":\"canonical-support\",\"client_secret\":\"my-secure-oidc-secret\",\"access_code_validity\":\"hours=3\",\"access_token_validity\":\"hours=3\",\"refresh_token_validity\":\"hours=3\",\"include_claims_in_id_token\":true,\"redirect_uris\":\"*\",\"sub_mode\":\"hashed_user_id\",\"issuer_mode\":\"per_provider\",\"signing_key\":\"$SIGN_KEY\",\"property_mappings\":[\"$SCOPE\"]}" + +## create app +curl -H "Authorization: Bearer my-secure-bootstrap-token" -X POST "http://${AUTH_IP}:${AUTH_PORT}/api/v3/core/applications/" -H "accept: application/json" -H "content-type: application/json" -d '{"name":"canonical-support","slug":"canonical-support","provider":1,"policy_engine_mode":"all"}' + +# Configure kratos +juju config kratos-external-idp-integrator provider=generic +juju config kratos-external-idp-integrator client_id=canonical-support +juju config 
kratos-external-idp-integrator client_secret=my-secure-oidc-secret +juju config kratos-external-idp-integrator issuer_url=http://"${AUTH_IP}:${AUTH_PORT}"/application/o/canonical-support/ + +echo " +Configuration is complete! You can test a login with the following credentials: + +Authentik Dashboard: http://${AUTH_IP}:${AUTH_PORT} +OIDC User: akadmin +Password: Passw0rd" + +grafana_url="$(juju run grafana/0 get-admin-password 2> /dev/null | grep url | sed -e 's/url: //')" +if [[ -n $grafana_url ]]; then + echo "Grafana Dashboard: $grafana_url" +fi diff --git a/identity-platform/generate-bundle.sh b/identity-platform/generate-bundle.sh new file mode 120000 index 00000000..394558ee --- /dev/null +++ b/identity-platform/generate-bundle.sh @@ -0,0 +1 @@ +common/generate-bundle.sh \ No newline at end of file diff --git a/identity-platform/iam.yaml.template b/identity-platform/iam.yaml.template new file mode 100644 index 00000000..8920a232 --- /dev/null +++ b/identity-platform/iam.yaml.template @@ -0,0 +1,72 @@ +--- +bundle: kubernetes +name: identity-platform +website: https://github.com/canonical/iam-bundle +issues: https://github.com/canonical/iam-bundle/issues +applications: + hydra: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__hydra + scale: 1 + series: jammy + trust: true + identity-platform-login-ui-operator: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__identity-platform-login-ui-operator + scale: 1 + series: jammy + trust: true + kratos: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__kratos + scale: 1 + series: jammy + options: + enforce_mfa: false + trust: true + oathkeeper: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__oathkeeper + scale: 1 + series: jammy + trust: true + postgresql-k8s: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__postgresql-k8s + scale: 1 + series: jammy + options: + plugin_btree_gin_enable: true + plugin_pg_trgm_enable: true + storage: + pgdata: kubernetes,1,1024M + trust: true + 
self-signed-certificates: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__self-signed-certificates + scale: 1 + traefik-admin: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__traefik-k8s + scale: 1 + series: focal + storage: + configurations: kubernetes,1,1024M + trust: true + traefik-public: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__traefik-k8s + scale: 1 + series: focal + options: + enable_experimental_forward_auth: true + storage: + configurations: kubernetes,1,1024M + trust: true +relations: + - [hydra:pg-database, postgresql-k8s:database] + - [kratos:pg-database, postgresql-k8s:database] + - [kratos:hydra-endpoint-info, hydra:hydra-endpoint-info] + - [hydra:admin-ingress, traefik-admin:ingress] + - [hydra:public-ingress, traefik-public:ingress] + - [kratos:admin-ingress, traefik-admin:ingress] + - [kratos:public-ingress, traefik-public:ingress] + - [identity-platform-login-ui-operator:ingress, traefik-public:ingress] + - [identity-platform-login-ui-operator:hydra-endpoint-info, hydra:hydra-endpoint-info] + - [identity-platform-login-ui-operator:ui-endpoint-info, hydra:ui-endpoint-info] + - [identity-platform-login-ui-operator:ui-endpoint-info, kratos:ui-endpoint-info] + - [identity-platform-login-ui-operator:kratos-info, kratos:kratos-info] + - [traefik-admin:certificates, self-signed-certificates:certificates] + - [traefik-public:certificates, self-signed-certificates:certificates] diff --git a/identity-platform/module_defaults b/identity-platform/module_defaults new file mode 100644 index 00000000..21ffaa2e --- /dev/null +++ b/identity-platform/module_defaults @@ -0,0 +1,9 @@ +# This file must contain defaults for all variables used in bundles/overlays. +# They are used to render to final product in the event they are not provided +# elsewhere. It is inserted into the global context at the start of the +# pipeline. 
+# +# You can check that none are missing by running lint/check_var_defaults.sh +# +JUJU_DEPLOY_OPTS=" --trust" +CHARM_CHANNEL[postgresql-k8s]=14/stable diff --git a/identity-platform/overlays b/identity-platform/overlays new file mode 120000 index 00000000..0d44a21c --- /dev/null +++ b/identity-platform/overlays @@ -0,0 +1 @@ +../overlays \ No newline at end of file diff --git a/identity-platform/pipeline/00setup b/identity-platform/pipeline/00setup new file mode 100644 index 00000000..6b183cc5 --- /dev/null +++ b/identity-platform/pipeline/00setup @@ -0,0 +1,22 @@ +#!/bin/bash + +# Globals +export MOD_NAME=identity-platform +export MOD_BASE_TEMPLATE=iam.yaml.template +export MOD_SSL_STATE_DIR=${MOD_NAME} +[ -n "${MASTER_OPTS[BUNDLE_NAME]}" ] && \ + MOD_SSL_STATE_DIR="${MOD_SSL_STATE_DIR}-${MASTER_OPTS[BUNDLE_NAME]}" + +# opts that 02configure does not recognise that get passed to the generator +export -a MOD_PASSTHROUGH_OPTS=() + +# Collection of messages to display at the end +export -A MOD_MSGS=() +# Use order 0 to ensure this is first displayed +MOD_MSGS[0_common.0]="Ensure a LoadBalancer (e.g. MetalLB or Cilium) is enabled on k8s" +MOD_MSGS[0_common.2]="Configure a local user: juju run kratos/0 create-admin-account email=admin@secloud.local password=Passw0rd username=admin" + +# Array list of overlays to use with this deployment. +export -a MOD_OVERLAYS=() + +export -A MOD_PARAMS=() diff --git a/identity-platform/pipeline/01import-config-defaults b/identity-platform/pipeline/01import-config-defaults new file mode 100644 index 00000000..8848bc10 --- /dev/null +++ b/identity-platform/pipeline/01import-config-defaults @@ -0,0 +1,2 @@ +# Current module imports +. 
$MOD_DIR/module_defaults diff --git a/identity-platform/pipeline/02configure b/identity-platform/pipeline/02configure new file mode 100644 index 00000000..58c94fb1 --- /dev/null +++ b/identity-platform/pipeline/02configure @@ -0,0 +1,33 @@ +#!/bin/bash +# Global variables are first defined in 00setup and module +# dependencies are defined in 01import-config-defaults +# +# All overlay/bundle variables (MOD_PARAMS) defaults must go into +# the /module_defaults file. + +cloud="$(get_cloud_type)" +if [[ "$cloud" != "k8s" ]]; then + echo "ERROR: Must switch to a Kubernetes model first." + exit 1 +fi + +while (($# > 0)) +do + case $1 in + --oidc) + MOD_OVERLAYS+=( "kubernetes/k8s-iam-oidc.yaml" ) + MOD_MSGS[0_common.1]="Setup OIDC: ./configure" + ;; + --grafana) + MOD_OVERLAYS+=( "kubernetes/k8s-iam-grafana.yaml" ) + MOD_MSGS[grafana.0]="Get Grafana URL: juju run grafana/leader get-admin-password" + ;; + *) + echo "ERROR: invalid input '$1'" + _usage + exit 1 + ;; + esac + shift +done + diff --git a/identity-platform/pipeline/03build b/identity-platform/pipeline/03build new file mode 100644 index 00000000..62dd78f9 --- /dev/null +++ b/identity-platform/pipeline/03build @@ -0,0 +1,5 @@ +#!/bin/bash +. 
$MOD_DIR/common/generate_bundle_base + +print_msgs + diff --git a/jaas/configure b/jaas/configure index f3c75204..6fab539b 100755 --- a/jaas/configure +++ b/jaas/configure @@ -1,4 +1,4 @@ -#!/bin/sh -u +#!/bin/bash -u status=$(juju status --format=json) candid_haproxy_machine=$(echo $status | jq '.applications."candid-haproxy".units."candid-haproxy/0".machine') diff --git a/kubernetes/module_defaults b/kubernetes/module_defaults index dc40079e..5dfc09b4 100644 --- a/kubernetes/module_defaults +++ b/kubernetes/module_defaults @@ -17,4 +17,5 @@ MOD_PARAMS[__NUM_K8S_WORKER_UNITS__]=2 MOD_PARAMS[__NUM_K8S_LB_UNITS__]=1 MOD_PARAMS[__ETCD_SNAP_CHANNEL__]='latest/stable' MOD_PARAMS[__CONTAINER_RUNTIME__]='containerd' - +MOD_PARAMS[__CONTAINERD_PROXY__]='' +MOD_PARAMS[__CONTAINERD_NO_PROXY__]='127.0.0.1,localhost,::1,10.149.0.0/16,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16' diff --git a/kubernetes/pipeline/02configure b/kubernetes/pipeline/02configure index 8f2fffb9..9e860196 100644 --- a/kubernetes/pipeline/02configure +++ b/kubernetes/pipeline/02configure @@ -67,6 +67,12 @@ if ! is_hyperconverged; then fi fi +# Automatically use proxy if in prodstack only +if $(timeout 1s getent hosts squid.internal &> /dev/null) && [ -z "${MOD_PARAMS[__CONTAINERD_PROXY__]}" ]; then + MOD_MSGS[1_proxy.0]='PROXY: Hostname squid.internal resolves, setting containerd proxy to http://squid.internal:3128' + MOD_PARAMS[__CONTAINERD_PROXY__]=http://squid.internal:3128 +fi + # Skip processing input if it includes exclusive passthrough options ! 
has_excl_passthrough_opt && \ while (($# > 0)) @@ -110,6 +116,14 @@ do conflicts_with $1 --docker MOD_OVERLAYS+=( "kubernetes/k8s-containerd.yaml" ) ;; + --containerd-proxy) #__OPT__type: (default="" unless the hostname squid.internal resolves, then it's http://squid.internal:3128) + MOD_PARAMS[__CONTAINERD_PROXY__]=$2 + shift + ;; + --containerd-no-proxy) #__OPT__type: (default=127.0.0.1,localhost,::1,10.149.0.0/16,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16) + MOD_PARAMS[__CONTAINERD_NO_PROXY__]=$2 + shift + ;; --calico) conflicts_with $1 --canal MOD_OVERLAYS+=( "kubernetes/k8s-cni-calico.yaml" ) diff --git a/microk8s/microk8s.yaml.template b/microk8s/microk8s.yaml.template index f2e1fafe..750cff5f 100644 --- a/microk8s/microk8s.yaml.template +++ b/microk8s/microk8s.yaml.template @@ -5,3 +5,7 @@ applications: num_units: __NUM_MICROK8S_UNITS__ constraints: mem=8G expose: true + options: + containerd_http_proxy: __CONTAINERD_PROXY__ + containerd_https_proxy: __CONTAINERD_PROXY__ + containerd_no_proxy: __CONTAINERD_NO_PROXY__ diff --git a/microk8s/module_defaults b/microk8s/module_defaults index 7ec60d63..db77bf51 100644 --- a/microk8s/module_defaults +++ b/microk8s/module_defaults @@ -8,3 +8,5 @@ MOD_PARAMS[__MICROK8S_CHANNEL__]="latest/edge" MOD_PARAMS[__NUM_MICROK8S_UNITS__]=1 +MOD_PARAMS[__CONTAINERD_PROXY__]='' +MOD_PARAMS[__CONTAINERD_NO_PROXY__]='127.0.0.1,localhost,::1,10.149.0.0/16,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16' diff --git a/microk8s/pipeline/02configure b/microk8s/pipeline/02configure index 35710e57..60678fe7 100644 --- a/microk8s/pipeline/02configure +++ b/microk8s/pipeline/02configure @@ -9,3 +9,31 @@ target=$series [ -z "$pocket" ] || target=${target}-$pocket target=${target}:${MOD_PARAMS[__MICROK8S_CHANNEL__]} MOD_PASSTHROUGH_OPTS+=( --release-name $target ) + +# Automatically use proxy if in prodstack only +if $(timeout 1s getent hosts squid.internal &> /dev/null) && [ -z "${MOD_PARAMS[__CONTAINERD_PROXY__]}" ]; then + 
MOD_MSGS[1_proxy.0]='PROXY: squid.internal exists, setting containerd proxy to http://squid.internal:3128' + MOD_PARAMS[__CONTAINERD_PROXY__]=http://squid.internal:3128 +fi + +# Skip processing input if it includes exclusive passthrough options +! has_excl_passthrough_opt && \ +while (($# > 0)) +do + case "$1" in + --containerd-proxy) #__OPT__type: (default="" unless the hostname squid.internal resolves, then it's http://squid.internal:3128) + MOD_PARAMS[__CONTAINERD_PROXY__]=$2 + shift + ;; + --containerd-no-proxy) #__OPT__type: (default=127.0.0.1,localhost,::1,10.149.0.0/16,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16) + MOD_PARAMS[__CONTAINERD_NO_PROXY__]=$2 + shift + ;; + *) + echo "ERROR: invalid input '$1'" + _usage + exit 1 + ;; + esac + shift +done diff --git a/openstack/bin/post-deploy-config b/openstack/bin/post-deploy-config index 8fb7e7fb..ac8a6850 100755 --- a/openstack/bin/post-deploy-config +++ b/openstack/bin/post-deploy-config @@ -74,7 +74,7 @@ if __name__ == '__main__': raise ValueError('Unable to find network {}'.format(net_id)) else: # Preserve existing default behavior (eg. manual testing) - net_name = os.environ.get('UNDERCLOUD_OAM_NET') + net_name = os.environ.get('UNDERCLOUD_EXT_NET') if not net_name: net_name = os.environ['OS_USERNAME'] + '_admin_net' diff --git a/openstack/configure b/openstack/configure index 25003dc8..51bbf426 100755 --- a/openstack/configure +++ b/openstack/configure @@ -1,4 +1,4 @@ -#!/bin/sh -u +#!/bin/bash -u profile=${1:-prodstack6} net_type=${2:-""} ./profiles/$profile $net_type diff --git a/openstack/module_defaults b/openstack/module_defaults index 8c0569ca..75e46db3 100644 --- a/openstack/module_defaults +++ b/openstack/module_defaults @@ -51,10 +51,11 @@ MOD_PARAMS[__MAAS_URL__]= # e.g. 
http://1.2.3.4:5240/MAAS MOD_PARAMS[__MAAS_API_KEY__]= MOD_PARAMS[__GLOBAL_MTU__]=1500 MOD_PARAMS[__PATH_MTU__]= +MOD_PARAMS[__NUM_WATCHER_UNITS__]=1 # This is enough for creating one ubuntu vm or multiple cirros vms but some # scenarios may want to allow more per compute (e.g. octavia). -MOD_PARAMS[__NOVA_COMPUTE_UNIT_CONSTRAINTS__]="mem=4G root-disk=80G" +MOD_PARAMS[__NOVA_COMPUTE_UNIT_CONSTRAINTS__]="mem=4G cores=2" # Try to use current model (or newly requested one) as subdomain name model_subdomain=`get_juju_model` diff --git a/openstack/novarc b/openstack/novarc index 697c975a..ca2ef404 100644 --- a/openstack/novarc +++ b/openstack/novarc @@ -40,8 +40,8 @@ END return;; esac fi + export OS_AUTH_PROTOCOL=https fi - export OS_AUTH_PROTOCOL=https else unset OS_AUTH_PROTOCOL fi @@ -56,8 +56,8 @@ unset _OS_PARAMS # If user was specified use it if [[ $# -gt 1 && $1 = --service ]]; then - RELATION_ID=$(juju run --unit $2/leader -- relation-ids identity-service | cut -d : -f 2) - readarray -t CREDENTIALS < <(juju run --unit $2/leader -- relation-get --relation ${RELATION_ID} --format json - keystone/0) + RELATION_ID=$(juju exec --unit $2/leader -- relation-ids identity-service | cut -d : -f 2) + readarray -t CREDENTIALS < <(juju exec --unit $2/leader -- relation-get --relation ${RELATION_ID} --format json - keystone/0) export OS_USERNAME=$(echo ${CREDENTIALS} | jq --raw-output .service_username) export OS_USER_DOMAIN_NAME=$(echo ${CREDENTIALS} | jq --raw-output .service_domain) @@ -74,7 +74,7 @@ else # from the leader databag. 
_CONFIG_PASSWD="$(juju config keystone admin-password| awk '{print tolower($0)}')" if [ "${_CONFIG_PASSWD}" == "none" ] || [ "${_CONFIG_PASSWD}" == "" ]; then - export OS_PASSWORD=$(juju run -u keystone/leader leader-get admin_passwd) + export OS_PASSWORD=$(juju exec --unit keystone/leader leader-get admin_passwd) else export OS_PASSWORD="$(juju config keystone admin-password)" fi diff --git a/openstack/novarcv3_domain b/openstack/novarcv3_domain index e7177a0f..4c7fb8d9 100644 --- a/openstack/novarcv3_domain +++ b/openstack/novarcv3_domain @@ -6,7 +6,7 @@ done keystone_addr=`juju config keystone vip` # TODO(hopem): remove fix for bug 1789415 once released if [ -z "$keystone_addr" ] || [ "$keystone_addr" = "" ]; then - keystone_addr=`juju run --unit keystone/0 unit-get private-address` + keystone_addr=`juju exec --unit keystone/0 unit-get private-address` fi ssl_cert=`juju config keystone ssl_cert` diff --git a/openstack/openstack.yaml.template b/openstack/openstack.yaml.template index ebed8672..f2833a03 100644 --- a/openstack/openstack.yaml.template +++ b/openstack/openstack.yaml.template @@ -35,6 +35,8 @@ applications: migration-auth-type: ssh openstack-origin: *openstack_origin force-raw-images: false # disable for stsstack since conversion kills the disks and is not needed + storage: + ephemeral-device: cinder,50G,1 nova-cloud-controller: num_units: __NUM_NOVACC_UNITS__ charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__nova-cloud-controller diff --git a/openstack/pipeline/02configure b/openstack/pipeline/02configure index 297b9e21..a517f7de 100644 --- a/openstack/pipeline/02configure +++ b/openstack/pipeline/02configure @@ -429,7 +429,7 @@ do # This equates to m1.large (rather than m1.medium) which should # allow creating 1x ubunu vm + 1x amphora vm on the same host thus # avoiding the need for > 1 compute host. - MOD_PARAMS[__NOVA_COMPUTE_UNIT_CONSTRAINTS__]="mem=8G root-disk=80G" + MOD_PARAMS[__NOVA_COMPUTE_UNIT_CONSTRAINTS__]="mem=8G cores=2" if ! 
has_opt --no-octavia-diskimage-retrofit; then # By default we let retrofit use images uploaded by the # post-deploy configure script. @@ -712,6 +712,16 @@ do MOD_MSGS[cinder-purestorage.4]="openstack volume type create cinder-ceph --property volume_backend_name=cinder-ceph # If also deploying ceph" MOD_MSGS[cinder-purestorage.5]="juju config cinder default-volume-type=cinder-ceph # or cinder-purestorage" ;; + --watcher) + if assert_min_release yoga 'watcher'; then + MOD_OVERLAYS+=( "openstack/watcher.yaml" ) + fi + ;; + --watcher-ha*) + get_units $1 __NUM_WATCHER_UNITS__ 3 + MOD_OVERLAYS+=( "openstack/watcher-ha.yaml" ) + set -- $@ --watcher && cache $@ + ;; *) echo "ERROR: invalid input '$1'" _usage diff --git a/openstack/profiles/default b/openstack/profiles/default index 25cbd576..f9663c15 100755 --- a/openstack/profiles/default +++ b/openstack/profiles/default @@ -89,10 +89,12 @@ set_img_properties () # Download images if not already present mkdir -vp ~/images +upload_image cloudimages focal focal-server-cloudimg-amd64.img & upload_image cloudimages jammy jammy-server-cloudimg-amd64.img & -upload_image cirros cirros-0.6.2 0.6.2/cirros-0.6.2-x86_64-disk.img & +upload_image cirros cirros-0.4.0 0.4.0/cirros-0.4.0-x86_64-disk.img & wait # Set properties needed by octavia-disk-image-retrofit (See LP: #1842430) +set_img_properties focal 20.04 focal-server-cloudimg-amd64.img & set_img_properties jammy 22.04 jammy-server-cloudimg-amd64.img & wait diff --git a/openstack/profiles/prodstack5 b/openstack/profiles/prodstack5 new file mode 100755 index 00000000..2357c174 --- /dev/null +++ b/openstack/profiles/prodstack5 @@ -0,0 +1,30 @@ +#!/bin/bash -ex +source ~/novarc +# this is currently assumed to be a /25 network +EXT_SUBNET=subnet_${OS_USERNAME}-psd-extra +CIDR=`openstack subnet show $EXT_SUBNET -c cidr -f value` +# We reserve the last 64 of the /25 to FIP. 
+# We get the last usable IP on the subnet +FIP_RANGE_LASTIP=`openstack subnet show ${EXT_SUBNET} -c allocation_pools -f json | jq -r '.allocation_pools[0].end'` +# Get the first major subnet information, IE, 10.149.123 on a 10.149.123.0/25 subnet +SUBNET_RANGE=${FIP_RANGE_LASTIP%.*} + +# We figure out from the last usable IP the last 64 usable IPs and concatenate it into the SUBNET_RANGE. +FIP_RANGE_FIRSTIP="${SUBNET_RANGE}.$((${FIP_RANGE_LASTIP##*.} - 64))" + + +export GATEWAY=`openstack subnet show $EXT_SUBNET -c gateway_ip -f value` + +[[ -z "$NAMESERVER" ]] && export NAMESERVER="91.189.91.131" +[[ -z "$SWIFT_IP" ]] && export SWIFT_IP="10.130.56.34" + +# Set defaults, if not already set. +[[ -z "$GATEWAY" ]] && export GATEWAY="$GATEWAY" +[[ -z "$CIDR_EXT" ]] && export CIDR_EXT="$CIDR" +[[ -z "$FIP_RANGE" ]] && export FIP_RANGE="${FIP_RANGE_FIRSTIP}:${FIP_RANGE_LASTIP}" +[[ -z "$CIDR_PRIV" ]] && export CIDR_PRIV="192.168.21.0/24" + +export UNDERCLOUD_OAM_NET="$(sed -E --quiet "s/.+OS_PROJECT_NAME=(.+)_project/net_\1-psd/p" ~/novarc)" +export UNDERCLOUD_EXT_NET="$(sed -E --quiet "s/.+OS_PROJECT_NAME=(.+)_project/net_\1-psd-extra/p" ~/novarc)" + +./profiles/default "$@" diff --git a/openstack/tools/allocate_vips.sh b/openstack/tools/allocate_vips.sh index efe9677c..814473e8 100644 --- a/openstack/tools/allocate_vips.sh +++ b/openstack/tools/allocate_vips.sh @@ -16,10 +16,9 @@ net_pre=$(echo $cidr| sed -r 's/([0-9]+\.[0-9]+\.[0-9]+).+/\1/g') j=1 -for i in $(seq ${vip_start_suffix} ${net_end}) -do +for i in $(seq ${vip_start_suffix} ${net_end}); do echo openstack port create --network net_${OS_USERNAME}-psd \ --fixed-ip subnet=subnet_${OS_USERNAME}-psd,ip-address=${net_pre}.$i \ --disable --os-cloud ps6 ps6-vip-ip$( printf "%02d" $j ) - ((j++)) -done + j=$((j+1)) +done \ No newline at end of file diff --git a/openstack/tools/charmed_openstack_functest_runner.sh b/openstack/tools/charmed_openstack_functest_runner.sh new file mode 100755 index 00000000..bea0c362 --- 
/dev/null +++ b/openstack/tools/charmed_openstack_functest_runner.sh @@ -0,0 +1,289 @@ +#!/bin/bash -eu +# +# Run Charmed Openstack CI tests manually in a similar way to how they are run +# by OpenStack CI (OSCI). +# +# Usage: clone/fetch charm to test and run from within charm root dir. +# +FUNC_TEST_PR= +FUNC_TEST_TARGET=() +MANUAL_FUNCTESTS=false +MODIFY_BUNDLE_CONSTRAINTS=true +REMOTE_BUILD= +SKIP_BUILD=false +SLEEP= +WAIT_ON_DESTROY=true + +. $(dirname $0)/func_test_tools/common.sh + +usage () { + cat << EOF +USAGE: $(basename $0) OPTIONS + +Run OpenStack charms functional tests manually in a similar way to how +Openstack CI (OSCI) would do it. This tool should be run from within a charm +root. + +Not all charms use the same versions and dependencies and an attempt is made to +cover this here but in some cases needs to be dealt with as a pre-requisite to +running the tool. For example some charms need their tests to be run using +python 3.8 and others python 3.10. Some tests might require Juju 2.9 and others +Juju 3.x - the assumption in this runner is that Juju 3.x is ok to use. + +OPTIONS: + --func-test-target TARGET_NAME + Provide the name of a specific test target to run. If none provided + all tests are run based on what is defined in osci.yaml i.e. will do + what osci would do by default. This option can be provided more than + once. + --func-test-pr PR_ID + Provides similar functionality to Func-Test-Pr in commit message. Set + to zaza-openstack-tests Pull Request ID. + --no-wait + By default we wait before destroying the model after a test run. This + flag can used to override that behaviour. + --manual-functests + Runs functest commands separately (deploy,configure,test) instead of + the entire suite. + --remote-build USER@HOST,GIT_PATH + Builds the charm in a remote location and transfers the charm file over. + The destination needs to be prepared for the build and authorized for + ssh. Implies --skip-build. Specify parameter as ,. 
+ Example: --remote-build ubuntu@10.171.168.1,~/git/charm-nova-compute + --skip-build + Skip building charm if already done to save time. + --skip-modify-bundle-constraints + By default we modify test bundle constraints to ensure that applications + have the resources they need. For example nova-compute needs to have + enough capacity to boot the vms required by the tests. + --sleep TIME_SECS + Specify amount of seconds to sleep between functest steps. + --help + This help message. +EOF +} + +while (($# > 0)); do + case "$1" in + --debug) + set -x + ;; + --func-test-target) + FUNC_TEST_TARGET+=( $2 ) + shift + ;; + --func-test-pr) + FUNC_TEST_PR=$2 + shift + ;; + --manual-functests) + MANUAL_FUNCTESTS=true + ;; + --no-wait) + WAIT_ON_DESTROY=false + ;; + --remote-build) + REMOTE_BUILD=$2 + SKIP_BUILD=true + shift + ;; + --skip-modify-bundle-constraints) + MODIFY_BUNDLE_CONSTRAINTS=false + ;; + --skip-build) + SKIP_BUILD=true + ;; + --sleep) + SLEEP=$2 + shift + ;; + --help|-h) + usage + exit 0 + ;; + *) + echo "ERROR: invalid input '$1'" + usage + exit 1 + ;; + esac + shift +done + +# Install dependencies +which yq &>/dev/null || sudo snap install yq + +# Ensure zosci-config checked out and up-to-date +get_and_update_repo https://github.com/openstack-charmers/zosci-config + +TOOLS_PATH=$(realpath $(dirname $0))/func_test_tools +# This is used generally to identify the charm root. +export CHARM_ROOT_PATH=$PWD + +# Get commit we are running tests against. 
+COMMIT_ID=$(git -C $CHARM_ROOT_PATH rev-parse --short HEAD) +CHARM_NAME=$(awk '/^name: .+/{print $2}' metadata.yaml) + +echo "Running functional tests for charm $CHARM_NAME commit $COMMIT_ID" + +source ~/novarc +export {,TEST_}CIDR_EXT=$(openstack subnet show subnet_${OS_USERNAME}-psd-extra -c cidr -f value) +FIP_MAX=$(ipcalc $CIDR_EXT| awk '$1=="HostMax:" {print $2}') +FIP_MIN=$(ipcalc $CIDR_EXT| awk '$1=="HostMin:" {print $2}') +FIP_MIN_ABC=${FIP_MIN%.*} +FIP_MIN_D=${FIP_MIN##*.} +FIP_MIN=${FIP_MIN_ABC}.$(($FIP_MIN_D + 64)) + +# Setup vips needed by zaza tests. +for ((i=2;i;i-=1)); do + export {OS,TEST}_VIP0$((i-1))=$(create_zaza_vip 0$i) +done + +# More information on config https://github.com/openstack-charmers/zaza/blob/master/doc/source/runningcharmtests.rst +export {,TEST_}NET_ID=$(openstack network show net_${OS_USERNAME}-psd-extra -f value -c id) +export {,TEST_}FIP_RANGE=$FIP_MIN:$FIP_MAX +export {,TEST_}GATEWAY=$(openstack subnet show subnet_${OS_USERNAME}-psd-extra -c gateway_ip -f value) +export {,TEST_}NAME_SERVER=91.189.91.131 +export {,TEST_}CIDR_PRIV=192.168.21.0/24 +#export SWIFT_IP=10.140.56.22 +export TEST_MODEL_SETTINGS="image-stream=released;default-series=jammy;test-mode=true;transmit-vendor-metrics=false" +# We need to set TEST_JUJU3 as well as the constraints file +# Ref: https://github.com/openstack-charmers/zaza/blob/e96ab098f00951079fccb34bc38d4ae6ebb38606/setup.py#L47 +export TEST_JUJU3=1 + +# NOTE: this should not be necessary for > juju 2.x but since we still have a need for it we add it in +export TEST_ZAZA_BUG_LP1987332=1 + +# Some charms point to an upstream constraints file that installs python-libjuju 2.x so we need to do this to ensure we get 3.x +export TEST_CONSTRAINTS_FILE=https://raw.githubusercontent.com/openstack-charmers/zaza/master/constraints-juju34.txt + +LOGFILE=$(mktemp --suffix=-charm-func-test-results) +( +# 2. Build +if ! 
$SKIP_BUILD; then + # default value is 1.5/stable, assumed that later charm likely have charmcraft_channel value + CHARMCRAFT_CHANNEL=$(grep charmcraft_channel osci.yaml | sed -r 's/.+:\s+(\S+)/\1/') + sudo snap refresh charmcraft --channel ${CHARMCRAFT_CHANNEL:-"1.5/stable"} + + # ensure lxc initialised + lxd init --auto || true + + tox -re build +elif [[ -n $REMOTE_BUILD ]]; then + IFS=',' read -ra remote_build_params <<< "$REMOTE_BUILD" + REMOTE_BUILD_DESTINATION=${remote_build_params[0]} + REMOTE_BUILD_PATH=${remote_build_params[1]} + ssh $REMOTE_BUILD_DESTINATION "cd $REMOTE_BUILD_PATH;git log -1;rm -rf *.charm;tox -re build" + rm -rf *.charm + rsync -vza $REMOTE_BUILD_DESTINATION:$REMOTE_BUILD_PATH/*.charm . +fi + +# 3. Run functional tests. + +# If a func test pr is provided switch to that pr. +if [[ -n $FUNC_TEST_PR ]]; then + apply_func_test_pr $FUNC_TEST_PR +fi + +declare -A func_target_state=() +declare -a func_target_order +if ((${#FUNC_TEST_TARGET[@]})); then + for t in ${FUNC_TEST_TARGET[@]}; do + func_target_state[$t]=null + func_target_order+=( $t ) + done +else + voting_targets=() + non_voting_targets=() + for target in $(python3 $TOOLS_PATH/identify_charm_func_test_jobs.py); do + if $(python3 $TOOLS_PATH/test_is_voting.py $target); then + voting_targets+=( $target ) + else + non_voting_targets+=( $target ) + fi + done + # Ensure voting targets processed first. + for target in ${voting_targets[@]} ${non_voting_targets[@]}; do + func_target_order+=( $target ) + func_target_state[$target]=null + done +fi + +# Ensure nova-compute has enough resources to create vms in tests. Not all +# charms have bundles with constraints set so we need to cover both cases here. 
+if $MODIFY_BUNDLE_CONSTRAINTS; then + ( + [[ -d src ]] && cd src + for f in tests/bundles/*.yaml; do + # Dont do this if the test does not have nova-compute + if $(grep -q "nova-compute:" $f); then + if [[ $(yq '.applications' $f) = null ]]; then + yq -i '.services.nova-compute.constraints="root-disk=80G mem=8G"' $f + else + yq -i '.applications.nova-compute.constraints="root-disk=80G mem=8G"' $f + fi + fi + done + ) +fi + +first=true +init_noop_target=true +for target in ${func_target_order[@]}; do + # Destroy any existing zaza models to ensure we have all the resources we + # need. + destroy_zaza_models + + # Only rebuild on first run. + if $first; then + first=false + tox_args="-re func-target" + else + tox_args="-e func-target" + fi + [[ -d src ]] && pushd src &>/dev/null || true + fail=false + _target="$(python3 $TOOLS_PATH/extract_job_target.py $target)" + if ! $MANUAL_FUNCTESTS; then + tox ${tox_args} -- $_target || fail=true + model=$(juju list-models| egrep -o "^zaza-\S+"|tr -d '*') + else + $TOOLS_PATH/manual_functests_runner.sh "$_target" $SLEEP $init_noop_target || fail=true + model=test-$target + init_noop_target=false + fi + + if $fail; then + func_target_state[$target]='fail' + else + func_target_state[$target]='success' + fi + + if $WAIT_ON_DESTROY; then + read -p "Destroy model and run next test? 
[ENTER]" + fi + + # Cleanup before next run + destroy_zaza_models +done +popd &>/dev/null || true + +# Report results +echo -e "\nTest results for charm $CHARM_NAME functional tests @ commit $COMMIT_ID:" +for target in ${func_target_order[@]}; do + if $(python3 $TOOLS_PATH/test_is_voting.py $target); then + voting_info="" + else + voting_info=" (non-voting)" + fi + + if [[ ${func_target_state[$target]} = null ]]; then + echo " * $target: SKIPPED$voting_info" + elif [[ ${func_target_state[$target]} = success ]]; then + echo " * $target: SUCCESS$voting_info" + else + echo " * $target: FAILURE$voting_info" + fi +done +) 2>&1 | tee $LOGFILE +echo -e "\nResults also saved to $LOGFILE" diff --git a/openstack/tools/create_ipv4_octavia.sh b/openstack/tools/create_ipv4_octavia.sh index ccbdd91e..0f3803e3 100755 --- a/openstack/tools/create_ipv4_octavia.sh +++ b/openstack/tools/create_ipv4_octavia.sh @@ -6,8 +6,8 @@ set -u -e -x while true; do [[ `juju status keystone --format json | jq -r '.applications.keystone.units."keystone/0"."workload-status".current'` = active ]] \ && break - if [[ `juju status keystone --format json | jq -r '.applications.keystone.units."keystone/0"."workload-status".current'` = error ]] - then + if [[ `juju status keystone --format json | \ + jq -r '.applications.keystone.units."keystone/0"."workload-status".current'` = error ]]; then echo "ERROR: Octavia deployment failed" break fi @@ -21,7 +21,7 @@ OS_PROJECT_DOMAIN_NAME=service_domain OS_USERNAME=octavia OS_PROJECT_NAME=services OS_USER_DOMAIN_NAME=service_domain -OS_PASSWORD=$(juju run --unit octavia/0 "grep -v "auth" /etc/octavia/octavia.conf | grep password" | awk '{print $3}') +OS_PASSWORD=$(juju exec --unit octavia/0 "grep -v "auth" /etc/octavia/octavia.conf | grep password" | awk '{print $3}') EOF source /tmp/novarc.services diff --git a/openstack/tools/create_nova_az_aggregates.sh b/openstack/tools/create_nova_az_aggregates.sh index 654ee817..cc08cd68 100755 --- 
a/openstack/tools/create_nova_az_aggregates.sh +++ b/openstack/tools/create_nova_az_aggregates.sh @@ -2,7 +2,9 @@ for az in az1 az2; do readarray ids<<<"`juju status nova-compute-$az --format=yaml| grep instance-id| awk '{print $2}'`" machines=() - for id in ${ids[@]}; do machines+=( `source ~/novarc; openstack server show $id| grep " name "| awk '{print $4}'` ); done + for id in ${ids[@]}; do + machines+=( `source ~/novarc; openstack server show $id| grep " name "| awk '{print $4}'` ) + done echo "Creating aggregate ${az^^}" openstack aggregate show ${az^^} &>/dev/null || openstack aggregate create --zone $az ${az^^}; for m in ${machines[@]}; do diff --git a/openstack/tools/create_octavia_lb.sh b/openstack/tools/create_octavia_lb.sh index 56e1d3ee..13c746e5 100755 --- a/openstack/tools/create_octavia_lb.sh +++ b/openstack/tools/create_octavia_lb.sh @@ -1,11 +1,15 @@ -#!/bin/bash -eux +#!/bin/bash + +set -e -u lb=lb1 declare -a member_vm=() +member_subnet= provider=amphora protocol=HTTP protocol_port=80 hm_protocol= +vip_subnet=private_subnet while (( $# > 0 )); do case $1 in @@ -25,6 +29,14 @@ while (( $# > 0 )); do member_vm+=( "$2" ) shift ;; + --member-subnet) + if (( $# < 2 )); then + echo "missing member subnet name or ID" + exit 1 + fi + member_subnet=$2 + shift + ;; --provider) if (( $# < 2 )); then echo "missing provider" @@ -54,22 +66,32 @@ while (( $# > 0 )); do echo "missing protocol for healthmonitor" exit 1 fi - hm_protocol=$2 - shift - ;; + hm_protocol=$2 + shift + ;; + --vip-subnet) + if (( $# < 2 )); then + echo "missing vip subnet name or ID" + exit 1 + fi + vip_subnet=$2 + shift + ;; -h|--help) cat < /dev/null; then - echo "ERROR: a loadbalancer called $lb already exists" +if openstack loadbalancer show ${lb} > /dev/null 2>&1; then + echo "ERROR: a loadbalancer called ${lb} already exists" exit 1 fi LB_ID=$(openstack loadbalancer create \ --name ${lb} \ - --vip-subnet-id private_subnet \ + --vip-subnet-id ${vip_subnet} \ --provider ${provider} \ 
--format value \ --column id) -# Re-run the following until $lb shows ACTIVE and ONLINE status': -openstack loadbalancer show ${LB_ID} - # wait for lb to be ACTIVE +echo -n "waiting for $lb" while true; do if [[ $(openstack loadbalancer show ${LB_ID} --column provisioning_status --format value) == ACTIVE ]]; then break fi - echo "waiting for $lb" + echo -n "." + sleep 2 done +echo LISTENER_ID=$(openstack loadbalancer listener create \ --name ${lb}-listener --protocol ${protocol} --protocol-port ${protocol_port} \ - --format value --column id $lb) + --format value --column id ${lb}) + # wait for listener to be ACTIVE +echo -n "waiting for ${lb}-listener" while true; do if [[ $(openstack loadbalancer listener show ${LISTENER_ID} --column provisioning_status --format value) == ACTIVE ]]; then break fi - echo "waiting for ${lb}-listener" + echo -n "." + sleep 2 done +echo LB_ALGORITHM=ROUND_ROBIN if [[ ${provider} == ovn ]]; then @@ -132,44 +158,56 @@ POOL_ID=$(openstack loadbalancer pool create \ --listener ${LISTENER_ID} \ --protocol ${protocol} \ --format value --column id) -# wait for pool to be ACTIVE + +echo -n "waiting for ${lb}-pool" while true; do if [[ $(openstack loadbalancer pool show ${POOL_ID} --column provisioning_status --format value) == ACTIVE ]]; then break fi - echo "waiting for ${lb}-pool" + echo -n "." 
+ sleep 2 done +echo HM_ID=$(openstack loadbalancer healthmonitor create \ --name ${lb}-healthmonitor --delay 5 --max-retries 4 --timeout 10 --type ${hm_protocol} ${url_path} ${POOL_ID} \ --format value --column id) -openstack loadbalancer healthmonitor list # Add vm(s) to pool if (( ${#member_vm[@]} == 0 )); then readarray -t member_vm < <(openstack server list --column ID --format value) - (( ${#member_vm[@]} )) || { echo "ERROR: could not find a vm to add to lb pool"; exit 1; } + if ((${#member_vm[@]}==0)); then + echo "ERROR: could not find a vm to add to lb pool" + exit 1 + fi fi for member in "${member_vm[@]}"; do - netaddr=$(openstack port list --server ${member} --network private --column "Fixed IP Addresses" --format value | \ + netaddr=$(openstack port list --server ${member} --column "Fixed IP Addresses" --format value | \ sed -rn -e "s/.+ip_address='([[:digit:]\.]+)',\s+.+/\1/" \ -e "s/.+ip_address':\s+'([[:digit:]\.]+)'}.+/\1/p") - member_id=$(openstack loadbalancer member create --subnet-id private_subnet \ - --address $netaddr --protocol-port ${protocol_port} --format value --column id ${POOL_ID}) + member_id=$(openstack loadbalancer member create --address ${netaddr} \ + $( [[ -n ${member_subnet} ]] && echo "--subnet-id ${member_subnet}" ) \ + --protocol-port ${protocol_port} --format value --column id ${POOL_ID}) + + echo -n "waiting for member ${member} (${member_id})" while true; do - [[ $(openstack loadbalancer member show --format value \ - --column provisioning_status ${POOL_ID} ${member_id}) = ACTIVE ]] \ - && break - echo "waiting for member ${member} (${member_id})" + if [[ $(openstack loadbalancer member show --format value \ + --column provisioning_status ${POOL_ID} ${member_id}) = ACTIVE ]]; then + break + fi + echo -n "." 
+ sleep 2 done + echo done -openstack loadbalancer member list ${POOL_ID} - floating_ip=$(openstack floating ip create --format value --column floating_ip_address ext_net) lb_vip_port_id=$(openstack loadbalancer show --format value --column vip_port_id ${LB_ID}) -openstack floating ip set --port $lb_vip_port_id $floating_ip + +openstack floating ip set --port ${lb_vip_port_id} ${floating_ip} + +echo "The load balancer is at floating IP ${floating_ip}" if [[ ${hm_protocol} != HTTP ]]; then exit @@ -177,44 +215,50 @@ fi L7_POLICY1_ID=$(openstack loadbalancer l7policy create --action REDIRECT_TO_POOL \ --redirect-pool ${POOL_ID} --name ${lb}-l7policy1 --format value --column id ${LISTENER_ID}) +echo -n "waiting for ${lb}-l7policy1" while true; do if [[ $(openstack loadbalancer l7policy show ${L7_POLICY1_ID} --format value --column provisioning_status) == ACTIVE ]]; then break fi - echo "waiting for ${lb}-l7policy1" + echo -n "." + sleep 2 done - -openstack loadbalancer l7policy show ${L7_POLICY1_ID} +echo L7_RULE1_ID=$(openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH \ --value /js --format value --column id ${L7_POLICY1_ID}) +echo -n "waiting for ${L7_RULE1_ID}" while true; do if [[ $(openstack loadbalancer l7rule show --format value --column provisioning_status ${L7_POLICY1_ID} ${L7_RULE1_ID}) == ACTIVE ]]; then break fi - echo "waiting for ${L7_RULE1_ID}" + echo -n "." + sleep 2 done - -openstack loadbalancer l7rule show ${L7_POLICY1_ID} ${L7_RULE1_ID} +echo L7_POLICY2_ID=$(openstack loadbalancer l7policy create --action REDIRECT_TO_POOL \ --redirect-pool ${lb}-pool --name ${lb}-l7policy2 --format value --column id ${lb}-listener) +echo -n "waiting for ${lb}-l7policy2" while true; do if [[ $(openstack loadbalancer l7policy show ${L7_POLICY2_ID} --format value --column provisioning_status) == ACTIVE ]]; then break fi - echo "waiting for ${lb}-l7policy2" + echo -n "." 
+ sleep 2 done - -openstack loadbalancer l7policy show ${L7_POLICY2_ID} +echo L7_RULE2_ID=$(openstack loadbalancer l7rule create --compare-type STARTS_WITH --type PATH \ --value /images --format value --column id ${L7_POLICY2_ID}) +echo -n "waiting for ${L7_RULE2_ID}" while true; do if [[ $(openstack loadbalancer l7rule show --format value --column provisioning_status ${L7_POLICY2_ID} ${L7_RULE2_ID}) == ACTIVE ]]; then break fi - echo "waiting for ${L7_RULE2_ID}" + echo -n "." + sleep 2 done +echo -openstack loadbalancer l7rule show ${L7_POLICY2_ID} ${L7_RULE2_ID} +echo "Load balancer is active" diff --git a/openstack/tools/create_sg_log.sh b/openstack/tools/create_sg_log.sh index bb527771..39f9465f 100755 --- a/openstack/tools/create_sg_log.sh +++ b/openstack/tools/create_sg_log.sh @@ -1,6 +1,5 @@ #!/bin/bash -u openstack network loggable resources list openstack network log create --resource-type security_group \ - --description "Collecting all security events" \ - --enable --event ALL Log_Created - + --description "Collecting all security events" \ + --enable --event ALL Log_Created \ No newline at end of file diff --git a/openstack/tools/delete_project.sh b/openstack/tools/delete_project.sh index 2f63bd31..1d799ecc 100755 --- a/openstack/tools/delete_project.sh +++ b/openstack/tools/delete_project.sh @@ -10,9 +10,12 @@ openstack floating ip list --project $project_name --project-domain $domain -c I openstack router unset --external-gateway ${project_name}-router & -readarray -t ports<<<`openstack port list --router ${project_name}-router -c id -c device_owner -f value| awk '$2=="network:ha_router_replicated_interface" {print $1}'` -((${#ports[@]})) && [ -n "${ports[0]}" ] || \ - readarray -t ports<<<`openstack port list --router ${project_name}-router -c id -c device_owner -f value| awk '$2=="network:router_interface_distributed" {print $1}'` +readarray -t ports<<<`openstack port list --router ${project_name}-router -c id -c device_owner -f value| \ + awk 
'$2=="network:ha_router_replicated_interface" {print $1}'` +if ! ((${#ports[@]})) && [ -n "${ports[0]}" ]; then + readarray -t ports<<<`openstack port list --router ${project_name}-router -c id -c device_owner -f value| \ + awk '$2=="network:router_interface_distributed" {print $1}'` +fi declare -A subnets=() if ((${#ports[@]})) && [ -n "${ports[0]}" ]; then diff --git a/openstack/tools/enable_samltestid.sh b/openstack/tools/enable_samltestid.sh index 11ebbc88..b4c5bd8b 100755 --- a/openstack/tools/enable_samltestid.sh +++ b/openstack/tools/enable_samltestid.sh @@ -50,8 +50,8 @@ juju attach-resource keystone-saml-mellon idp-metadata=./$IDP_XML status='foo' while [[ $status != 'active' ]]; do - sleep 3 - status=$(juju status keystone-saml-mellon --format json | jq -r '."applications"."keystone-saml-mellon"."application-status"."current"') + sleep 3 + status=$(juju status keystone-saml-mellon --format json | jq -r '."applications"."keystone-saml-mellon"."application-status"."current"') done juju $JUJU_RUN_CMD --format=json keystone-saml-mellon/0 get-sp-metadata > sp-metadata.json @@ -60,14 +60,14 @@ if [ $JUJU_VERSION -eq 2 ]; then cat sp-metadata.json | jq -r '."unit-keystone-saml-mellon-0".results.output' > sp-metadata.xml else cat sp-metadata.json | jq -r '."keystone-saml-mellon/0".results.output' > sp-metadata.xml -fi +fi juju attach-resource test-saml-idp1 sp-metadata=./sp-metadata.xml status='foo' while [[ $status != 'active' ]]; do - sleep 3 - status=$(juju status test-saml-idp1 --format json | jq -r '."applications"."test-saml-idp1"."application-status"."current"') + sleep 3 + status=$(juju status test-saml-idp1 --format json | jq -r '."applications"."test-saml-idp1"."application-status"."current"') done ENTITY_ID=$(egrep -o "entityID=\"(.*)\"" idp-metadata.xml | cut -d "=" -f2 | cut -d '"' -f2) diff --git a/openstack/tools/float_all.sh b/openstack/tools/float_all.sh index e40ef704..0a1cd5ae 100755 --- a/openstack/tools/float_all.sh +++ 
b/openstack/tools/float_all.sh @@ -4,7 +4,7 @@ echo " + Floating all instances." -function get_ip_f() { +function get_ip_f { # Get first unallocated floating IP openstack floating ip list | awk '/None/ { print $4; exit }' } diff --git a/openstack/tools/func_test_tools/__init__.py b/openstack/tools/func_test_tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack/tools/func_test_tools/common.py b/openstack/tools/func_test_tools/common.py new file mode 100644 index 00000000..1a556d8b --- /dev/null +++ b/openstack/tools/func_test_tools/common.py @@ -0,0 +1,117 @@ +""" Common helpers for func test runners. """ +from functools import cached_property +import os + +import yaml + + +class ZOSCIConfig(): + """ Extract information from zosci-config """ + def __init__(self, path): + self.path = path + + @cached_property + def project_templates(self): + """ + Generator returning each project-template defined. + """ + with open(os.path.join(self.path, 'zuul.d/project-templates.yaml'), + encoding='utf-8') as fd: + yield from yaml.safe_load(fd) + + def get_branch_jobs(self, branch, project_templates): + """ + For a given branch name, find all jobs that need to be run against that + branch. 
+ """ + test_jobs = [] + for t in self.project_templates: + t = t['project-template'] + + # only look at functional test jobs + if 'functional' not in t['name']: + continue + + if t['name'] not in project_templates: + continue + + if 'check' not in t or 'jobs' not in t['check']: + continue + + for jobs in t['check']['jobs']: + if not isinstance(jobs, dict): + test_jobs.append(jobs) + continue + + for job, info in jobs.items(): + if t['name'] == 'charm-functional-jobs': + if branch not in info['branches']: + continue + + test_jobs.append(job) + + return test_jobs + + +class OSCIConfig(): + """ Extract information from osci.yaml """ + def __init__(self): + path = os.path.join(os.environ.get('CHARM_ROOT_PATH', ''), 'osci.yaml') + with open(path, encoding='utf-8') as fd: + self._osci_config = yaml.safe_load(fd) + + @cached_property + def project_templates(self): + """ Returns all project templates. """ + for item in self._osci_config: + if 'project' not in item: + continue + + return item['project'].get('templates', []) + + return [] + + @cached_property + def project_check_jobs(self): + """ Generator returning all project check jobs defined. """ + for item in self._osci_config: + if 'project' not in item: + continue + + if 'check' not in item['project']: + continue + + yield from item['project']['check'].get('jobs', []) + + @property + def jobs(self): + """ Generator returning all job definitions. """ + for item in self._osci_config: + if 'job' in item: + yield item['job'] + + def get_job(self, name): + """ Get job by name. + + @param name: string name + """ + for job in self.jobs: + if job['name'] == name: + return job + + return None + + def get_project_check_job(self, name): + """ Get job by name from project.check.jobs. Return can be string name + or dict. 
+ + @param name: string name + """ + for job in self.project_check_jobs: + if isinstance(job, dict): + if name in job: + return job + elif job == name: + return job + + return None diff --git a/openstack/tools/func_test_tools/common.sh b/openstack/tools/func_test_tools/common.sh new file mode 100644 index 00000000..56c5a2a1 --- /dev/null +++ b/openstack/tools/func_test_tools/common.sh @@ -0,0 +1,75 @@ +destroy_zaza_models () +{ + if $(juju destroy-model --help| grep -q "no-prompt"); then + j3=true + else + j3=false + fi + for model in $(juju list-models| egrep -o "^zaza-\S+"|tr -d '*'); do + if $j3; then + juju destroy-model --no-prompt --force --no-wait \ + --destroy-storage $model || true + else + juju destroy-model --yes --force --no-wait --destroy-storage \ + $model || true + fi + done +} + +get_and_update_repo () +{ + url=$1 + name=$(basename $url) + path=${2:-$HOME} + ( + cd $path + if [[ -d $name ]]; then + cd $name + git checkout master + git pull + else + git clone $url + fi + ) +} + +apply_func_test_pr () +{ + # Similar to https://github.com/openstack-charmers/zosci-config/blob/master/roles/handle-func-test-pr/tasks/main.yaml#L19 + local pr_id=$1 + # We use the zosci-config tools to do this. + local msg=$(echo "Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/$pr_id"| base64) + ~/zosci-config/roles/handle-func-test-pr/files/process_func_test_pr.py \ + -f './test-requirements*.txt' \ + -f './src/test-requirements*.txt' \ + "$msg" +} + +allocate_port () +{ + # Returns address of port created. + # + local net_name=$1 + local port_name=$2 + local port_id=$(openstack port create --network $net_name $port_name -c id -f value) + openstack port show -c fixed_ips $port_id -f yaml| yq .fixed_ips[0].ip_address +} + +create_zaza_vip () +{ + # Allocates a vip ensuring to use existing ones if they exist. + # + # Returns the address of the vip. 
+ # + local vip_id=$1 + # We use the same naming convention as ../tools/allocate_vips.sh to avoid conflicts and re-use + # those vips. + vip_port_name=ps6-vip-ip$vip_id + vip_addr=$(openstack port show -c fixed_ips $vip_port_name -f yaml| yq .fixed_ips[0].ip_address) + if [[ $vip_addr = null ]]; then + # Pre-allocate ports with addresses used for VIPs so that they don't + # collide with the deployment itself. + vip_addr=$(allocate_port net_${OS_USERNAME}-psd $vip_port_name) + fi + echo $vip_addr +} diff --git a/openstack/tools/func_test_tools/extract_job_target.py b/openstack/tools/func_test_tools/extract_job_target.py new file mode 100644 index 00000000..f917d18e --- /dev/null +++ b/openstack/tools/func_test_tools/extract_job_target.py @@ -0,0 +1,33 @@ +""" +If a job has an accompanying vars section that specifies a tox command with +target names we need to run those instead of the job name. +""" +import re +import sys + +from common import OSCIConfig # pylint: disable=import-error + + +def extract_job_target(testjob): + """ + Some jobs map directly to target names and some needs to be de-refenced by + looking for the job definition and extracting the target from the tox + command. Returns jobname if no dereference available. + + @param job: job name + """ + osci = OSCIConfig() + job = osci.get_job(testjob) + if not job or 'vars' not in job or 'tox_extra_args' not in job['vars']: + return testjob + + ret = re.search(r"(?:--)?\s*(.+)", + str(job['vars']['tox_extra_args'])) + if not ret: + return testjob + + return ret.group(1) + + +if __name__ == "__main__": + print(extract_job_target(sys.argv[1])) diff --git a/openstack/tools/func_test_tools/identify_charm_func_test_jobs.py b/openstack/tools/func_test_tools/identify_charm_func_test_jobs.py new file mode 100644 index 00000000..28f3913a --- /dev/null +++ b/openstack/tools/func_test_tools/identify_charm_func_test_jobs.py @@ -0,0 +1,60 @@ +""" +Get names of test jobs that OSCI would run for the given charm. 
Should be +run from within the charm root. + +Outputs space separated list of job names. +""" +import configparser +import os + +from common import ZOSCIConfig, OSCIConfig # pylint: disable=import-error + + +def get_local_jobs_and_deps(jobs): + """ + Get any locally defined jobs and add them to the list of jobs provided. + + @param jobs: list of already identified jobs. + """ + deps = [] + local_jobs = [] + osci = OSCIConfig() + project_check_jobs = list(osci.project_check_jobs) + all_jobs = project_check_jobs + jobs + for jobname in all_jobs: + if isinstance(jobname, dict): + jobname = list(jobname.keys())[0] + + job = osci.get_job(jobname) + if not job: + continue + + local_jobs.append(jobname) + + # Some jobs will depend on other tests that need to be run but + # are not defined in tests.yaml so we need to add them from + # here as well. + for name in job.get('dependencies', []): + if name in project_check_jobs: + deps.append(name) + + return deps + jobs + local_jobs + + +def get_default_jobs(): + """ + Get all jobs we need to run by default for the given branch. 
+ """ + path = os.path.join(os.environ['HOME'], 'zosci-config') + c = configparser.ConfigParser() + c.read('.gitreview') + branch = c['gerrit'].get('defaultbranch', 'master') + osci = OSCIConfig() + jobs = ZOSCIConfig(path).get_branch_jobs(branch, osci.project_templates) + return jobs + + +if __name__ == "__main__": + _jobs = get_default_jobs() + _jobs = get_local_jobs_and_deps(list(set(_jobs))) + print(' '.join(sorted(set(_jobs)))) diff --git a/openstack/tools/func_test_tools/manual_functests_runner.sh b/openstack/tools/func_test_tools/manual_functests_runner.sh new file mode 100755 index 00000000..78611249 --- /dev/null +++ b/openstack/tools/func_test_tools/manual_functests_runner.sh @@ -0,0 +1,55 @@ +#!/bin/bash -eu + +# This file assists the main charmed_functest_runner script but can also be invoked separately +# by passing the target (jammy-antelope, focal-yoga, etc) to run and a sleep timer between the +# configure and test run. If run manually, the .charm file must exist in the source code folder +# and the environment variables need to have been exported prior to invoking this script. +# +# What this script does is run the functions functest-deploy, functest-configure and +# functest-test separately one after the other, instead of the entire suite run in the +# same command that the command functest-target does. +# +# The main advantages of this is that it is easier for debugging, and it can also help +# when there are race conditions running the charm test (by using the sleep parameter). +# +# Ideally, all those issues should be worked out in zaza, but having this alternative +# makes it easier for debugging, testing, and validating the race condition. 
+ +TARGET=$1 +SLEEP=$2 +INIT_NOOP_TARGET=$3 + +if [[ $INIT_NOOP_TARGET = true ]]; then + tox -re func-noop +fi + +juju add-model test-$TARGET --no-switch + +# Those below are the parameters that are used when functest-target creates a model named "zaza-" + +juju model-config -m test-$TARGET test-mode=true transmit-vendor-metrics=false enable-os-upgrade=false default-series=jammy automatically-retry-hooks=false + +source ./.tox/func-noop/bin/activate + +functest-deploy -b tests/bundles/$TARGET.yaml -m test-$TARGET + +juju status -m test-$TARGET + +functest-configure -m test-$TARGET + +juju status -m test-$TARGET + +echo "Sleeping for $SLEEP seconds" + +sleep $SLEEP + +echo "Woke up" + +juju status -m test-$TARGET + +functest-test -m test-$TARGET + +juju status -m test-$TARGET + +echo "Finished $TARGET" + diff --git a/openstack/tools/func_test_tools/test_is_voting.py b/openstack/tools/func_test_tools/test_is_voting.py new file mode 100644 index 00000000..f735c310 --- /dev/null +++ b/openstack/tools/func_test_tools/test_is_voting.py @@ -0,0 +1,65 @@ +""" +Takes a func test target name as input. + + - Exit return code 0 == voting + - Exit return code 1 == non-voting +""" +import os +import sys + +from common import ( # pylint: disable=import-error + OSCIConfig, + ZOSCIConfig, +) + + +def is_job_voting(job, name): + """ + Jobs are voting by default so only return False if there is a match and it + is a dict with voting=False. + """ + if isinstance(job, dict): + if name in job: + return job[name].get('voting', True) + + return True + + +def is_test_voting(): + """ + Exit with 1 if test is non-voting otherwise 0. 
+ """ + test_job = sys.argv[1] + # First look for the func-target in osci + if os.path.exists('osci.yaml'): + osci_config = OSCIConfig() + try: + job = osci_config.get_project_check_job(test_job) + if not is_job_voting(job, test_job): + sys.exit(1) + + # default is true + except KeyError as exc: + sys.stderr.write(f"ERROR: failed to process osci.yaml - assuming " + f"{test_job} is voting (key {exc} not found)." + "\n") + + # If the target was not found in osci.yaml then osci will fallback to zosci + project_template = 'charm-functional-jobs' + for template in osci_config.project_templates: + if 'functional' in template: + project_template = template + + path = os.path.join(os.environ['HOME'], 'zosci-config') + for project in ZOSCIConfig(path).project_templates: + t = project['project-template'] + if ('functional' not in t['name'] or t['name'] != project_template): + continue + + for job in t['check']['jobs']: + if not is_job_voting(job, test_job): + sys.exit(1) + + +if __name__ == "__main__": + is_test_voting() diff --git a/openstack/tools/install_local_ca.sh b/openstack/tools/install_local_ca.sh index 79ef0807..32e68bbd 100755 --- a/openstack/tools/install_local_ca.sh +++ b/openstack/tools/install_local_ca.sh @@ -18,12 +18,12 @@ if ((`juju status --format=json| jq -r '.applications[]| select(."charm-name"==" model_ca_cert_path=/tmp/stsstack-bundles.ssl.$model_uuid if ! validate_or_remove_ca ${model_ca_cert_path}; then - echo "Fetching CA cert from vault" 1>&2 - juju $JUJU_RUN_CMD --format=json vault/leader get-root-ca| jq -r .[].results.output > $model_ca_cert_path - if ! validate_or_remove_ca $model_ca_cert_path; then - echo "Didn't get a certificate from vault, check it's status and if necessary use ./tools/vault-unseal-and-authorise.sh" 1>&2 - exit 1 - fi + echo "Fetching CA cert from vault" 1>&2 + juju $JUJU_RUN_CMD --format=json vault/leader get-root-ca| jq -r .[].results.output > $model_ca_cert_path + if ! 
validate_or_remove_ca $model_ca_cert_path; then + echo "Didn't get a certificate from vault, check it's status and if necessary use ./tools/vault-unseal-and-authorise.sh" 1>&2 + exit 1 + fi fi elif [ -n "`juju config keystone ssl_cert`" ]; then MOD_DIR=$(dirname $0)/.. diff --git a/openstack/tools/instance_launch.sh b/openstack/tools/instance_launch.sh index ada9e523..918559e1 100755 --- a/openstack/tools/instance_launch.sh +++ b/openstack/tools/instance_launch.sh @@ -126,9 +126,9 @@ done # Determining flavor to use if [[ "${image_name}" =~ cirros ]]; then - flavor="m1.cirros" + flavor="m1.cirros" else - flavor="m1.small" + flavor="m1.small" fi # Create instances diff --git a/openstack/tools/openstack_regression_tests_runner.sh b/openstack/tools/openstack_regression_tests_runner.sh new file mode 100755 index 00000000..8b470996 --- /dev/null +++ b/openstack/tools/openstack_regression_tests_runner.sh @@ -0,0 +1,157 @@ +#!/bin/bash -eu +# +# Run Openstack regression tests. +# +FUNC_TEST_PR= +FUNC_TEST_TARGET= +IMAGES_PATH=$HOME/tmp +MODIFY_BUNDLE_CONSTRAINTS=true + +. $(dirname $0)/func_test_tools/common.sh + +usage () { + cat << EOF +USAGE: $(basename $0) OPTIONS + +Run Openstack regression tests. + +OPTIONS: + --func-test-target TARGET_NAME + Provide the name of a specific test target to run. + --func-test-pr PR_ID + Provides similar functionality to Func-Test-Pr in commit message. Set + to zaza-openstack-tests Pull Request ID. + --skip-modify-bundle-constraints + By default we modify test bundle constraints to ensure that applications + have the resources they need. For example nova-compute needs to have + enough capacity to boot the vms required by the tests. + --help + This help message. 
+EOF +} + +while (($# > 0)); do + case "$1" in + --debug) + set -x + ;; + --func-test-target) + FUNC_TEST_TARGET=$2 + shift + ;; + --func-test-pr) + FUNC_TEST_PR=$2 + shift + ;; + --skip-modify-bundle-constraints) + MODIFY_BUNDLE_CONSTRAINTS=false + ;; + --help|-h) + usage + exit 0 + ;; + *) + echo "ERROR: invalid input '$1'" + usage + exit 1 + ;; + esac + shift +done + +if [[ -z $FUNC_TEST_TARGET ]]; then + echo "ERROR: must provide a target name with --func-test-target" + exit 1 +fi + +# This is required for magnum tests and zaza will look in swift if it is not cached so we need to cache it first. +# Each openstack version uses a different fedora version, for example, focal-ussuri uses 32, +# link for download: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20201104.3.0/x86_64/fedora-coreos-32.20201104.3.0-openstack.x86_64.qcow2.xz +# All versions can be found in https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/builds.json +mkdir -p $IMAGES_PATH +if [[ ! -f $IMAGES_PATH/fedora-coreos-35.qcow2 ]]; then + wget https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/35.20220424.3.0/x86_64/fedora-coreos-35.20220424.3.0-openstack.x86_64.qcow2.xz -O $IMAGES_PATH/fedora-coreos-35.qcow2.xz + (cd $IMAGES_PATH; xz -d fedora-coreos-35.qcow2.xz; ) +fi + +# Install dependencies +which yq &>/dev/null || sudo snap install yq + +# Ensure zosci-config checked out and up-to-date +get_and_update_repo https://github.com/openstack-charmers/zosci-config + +TOOLS_PATH=$(realpath $(dirname $0))/func_test_tools +CHARM_PATH=$PWD + +echo "Running regression tests" + +source ~/novarc +export {,TEST_}CIDR_EXT=$(openstack subnet show subnet_${OS_USERNAME}-psd-extra -c cidr -f value) +FIP_MAX=$(ipcalc $CIDR_EXT| awk '$1=="HostMax:" {print $2}') +FIP_MIN=$(ipcalc $CIDR_EXT| awk '$1=="HostMin:" {print $2}') +FIP_MIN_ABC=${FIP_MIN%.*} +FIP_MIN_D=${FIP_MIN##*.} +FIP_MIN=${FIP_MIN_ABC}.$(($FIP_MIN_D + 64)) + +# Setup vips needed by zaza tests. 
+for ((i=2;i;i-=1)); do + export {OS,TEST}_VIP0$((i-1))=$(create_zaza_vip 0$i) +done + +# More information on config https://github.com/openstack-charmers/zaza/blob/master/doc/source/runningcharmtests.rst +export {,TEST_}NET_ID=$(openstack network show net_${OS_USERNAME}-psd-extra -f value -c id) +export {,TEST_}FIP_RANGE=$FIP_MIN:$FIP_MAX +export {,TEST_}GATEWAY=$(openstack subnet show subnet_${OS_USERNAME}-psd-extra -c gateway_ip -f value) +export {,TEST_}NAME_SERVER=91.189.91.131 +export {,TEST_}CIDR_PRIV=192.168.21.0/24 +export {,TEST_}SWIFT_IP=10.140.56.22 +export TEST_MODEL_SETTINGS="image-stream=released;default-series=jammy;test-mode=true;transmit-vendor-metrics=false" +# We need to set TEST_JUJU3 as well as the constraints file +# Ref: https://github.com/openstack-charmers/zaza/blob/e96ab098f00951079fccb34bc38d4ae6ebb38606/setup.py#L47 +export TEST_JUJU3=1 + +# NOTE: this should not be necessary for > juju 2.x but since we still have a need for it we add it in +export TEST_ZAZA_BUG_LP1987332=1 + +# Some charms point to an upstream constraints file that installs python-libjuju 2.x so we need to do this to ensure we get 3.x +export TEST_CONSTRAINTS_FILE=https://raw.githubusercontent.com/openstack-charmers/zaza/master/constraints-juju34.txt + +# NOTE: this is the default applied in zaza-openstack-tests code but setting +# explicitly so we can use locally. +export TEST_TMPDIR=$HOME/tmp +mkdir -p $TEST_TMPDIR + +# required by octavia-tempest-plugin +# go build -a -ldflags '-s -w -extldflags -static' -o test_server.bin octavia_tempest_plugin/contrib/test_server/test_server.go +if [[ ! 
-f $TEST_TMPDIR/test_server.bin ]]; then + cp $(dirname $0)/tempest_test_resources/test_server.bin $TEST_TMPDIR +fi + +LOGFILE=$(mktemp --suffix=-openstack-release-test-results) +( + # Ensure charmed-openstack-tester checked out and up-to-date + get_and_update_repo https://github.com/openstack-charmers/charmed-openstack-tester + cd ~/charmed-openstack-tester + # Ensure nova-compute has enough resources to create vms in tests. + if $MODIFY_BUNDLE_CONSTRAINTS; then + for f in tests/distro-regression/tests/bundles/*.yaml; do + # Dont do this if the test does not have nova-compute + if $(grep -q "nova-compute:" $f); then + if [[ $(yq '.applications' $f) = null ]]; then + yq -i '.services.nova-compute.constraints="root-disk=40G mem=4G"' $f + else + yq -i '.applications.nova-compute.constraints="root-disk=40G mem=4G"' $f + fi + fi + done + fi + + # If a func test pr is provided switch to that pr. + if [[ -n $FUNC_TEST_PR ]]; then + apply_func_test_pr $FUNC_TEST_PR + fi + + tox -re func-target -- $FUNC_TEST_TARGET || true + model=$(juju list-models| egrep -o "^zaza-\S+"|tr -d '*') +) 2>&1 | tee $LOGFILE +echo -e "\nResults also saved to $LOGFILE" diff --git a/openstack/tools/setup_tempest.sh b/openstack/tools/setup_tempest.sh index cfacc17f..bebddcc5 100755 --- a/openstack/tools/setup_tempest.sh +++ b/openstack/tools/setup_tempest.sh @@ -9,15 +9,15 @@ source ${scriptpath}/../novarc # TODO: remove fallbacks once we move to queens (they are there for clients still on ocata) ext_net=$(openstack network list --name ext_net -f value -c ID 2>/dev/null || openstack network list| awk '$4=="ext_net" {print $2}') router=$(openstack router list --name provider-router -f value -c ID 2>/dev/null || openstack router list| awk '$4=="provider-router" {print $2}') -keystone=$(juju run --unit keystone/0 unit-get private-address) -ncc=$(juju run --unit nova-cloud-controller/0 unit-get private-address) +keystone=$(juju exec --unit keystone/0 unit-get private-address) +ncc=$(juju exec --unit 
nova-cloud-controller/0 unit-get private-address) http=${OS_AUTH_PROTOCOL:-http} if is_ksv3; then default_domain_id=$(openstack domain list | awk '/default/ {print $2}') else dashboard="localhost" set +e - dashboard_ip=$(juju run --unit openstack-dashboard/0 unit-get private-address) + dashboard_ip=$(juju exec --unit openstack-dashboard/0 unit-get private-address) if [ "$?" == "1" ]; then dashboard=$dashboard_ip fi diff --git a/openstack/tools/tempest_test_resources/test_server.bin b/openstack/tools/tempest_test_resources/test_server.bin new file mode 100755 index 00000000..25c17837 Binary files /dev/null and b/openstack/tools/tempest_test_resources/test_server.bin differ diff --git a/overlays/cos/ceph-csi.yaml b/overlays/cos/ceph-csi.yaml new file mode 100644 index 00000000..1acd94aa --- /dev/null +++ b/overlays/cos/ceph-csi.yaml @@ -0,0 +1,11 @@ +applications: + ceph-csi: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__ceph-csi + # See https://github.com/charmed-kubernetes/ceph-csi-operator/issues/22 + channel: 1.31/stable + options: + provisioner-replicas: 1 + namespace: kube-system +relations: + - [ ceph-csi:ceph-client, ceph-mon:client ] + - [ ceph-csi:kubernetes-info, microk8s ] diff --git a/overlays/cos/charmed-ceph-lxd.yaml b/overlays/cos/charmed-ceph-lxd.yaml new file mode 100644 index 00000000..10854fbc --- /dev/null +++ b/overlays/cos/charmed-ceph-lxd.yaml @@ -0,0 +1,34 @@ +machines: + '0': + constraints: __MACHINE1_CONSTRAINTS__ + series: __SERIES__ + '1': + constraints: __MACHINE2_CONSTRAINTS__ + series: __SERIES__ + '2': + constraints: __MACHINE3_CONSTRAINTS__ + series: __SERIES__ +applications: + ceph-mon: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__ceph-mon + channel: quincy/stable + num_units: __NUM_CEPH_MON_UNITS__ + to: + - 0 + - 1 + - 2 + ceph-osd: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__ceph-osd + channel: quincy/stable + num_units: __NUM_CEPH_OSD_UNITS__ + to: + - 0 + - 1 + - 2 + options: + 
osd-devices: '' # must be empty string when using juju storage + config-flags: '{"osd": {"osd memory target": 1073741824}}' # matching 2G constraint + storage: + osd-devices: cinder,10G,1 +relations: + - [ ceph-osd:mon, ceph-mon:osd ] diff --git a/overlays/cos/charmed-ceph.yaml b/overlays/cos/charmed-ceph.yaml new file mode 100644 index 00000000..10854fbc --- /dev/null +++ b/overlays/cos/charmed-ceph.yaml @@ -0,0 +1,34 @@ +machines: + '0': + constraints: __MACHINE1_CONSTRAINTS__ + series: __SERIES__ + '1': + constraints: __MACHINE2_CONSTRAINTS__ + series: __SERIES__ + '2': + constraints: __MACHINE3_CONSTRAINTS__ + series: __SERIES__ +applications: + ceph-mon: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__ceph-mon + channel: quincy/stable + num_units: __NUM_CEPH_MON_UNITS__ + to: + - 0 + - 1 + - 2 + ceph-osd: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__ceph-osd + channel: quincy/stable + num_units: __NUM_CEPH_OSD_UNITS__ + to: + - 0 + - 1 + - 2 + options: + osd-devices: '' # must be empty string when using juju storage + config-flags: '{"osd": {"osd memory target": 1073741824}}' # matching 2G constraint + storage: + osd-devices: cinder,10G,1 +relations: + - [ ceph-osd:mon, ceph-mon:osd ] diff --git a/overlays/cos/cos-lite-offers.yaml b/overlays/cos/cos-lite-offers.yaml new file mode 100644 index 00000000..e1ef9852 --- /dev/null +++ b/overlays/cos/cos-lite-offers.yaml @@ -0,0 +1,24 @@ +applications: + alertmanager: + offers: + alertmanager-karma-dashboard: + endpoints: + - karma-dashboard + grafana: + offers: + grafana-dashboards: + endpoints: + - grafana-dashboard + loki: + offers: + loki-logging: + endpoints: + - logging + prometheus: + offers: + prometheus-scrape: + endpoints: + - metrics-endpoint + prometheus-receive-remote-write: + endpoints: + - receive-remote-write diff --git a/overlays/cos/cos-proxy-filebeat.yaml b/overlays/cos/cos-proxy-filebeat.yaml new file mode 100644 index 00000000..48d0f7fc --- /dev/null +++ 
b/overlays/cos/cos-proxy-filebeat.yaml @@ -0,0 +1,10 @@ +applications: + filebeat: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__filebeat + channel: stable + options: + logpath: /var/log/*.log /var/log/*/*.log /var/log/syslog +relations: + - [ceph-mon:juju-info, filebeat:beats-host] + - [cos-proxy:filebeat, filebeat:logstash] + - [cos-proxy:juju-info, filebeat:beats-host] diff --git a/overlays/cos/cos-proxy.yaml b/overlays/cos/cos-proxy.yaml new file mode 100644 index 00000000..ac30cb59 --- /dev/null +++ b/overlays/cos/cos-proxy.yaml @@ -0,0 +1,18 @@ +machines: + '0': + constraints: __MACHINE1_CONSTRAINTS__ + series: __SERIES__ + '1': + constraints: __MACHINE2_CONSTRAINTS__ + series: __SERIES__ + '2': + constraints: __MACHINE3_CONSTRAINTS__ + series: __SERIES__ +applications: + cos-proxy: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__cos-proxy + channel: stable + num_units: 1 + to: + - 0 + diff --git a/overlays/cos/microceph.yaml b/overlays/cos/microceph.yaml new file mode 100644 index 00000000..1a2c6f5f --- /dev/null +++ b/overlays/cos/microceph.yaml @@ -0,0 +1,20 @@ +machines: + '0': + constraints: __MACHINE1_CONSTRAINTS__ + series: __SERIES__ + '1': + constraints: __MACHINE2_CONSTRAINTS__ + series: __SERIES__ + '2': + constraints: __MACHINE3_CONSTRAINTS__ + series: __SERIES__ +applications: + microceph: + charm: microceph + channel: quincy/stable + num_units: __NUM_CEPH_OSD_UNITS__ + to: + - 0 + - 1 + - 2 + diff --git a/overlays/kubernetes/k8s-containerd.yaml b/overlays/kubernetes/k8s-containerd.yaml index c2c3e09d..8b789ed8 100644 --- a/overlays/kubernetes/k8s-containerd.yaml +++ b/overlays/kubernetes/k8s-containerd.yaml @@ -1,6 +1,10 @@ applications: containerd: charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__containerd + options: + http_proxy: __CONTAINERD_PROXY__ + https_proxy: __CONTAINERD_PROXY__ + no_proxy: __CONTAINERD_NO_PROXY__ relations: - [ 'containerd:containerd', 'kubernetes-worker:container-runtime' ] - [ 
'containerd:containerd', 'kubernetes-control-plane:container-runtime' ] diff --git a/overlays/kubernetes/k8s-iam-grafana.yaml b/overlays/kubernetes/k8s-iam-grafana.yaml new file mode 100644 index 00000000..63f6e387 --- /dev/null +++ b/overlays/kubernetes/k8s-iam-grafana.yaml @@ -0,0 +1,11 @@ +applications: + grafana: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__grafana-k8s + scale: 1 + series: focal + storage: + database: kubernetes,1,1024M +relations: + - [grafana:ingress, traefik-public:traefik-route] + - [grafana:oauth, hydra:oauth] + - [grafana:receive-ca-cert, self-signed-certificates:send-ca-cert] diff --git a/overlays/kubernetes/k8s-iam-oidc.yaml b/overlays/kubernetes/k8s-iam-oidc.yaml new file mode 100644 index 00000000..445c7710 --- /dev/null +++ b/overlays/kubernetes/k8s-iam-oidc.yaml @@ -0,0 +1,9 @@ +applications: + kratos-external-idp-integrator: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__kratos-external-idp-integrator + scale: 1 + series: jammy + options: + provider: generic +relations: + - [kratos-external-idp-integrator:kratos-external-idp, kratos:kratos-external-idp] diff --git a/overlays/openstack/vault-openstack-secrets.yaml b/overlays/openstack/vault-openstack-secrets.yaml index 15e35094..aeab8ffe 100644 --- a/overlays/openstack/vault-openstack-secrets.yaml +++ b/overlays/openstack/vault-openstack-secrets.yaml @@ -2,7 +2,5 @@ applications: nova-compute: options: encrypt: True - storage: - ephemeral-device: cinder,50G,1 relations: - ['nova-compute:secrets-storage', 'vault:secrets'] diff --git a/overlays/openstack/watcher-ha.yaml b/overlays/openstack/watcher-ha.yaml new file mode 100644 index 00000000..d0e490e0 --- /dev/null +++ b/overlays/openstack/watcher-ha.yaml @@ -0,0 +1,10 @@ +applications: + watcher: + options: + vip: __VIP__ + watcher-hacluster: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__hacluster + options: + cluster_count: __NUM_WATCHER_UNITS__ +relations: + - [ watcher, watcher-hacluster ] diff 
--git a/overlays/openstack/watcher.yaml b/overlays/openstack/watcher.yaml new file mode 100644 index 00000000..7e8ebe39 --- /dev/null +++ b/overlays/openstack/watcher.yaml @@ -0,0 +1,16 @@ +# Variables +debug: &debug True +openstack_origin: &openstack_origin __OS_ORIGIN__ + +applications: + watcher: + charm: __CHARM_STORE____CHARM_CS_NS____CHARM_CH_PREFIX__watcher + num_units: __NUM_WATCHER_UNITS__ + constraints: mem=2G + options: + debug: *debug + openstack-origin: *openstack_origin +relations: + - [ watcher:shared-db, __MYSQL_INTERFACE__ ] + - [ watcher:identity-service, keystone:identity-service ] + - [ watcher:amqp, rabbitmq-server:amqp ] diff --git a/overlays/unit_placement/cos-proxy.yaml.template b/overlays/unit_placement/cos-proxy.yaml.template new file mode 100644 index 00000000..56e192e1 --- /dev/null +++ b/overlays/unit_placement/cos-proxy.yaml.template @@ -0,0 +1,3 @@ + cos-proxy: + to: +__UNIT_PLACEMENT_METAL__.__UNITS__.__NUM_COS_PROXY_UNITS__ diff --git a/overlays/unit_placement/microk8s.yaml.template b/overlays/unit_placement/microk8s.yaml.template new file mode 100644 index 00000000..b688e351 --- /dev/null +++ b/overlays/unit_placement/microk8s.yaml.template @@ -0,0 +1,3 @@ + microk8s: + to: +__UNIT_PLACEMENT_METAL__.__UNITS__.__NUM_MICROK8S_UNITS__ diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..0ab2bdfc --- /dev/null +++ b/pylintrc @@ -0,0 +1,28 @@ +# This is a templated file and must be kept up-to-date with the original +# from upstream at https://github.com/canonical/se-tooling-ci-common. +[MAIN] +jobs=0 +ignore=.git + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins=pylint.extensions.no_self_use + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. 
+suggestion-mode=yes + +[FORMAT] +max-line-length=79 +# Allow docstrings containing long urls +ignore-long-lines=^\s+.+?$ + +[REPORTS] +#reports=yes +score=yes + +[MESSAGES CONTROL] +disable= + +[DESIGN] +min-public-methods=1 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..c3726e8b --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +pyyaml diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 00000000..898f13dd --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,6 @@ +# This is a templated file and must be kept up-to-date with the original +# from upstream at https://github.com/canonical/se-tooling-ci-common. +bashate +flake8==6.1.0 +flake8-import-order==0.18.2 +pylint==3.1.0 diff --git a/tools/cleanup-cinder-attachments.sh b/tools/cleanup-cinder-attachments.sh new file mode 100755 index 00000000..84521bf9 --- /dev/null +++ b/tools/cleanup-cinder-attachments.sh @@ -0,0 +1,11 @@ +#!/bin/bash -u +for vol in $(openstack volume list| grep juju| grep in-use| awk '{print $2}'); do + echo "Finding attachments for in-use volume $vol" + for server in $(openstack volume attachment list --os-volume-api-version 3.27 --volume-id $vol -c 'Server ID' -f value); do + openstack server show $server && continue + echo "Deleting attachments for volume $vol from (non-existent) server $server" + for id in $(openstack volume attachment list --os-volume-api-version 3.27 --volume-id $vol -c 'ID' -f value); do + openstack volume attachment delete --os-volume-api-version 3.27 $id + done + done +done diff --git a/tools/juju-bundle-applications.py b/tools/juju-bundle-applications.py index 2ac1600d..96e2b376 100755 --- a/tools/juju-bundle-applications.py +++ b/tools/juju-bundle-applications.py @@ -1,14 +1,18 @@ #!/usr/bin/env python3 - +# pylint: disable=invalid-name +""" +Get Juju Bundle or Overlay Applications. 
+""" import sys -import yaml -application_list = set() +import yaml -for filename in sys.argv[1:]: - with open(filename, 'r') as f: - data = yaml.load_all(f, Loader=yaml.SafeLoader) - for d in data: - if 'applications' in d: - application_list.update(d['applications'].keys()) -print('\n'.join(application_list)) +if __name__ == "__main__": + application_list = set() + for filename in sys.argv[1:]: + with open(filename, 'r', encoding='utf-8') as f: + data = yaml.load_all(f, Loader=yaml.SafeLoader) + for d in data: + if 'applications' in d: + application_list.update(d['applications'].keys()) + print('\n'.join(application_list)) diff --git a/tools/juju-lnav b/tools/juju-lnav index 89e45918..2d48c096 100755 --- a/tools/juju-lnav +++ b/tools/juju-lnav @@ -10,9 +10,20 @@ install_ssh_key() { local unit for unit in $@; do echo "installing ssh key for ${unit}" - cat ~/.ssh/id_rsa.pub \ - | timeout 10 juju ssh ${unit} \ - -- sudo tee --append /root/.ssh/authorized_keys + if [[ -f ~/testkey.pub ]]; then + cat ~/testkey.pub \ + | timeout 10 juju ssh ${unit} \ + -- sudo tee --append /root/.ssh/authorized_keys + else + echo "missing key: ~/testkey.pub" + fi + if [[ -f ~/.ssh/id_rsa.pub ]]; then + cat ~/.ssh/id_rsa.pub \ + | timeout 10 juju ssh ${unit} \ + -- sudo tee --append /root/.ssh/authorized_keys + else + echo "missing key: ~/.ssh/id_rsa.pub" + fi done } diff --git a/tools/parse-bundle.py b/tools/parse-bundle.py index b328ad82..3900ccfd 100755 --- a/tools/parse-bundle.py +++ b/tools/parse-bundle.py @@ -1,18 +1,23 @@ #!/usr/bin/env python3 - +# pylint: disable=invalid-name +""" +Parse bundle information. +""" import argparse -import yaml import re import sys -lpid = r"([~a-z0-9\-]+/)?" -charm = r"([a-z0-9\-]+)" -charm_match = re.compile(r".*cs:{}{}-([0-9]+)\s*$".format(lpid, charm)) -status_match = re.compile("^App.*Version.*Status") -empty_line = re.compile("^\s*$") +import yaml + +lpid_expr = r"([~a-z0-9\-]+/)?" 
+charm_name_expr = r"([a-z0-9\-]+)" +charm_expr = re.compile(fr".*cs:{lpid_expr}{charm_name_expr}-([0-9]+)\s*$") +status_match = re.compile(r"^App.*Version.*Status") +empty_line = re.compile(r"^\s*$") def parse_arguments(): + """ Parse cli args. """ parser = argparse.ArgumentParser() parser.add_argument( "FILE", @@ -26,6 +31,7 @@ def parse_arguments(): def get_charms(bundle): + """ Get charms from bundle. """ charms = {} for app in bundle['applications']: charms[app] = bundle['applications'][app]['charm'] @@ -33,21 +39,23 @@ def get_charms(bundle): def process_bundle(bundle): + """ Extract charm info from bundle. """ versions_found = False charms = get_charms(bundle) - for app in charms: - ret = charm_match.match(charms[app]) + for appinfo in charms.values(): + ret = charm_expr.match(appinfo) if ret: versions_found = True _charm = ret.group(2) if ret.group(1): - _charm = "{}{}".format(ret.group(1), _charm) + _charm = f"{ret.group(1)}{_charm}" - print(_charm, charms[app]) + print(_charm, appinfo) return versions_found def process_status(model_status): + """ Extract charm versions from model status. """ versions_found = False processing = False for line in model_status: @@ -58,19 +66,19 @@ def process_status(model_status): processing = False continue if processing: - ret = [l.strip() for l in line.split()] - version = ret[1] + ret = line.strip().split() charm = ret[4] store = ret[5] rev = ret[6] if store == 'jujucharms': versions_found = True - print("{} cs:{}-{}".format(charm, charm, rev)) + print(f"{charm} cs:{charm}-{rev}") return versions_found def process(bundle_file, options): + """ Process a bundle. 
""" bundle = {} versions_found = False # Process revisions file assuming it is an exported bundle in yaml @@ -78,7 +86,8 @@ def process(bundle_file, options): try: bundle = yaml.load(bundle_file, Loader=yaml.SafeLoader) except yaml.scanner.ScannerError: - sys.stderr.write("INFO: input file does not appear to be in YAML format") + sys.stderr.write("INFO: input file does not appear to be in YAML " + "format\n") if 'applications' in bundle: if options.get_charms: versions_found = process_bundle(bundle) @@ -91,19 +100,15 @@ def process(bundle_file, options): versions_found = process_status(model_status) if not versions_found: - sys.stderr.write("WARNING: no valid charm revisions found in {}\n\n". - format(bundle_file.name)) - - -def main(): - options = parse_arguments() - if options.FILE == "-": - with sys.stdin as bundle: - process(bundle, options) - else: - with open(options.FILE) as bundle: - process(bundle, options) + sys.stderr.write(f"WARNING: no valid charm revisions found in " + f"{bundle_file.name}\n\n") if __name__ == "__main__": - main() + _options = parse_arguments() + if _options.FILE == "-": + with sys.stdin as _bundle: + process(_bundle, _options) + else: + with open(_options.FILE, encoding='utf-8') as _bundle: + process(_bundle, _options) diff --git a/tools/stack-manager/mark-vms-managed.sh b/tools/stack-manager/mark-vms-managed.sh index 722c3c79..d69ac5b8 100755 --- a/tools/stack-manager/mark-vms-managed.sh +++ b/tools/stack-manager/mark-vms-managed.sh @@ -2,7 +2,9 @@ . 
`dirname $0`/common.sh vms=( $@ ) -(("${#vms[@]}")) || { echo "ERROR: no vms provided"; exit 1; } +if (("${#vms[@]}" == 0)); then + echo "ERROR: no vms provided"; exit 1; +fi openstack object save stack-manager-${OS_PROJECT_NAME} --file $STAGING_DIR/managed_vms.json managed_vms.json echo "Marking ${#vms[@]} vms as managed=true" diff --git a/tools/stack-manager/mark-vms-unmanaged.sh b/tools/stack-manager/mark-vms-unmanaged.sh index d52c1820..c7470bde 100755 --- a/tools/stack-manager/mark-vms-unmanaged.sh +++ b/tools/stack-manager/mark-vms-unmanaged.sh @@ -2,7 +2,9 @@ . `dirname $0`/common.sh vms=( $@ ) -(("${#vms[@]}")) || { echo "ERROR: no vms provided"; exit 1; } +if (("${#vms[@]}" == 0)); then + echo "ERROR: no vms provided"; exit 1; +fi openstack object save stack-manager-${OS_PROJECT_NAME} --file $STAGING_DIR/managed_vms.json managed_vms.json echo "Marking ${#vms[@]} vms as managed=false" diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..71bdda51 --- /dev/null +++ b/tox.ini @@ -0,0 +1,92 @@ +# This is a templated file and must be kept up-to-date with the original +# from upstream at https://github.com/canonical/se-tooling-ci-common. 
+[tox] +skipsdist = True +envlist = bashate,pep8,pylint +minversion = 3.18.0 + +[flake8] +# H106: Don't put vim configuration in source files +# H203: Use assertIs(Not)None to check for None +# H204: Use assert(Not)Equal to check for equality +# H205: Use assert(Greater|Less)(Equal) for comparison +# H904: Delay string interpolations at logging calls +enable-extensions = H106,H203,H204,H205,H904 +show-source = true +exclude = +import-order-style = pep8 + +[testenv] +basepython = {env:TOX_PYTHON:python3} +pyfiles = + {toxinidir}/openstack/tools/func_test_tools + {toxinidir}/tools/parse-bundle.py + {toxinidir}/tools/juju-bundle-applications.py + +bashfiles = + # find -name configure| grep -v .git| sed -rn 's,^.,{toxinidir},p' + {toxinidir}/cos/configure + + {toxinidir}/tools/juju-lnav + # find tools -name \*.sh + {toxinidir}/tools/vault-unseal-and-authorise.sh + {toxinidir}/tools/mongo-access.sh + {toxinidir}/tools/lint-git-messages.sh + {toxinidir}/tools/model-poweron.sh + {toxinidir}/tools/stack-manager/mark-model-vms-unmanaged.sh + {toxinidir}/tools/stack-manager/show-all.sh + {toxinidir}/tools/stack-manager/common.sh + {toxinidir}/tools/stack-manager/show-unmanaged.sh + {toxinidir}/tools/stack-manager/mark-vms-unmanaged.sh + {toxinidir}/tools/stack-manager/mark-vms-managed.sh + {toxinidir}/tools/stack-manager/mark-model-vms-managed.sh + {toxinidir}/tools/stack-manager/show-managed.sh + {toxinidir}/tools/model-poweroff.sh + + {toxinidir}/common/generate_bundle_base + # find common -name \*.sh + {toxinidir}/common/ch_channel_map/test-channel-map.sh + {toxinidir}/common/ch_channel_map/test-all.sh + {toxinidir}/common/generate-bundle.sh + + {toxinidir}/openstack/novarc + # find openstack/tools/ -name \*.sh| xargs -I{} echo " {toxinidir}/{}" + {toxinidir}/openstack/tools/func_test_tools/manual_functests_runner.sh + {toxinidir}/openstack/tools/func_test_tools/common.sh + {toxinidir}/openstack/tools/create_octavia_lb.sh + {toxinidir}/openstack/tools/float_all.sh + 
{toxinidir}/openstack/tools/vault-unseal-and-authorise.sh + {toxinidir}/openstack/tools/install_local_ca.sh + {toxinidir}/openstack/tools/create_nova_az_aggregates.sh + {toxinidir}/openstack/tools/enable_samltestid.sh + {toxinidir}/openstack/tools/delete_project.sh + {toxinidir}/openstack/tools/charmed_openstack_functest_runner.sh + {toxinidir}/openstack/tools/create_ipv4_octavia.sh + {toxinidir}/openstack/tools/configure_octavia.sh + {toxinidir}/openstack/tools/create_sg_log.sh + {toxinidir}/openstack/tools/setup_tempest.sh + {toxinidir}/openstack/tools/instance_launch.sh + {toxinidir}/openstack/tools/create_project.sh + {toxinidir}/openstack/tools/allocate_vips.sh + {toxinidir}/openstack/tools/upload_octavia_amphora_image.sh + {toxinidir}/openstack/tools/sec_groups.sh + {toxinidir}/openstack/tools/create-microceph-vm.sh + {toxinidir}/openstack/tools/upload_image.sh + {toxinidir}/openstack/tools/openstack_regression_tests_runner.sh + + +setenv = + PYTHONHASHSEED=0 +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +commands = flake8 -v {posargs:{[testenv]pyfiles}} + +[testenv:pylint] +commands = pylint -v --rcfile={toxinidir}/pylintrc {posargs:{[testenv]pyfiles}} + +[testenv:bashate] +commands = bashate --ignore E006 --verbose {posargs:{[testenv]bashfiles}} +