diff --git a/calm-integrations/calm-dr-vm-tracking-scripts/esxi-pre-migration-script.py b/calm-integrations/calm-dr-vm-tracking-scripts/esxi-pre-migration-script.py
new file mode 100644
index 0000000..14c2054
--- /dev/null
+++ b/calm-integrations/calm-dr-vm-tracking-scripts/esxi-pre-migration-script.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import os
+import requests
+import json
+
+from calm.common.flags import gflags
+
+import calm.lib.model as model
+
+from helper import init_contexts, log, is_category_key_present, create_category_key, create_category_value, add_category_to_vm, get_application_uuids
+
+
+REQUIRED_ATTRS = ['DEST_PC_IP', 'DEST_PC_USER', 'DEST_PC_PASS', 'SOURCE_PROJECT_NAME', 'SOURCE_PC_IP', 'SOURCE_PC_USER', 'SOURCE_PC_PASSWORD']
+missing_attrs = [attr for attr in REQUIRED_ATTRS if attr not in os.environ]
+if missing_attrs:
+    raise Exception("Please export {}".format(", ".join(missing_attrs)))
+
+# constants
+PC_PORT = 9440
+SOURCE_PC_IP = os.environ['SOURCE_PC_IP']
+SOURCE_PROJECT_NAME = os.environ['SOURCE_PROJECT_NAME']
+
+
+DELETED_STATE = 'deleted'
+VMWARE_VM = "VMWARE_VM"
+ESXI_HYPERVISOR_TYPE = "ESX"
+# Only VMs whose protection_type matches this value are picked up for tagging.
+PROTECTION_TYPE = "UNPROTECTED"
+LENGTH = 100
+DR_KEY = "VM_VCENTER_UUID"
+headers = {'content-type': 'application/json', 'Accept': 'application/json'}
+
+source_base_url = "https://{}:{}/api/nutanix/v3".format(SOURCE_PC_IP, str(PC_PORT))
+source_pc_auth = {"username": os.environ['SOURCE_PC_USER'], "password": os.environ['SOURCE_PC_PASSWORD']}
+
+dest_base_url = "https://{}:{}/api/nutanix/v3".format(os.environ['DEST_PC_IP'], str(PC_PORT))
+dest_pc_auth = {"username": os.environ['DEST_PC_USER'], "password": os.environ['DEST_PC_PASS']}
+
+SYS_DEFINED_CATEGORY_KEY_LIST = [
+    "ADGroup",
+    "AnalyticsExclusions",
+    "AppTier",
+    "AppType",
+    "CalmApplication",
+    "CalmDeployment",
+    "CalmService",
+    "CalmPackage",
+    "Environment",
+    "OSType",
+    "Quarantine",
+    "CalmVmUniqueIdentifier",
+    "CalmUser",
+    "account_uuid",
+    "TemplateType",
+    "VirtualNetworkType"
+]
+
+
+# Step 1:
+"""
+Get the map of vm_uuid to vCenter uuid using the mh_vms API.
+"""
+def get_mh_vms_list(base_url, auth, offset):
+    method = 'POST'
+    url = base_url + "/mh_vms/list"
+    payload = {"length": LENGTH, "offset": offset}
+    resp = requests.request(
+        method,
+        url,
+        data=json.dumps(payload),
+        headers=headers,
+        auth=(auth["username"], auth["password"]),
+        verify=False
+    )
+    if resp.ok:
+        resp_json = resp.json()
+        return resp_json["entities"], resp_json["metadata"]["total_matches"]
+    else:
+        log.info("Failed to get mh_vms list.")
+        log.info('Status code: {}'.format(resp.status_code))
+        log.info('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
+        raise Exception("Failed to get mh_vms list.")
+
+
+def get_vm_vcenter_uuid_pc_uuid_map(base_url, pc_auth):
+    res = {}
+    total_matches = 1
+    offset = 0
+    while offset < total_matches:
+        entities, total_matches = get_mh_vms_list(base_url, pc_auth, offset)
+        for entity in entities:
+            if entity["status"]["resources"]["hypervisor_type"] == ESXI_HYPERVISOR_TYPE and entity["status"]["resources"]["protection_type"] == PROTECTION_TYPE:
+                res[entity["status"]["resources"]["hypervisor_specific_id"]] = entity["metadata"]["uuid"]
+        offset += LENGTH
+    return res
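+
+
+# Illustrative usage sketch (comment only; the uuids below are hypothetical):
+#
+#   vm_uuid_map = get_vm_vcenter_uuid_pc_uuid_map(source_base_url, source_pc_auth)
+#   # vm_uuid_map -> {"503f9d01-aaaa-...": "a7c2e8b4-bbbb-..."}
+#   for vcenter_uuid, pc_uuid in vm_uuid_map.items():
+#       log.info("vCenter VM {} -> PC VM {}".format(vcenter_uuid, pc_uuid))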
pc") + dr_key_present = is_category_key_present(source_base_url, source_pc_auth, DR_KEY) + if not dr_key_present: + create_category_key(source_base_url, source_pc_auth, DR_KEY) + + vm_uuid_map = get_vm_vcenter_uuid_pc_uuid_map(source_base_url, source_pc_auth) + for vcenter_vm_uuid, pc_vm_uuid in vm_uuid_map.items(): + create_category_value(source_base_url, source_pc_auth, DR_KEY, vcenter_vm_uuid) + add_category_to_vm(source_base_url, source_pc_auth, pc_vm_uuid, DR_KEY, vcenter_vm_uuid) + + +# Step-3 +""" + -> Create the DR key on source and destination setup + -> Iterate over all the applications, and get the vm used in substrates + -> Get the vcenter_vm_uuid and using mh_vms/list , get the pc_vm_uuid + -> Create Category key for DR_key: Vcenter_uuid on source/destination setup + -> Add the category to given vm +""" + +def create_categories(): + + log.info("Creating categories/values") + + dr_key_present = is_category_key_present(source_base_url, source_pc_auth, DR_KEY) + if not dr_key_present: + create_category_key(source_base_url, source_pc_auth, DR_KEY) + + dr_key_present = is_category_key_present(dest_base_url, dest_pc_auth, DR_KEY) + if not dr_key_present: + create_category_key(dest_base_url, dest_pc_auth, DR_KEY) + + init_contexts() + vm_uuid_map = get_vm_vcenter_uuid_pc_uuid_map(source_base_url, source_pc_auth) + application_uuid_list = get_application_uuids(SOURCE_PROJECT_NAME) + for app_uuid in application_uuid_list: + application = model.Application.get_object(app_uuid) + if application.state != DELETED_STATE: + for dep in application.active_app_profile_instance.deployments: + if dep.substrate.type == VMWARE_VM: + for element in dep.substrate.elements: + vcenter_vm_uuid = str(element.instance_id) + if vcenter_vm_uuid not in vm_uuid_map: + continue + + # Create category value for hypervisor specific attributes at source pc, dest_pc and udpate vm with it + pc_vm_uuid = vm_uuid_map[vcenter_vm_uuid] + create_category_value(source_base_url, source_pc_auth, DR_KEY, vcenter_vm_uuid) + create_category_value(dest_base_url, dest_pc_auth, DR_KEY, vcenter_vm_uuid) + add_category_to_vm(source_base_url, source_pc_auth, pc_vm_uuid, DR_KEY, vcenter_vm_uuid) + + +def main(): + try: + create_categories() + except Exception as e: + log.info("Exception: %s" % e) + raise +if __name__ == '__main__': + main() diff --git a/calm-integrations/calm-dr-vm-tracking-scripts/esxi_post_migration_script.py b/calm-integrations/calm-dr-vm-tracking-scripts/esxi_post_migration_script.py new file mode 100644 index 0000000..da7ffa0 --- /dev/null +++ b/calm-integrations/calm-dr-vm-tracking-scripts/esxi_post_migration_script.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import os +import json +from copy import deepcopy + +from calm.common.flags import gflags + +from aplos.insights.entity_capability import EntityCapability +from calm.lib.model.substrates.vmware import VcenterSubstrateElement +from helper import change_project_of_vmware_dr_apps, init_contexts, log, get_vm_source_dest_uuid_map, get_mh_vm +from calm.lib.model.tasks.vmware import VcenterVdiskInfo, VcenterVControllerInfo, VcenterNicInfo, VcenterFolderInfo, VcenterTagInfo +from calm.lib.model.store.db_session import flush_session +from calm.common.api_helpers.vmware_helper import get_vmware_resources + +import calm.lib.model as model + + +REQUIRED_ATTRS = ['DEST_PC_IP', 'DEST_PC_USER', 'DEST_PC_PASS', 'SOURCE_PROJECT_NAME', 'DEST_ACCOUNT_NAME', 'DEST_PROJECT_NAME'] +msg = "" +for attr in REQUIRED_ATTRS: + if attr not in os.environ: + 
+
+
+def get_vm_path(content, vm):
+    """
+    Find the folder path of a virtual machine.
+    Args:
+        content: VMware content object
+        vm: virtual machine managed object
+
+    Returns: folder path of the virtual machine if it exists, else None
+    """
+    folder_name = None
+    folder = vm.parent
+    if folder:
+        folder_name = folder.name
+        fp = folder.parent
+        # climb back up the tree to find our path, stop before the root folder
+        while fp is not None and fp.name is not None and fp != content.rootFolder:
+            folder_name = fp.name + '/' + folder_name
+            try:
+                fp = fp.parent
+            except BaseException:
+                break
+        folder_name = '/' + folder_name
+    return folder_name
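+
+
+# Example (illustrative): for a VM nested under "Workloads/DR" in the
+# datacenter's VM folder tree, this would return a path such as
+# "/vm/Workloads/DR"; the exact prefix depends on the folder hierarchy:
+#
+#   folder_path = get_vm_path(content, vm)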
+
+
+def get_vm_platform_data(vcenter_details, new_instance_id):
+    """
+    Get the platform data of the VM.
+    Args:
+        vcenter_details: vCenter account details (as returned by get_vcenter_details)
+        new_instance_id: vCenter instance uuid of the recovered VM
+
+    Returns: VM platform data
+    """
+
+    filters = {
+        "uuid": new_instance_id,
+        "account_uuid": vcenter_details["account_uuid"]
+    }
+    platform_data = get_vmware_resources('vm_detail', filters)
+    return platform_data
+
+
+def update_create_spec_object(create_spec, platform_data, vcenter_details):
+
+    create_spec.resources.account_uuid = vcenter_details["account_uuid"]
+    create_spec.datastore = platform_data["datastore"][0]["URL"]
+    create_spec.host = platform_data["host"]
+    create_spec.cluster = platform_data["cluster"]
+
+    # Previous approach, kept for reference:
+    # create_spec.template = ""
+    # create_spec.resources.template_nic_list = []
+    # create_spec.resources.template_disk_list = []
+    # create_spec.resources.template_controller_list = []
+
+    create_spec.template = get_template_id(vcenter_details)
+    for _i in create_spec.resources.template_nic_list:
+        _i.is_deleted = True
+    for _i in create_spec.resources.template_disk_list:
+        _i.is_deleted = True
+    for _i in create_spec.resources.template_controller_list:
+        _i.is_deleted = True
+
+    # Treat all nics as normal nics
+    create_spec.resources.nic_list = [VcenterNicInfo(net_name=pn.get('net_name', ''), nic_type=pn.get('nic_type', None))
+                                      for pn in platform_data["nics"]]
+
+    # Treat all disks as normal disks
+    create_spec.resources.disk_list = [VcenterVdiskInfo(disk_size_mb=d.get('disk_size_mb', 0),
+                                                        disk_type=d.get('disk_type', None),
+                                                        controller_key=d.get('controller_key', None),
+                                                        device_slot=d.get('disk_slot', None),
+                                                        iso_path=d.get('iso_path', ""),
+                                                        adapter_type=d.get('adapter_type', None),
+                                                        location=d.get('location', None),
+                                                        disk_mode=d.get('disk_mode', None)) for d in platform_data["disks"]]
+
+    # Treat all controllers as normal controllers
+    create_spec.resources.controller_list = [VcenterVControllerInfo(controller_type=pc.get('controller_type', None),
+                                                                    bus_sharing=pc.get('bus_sharing', ""),
+                                                                    key=pc.get('key', -1)) for pc in
+                                             platform_data["controllers"]]
+
+    # Move everything under the existing path
+    create_spec.folder = VcenterFolderInfo(existing_path=platform_data["folder"], new_path="", delete_empty_folder=False)
+
+    # Adding tags
+    create_spec.resources.tag_list = [VcenterTagInfo(tag_id=_tag) for _tag in platform_data.get("tags", [])]
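+
+
+# The platform data consumed above is assumed to look roughly like this
+# (illustrative subset; keys inferred from the accesses in this file):
+#
+#   {
+#       "datastore": [{"URL": "ds:///vmfs/volumes/..."}],
+#       "host": "<host reference>",
+#       "cluster": "<cluster name>",
+#       "folder": "/vm/Workloads/DR",
+#       "nics": [{"net_name": "VM Network", "nic_type": "e1000"}],
+#       "disks": [{"disk_size_mb": 20480, "disk_type": "disk", "disk_slot": 0}],
+#       "controllers": [{"controller_type": "scsi", "key": 1000}],
+#       "tags": ["<tag-id>"]
+#   }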
+
+
+def update_substrate_info(old_instance_id, new_instance_id, vcenter_details):
+
+    sub_ele = VcenterSubstrateElement.query(instance_id=old_instance_id, deleted=False)
+    if not sub_ele:
+        return
+
+    sub_ele = sub_ele[0]
+    current_platform_data = json.loads(sub_ele.platform_data)
+    new_platform_data = json.loads(get_vm_platform_data(vcenter_details, new_instance_id)[0])
+    current_platform_data.update(new_platform_data)
+
+    # update substrate element, clear all the snapshot info
+    sub_ele.platform_data = json.dumps(current_platform_data)
+    sub_ele.instance_id = new_instance_id
+    update_create_spec_object(sub_ele.spec, current_platform_data, vcenter_details)
+    current_snapshot_ids = sub_ele.snapshot_info
+    sub_ele.snapshot_info = []
+    sub_ele.save()
+
+    try:
+        from calm.lib.model.snapshot_group import VcenterSnapshotInfo, VcenterSnapshotGroup
+        for _id in current_snapshot_ids:
+            db_snapshot_info = VcenterSnapshotInfo.fetch_one(snapshot_id=_id, substrate_element_reference=str(sub_ele.uuid))
+            snapshot_info_id = db_snapshot_info.uuid
+            snapshot_group_query = {
+                'substrate_reference': str(sub_ele.replica_group_reference),
+                'action_runlog_reference': str(db_snapshot_info['action_runlog_reference']),
+            }
+            db_snapshot_info.delete()
+            snapshot_group = VcenterSnapshotGroup.fetch_one(**snapshot_group_query)
+            if snapshot_group and snapshot_info_id:
+                snapshot_group.update_snapshot_info_references(snapshot_info_id, "remove")
+                if not snapshot_group.snapshots:
+                    snapshot_group.delete()
+    except Exception:
+        pass
+
+    # Get the substrate from the substrate element
+    log.info("Updating VM substrate for substrate element {}".format(str(sub_ele.uuid)))
+    substrate = sub_ele.replica_group
+    update_create_spec_object(substrate.spec, current_platform_data, vcenter_details)
+
+    # update the create action
+    log.info("Updating 'action_create' for substrate {}.".format(str(substrate.uuid)))
+    for action in substrate.actions:
+        if action.name == "action_create":
+            for task in action.runbook.get_all_tasks():
+                if task.type == "PROVISION_VCENTER":
+                    update_create_spec_object(task.attrs, current_platform_data, vcenter_details)
+                    task.save()
+                    break
+            break
+    substrate.save()
+
+    # Get the substrate config from the substrate object
+    log.info("Updating VM substrate config for substrate element {}".format(str(sub_ele.uuid)))
+    sub_config = substrate.config
+    update_create_spec_object(sub_config.spec, current_platform_data, vcenter_details)
+    sub_config.save()
+
+    # Updating intent spec
+    application = model.AppProfileInstance.get_object(sub_ele.app_profile_instance_reference).application
+    clone_bp = application.app_blueprint_config
+    clone_bp_intent_spec_dict = json.loads(clone_bp.intent_spec)
+    for _nic in current_platform_data["nics"]:
+        _nic.pop("key", None)
+    for _disk in current_platform_data["disks"]:
+        _disk["device_slot"] = _disk.pop("disk_slot", -1)
+        _disk.pop("key", None)
+        _disk.pop("disk_name", None)
+    for substrate_cfg in clone_bp_intent_spec_dict.get("resources").get("substrate_definition_list"):
+        if substrate_cfg["uuid"] == str(sub_config.uuid):
+            create_spec = substrate_cfg["create_spec"]
+            create_spec["datastore"] = current_platform_data["datastore"][0]["URL"]
+            create_spec["host"] = current_platform_data["host"]
+
+            # NOTE: For now, we keep the template data and just mark every template attribute as deleted.
+            # Previous approach, kept for reference:
+            # create_spec["template"] = ""
+            # create_spec["resources"]["template_nic_list"] = []
+            # create_spec["resources"]["template_disk_list"] = []
+            # create_spec["resources"]["template_controller_list"] = []
+
+            create_spec["template"] = get_template_id(vcenter_details)
+            for _tnic in create_spec["resources"]["template_nic_list"]:
+                _tnic["is_deleted"] = True
+            for _tdisk in create_spec["resources"]["template_disk_list"]:
+                _tdisk["is_deleted"] = True
+            for _tcontroller in create_spec["resources"]["template_controller_list"]:
+                _tcontroller["is_deleted"] = True
+            create_spec["resources"]["nic_list"] = current_platform_data["nics"]
+            create_spec["resources"]["disk_list"] = current_platform_data["disks"]
+            create_spec["resources"]["controller_list"] = current_platform_data["controllers"]
+            create_spec["resources"]["account_uuid"] = vcenter_details["account_uuid"]
+    clone_bp.intent_spec = json.dumps(clone_bp_intent_spec_dict)
+    clone_bp.save()
+
+    # TODO update patch config action
+    '''log.info("Updating patch config action for '{}' with instance_id '{}'.".format(current_platform_data["instance_name"], new_instance_id))
+    for patch in application.active_app_profile_instance.patches:
+        patch_config_attr = patch.attrs_list[0]
+        patch_data = patch_config_attr.data
+
+        if patch_config_attr.target == '''
+
+
+def update_substrates(vm_uuid_map, vcenter_details):
+
+    for older_vcenter_vm_uuid, newer_vcenter_vm_uuid in vm_uuid_map.items():
+
+        # Update substrates using older_vcenter_vm_uuid
+        try:
+            update_substrate_info(older_vcenter_vm_uuid, newer_vcenter_vm_uuid, vcenter_details)
+        except Exception as e:
+            log.info("Failed to update substrate of {0}".format(older_vcenter_vm_uuid))
+            log.info(e)
+        flush_session()
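+
+
+# Usage sketch (illustrative): the uuid map comes from the VM_VCENTER_UUID
+# category written by the pre-migration script, e.g.
+#
+#   update_substrates({"<old-vcenter-uuid>": "<new-vcenter-uuid>"}, vcenter_details)
+#
+# Failures are logged per VM so one bad substrate does not abort the batch.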
+
+
+def update_app_project(vm_uuid_map):
+    app_names = set()
+    app_kind = "app"
+
+    for _, instance_id in vm_uuid_map.items():
+        NSE = VcenterSubstrateElement.query(instance_id=instance_id, deleted=False)
+        if NSE:
+            NSE = NSE[0]
+            app_instance = model.AppProfileInstance.get_object(NSE.app_profile_instance_reference)
+            app_name = app_instance.application.name
+            app_uuid = app_instance.application.uuid
+            entity_cap = EntityCapability(kind_name=app_kind, kind_id=str(app_uuid))
+            if entity_cap.project_name == SRC_PROJECT:
+                app_names.add(app_name)
+
+    for app_name in app_names:
+        change_project_of_vmware_dr_apps(app_name, DEST_PROJECT)
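+
+
+# Illustrative sketch: only apps still owned by SOURCE_PROJECT_NAME are moved,
+# e.g. with SRC_PROJECT="proj-a" and DEST_PROJECT="proj-b" (hypothetical names):
+#
+#   update_app_project(calm_vm_uuid_map)   # each matching app moves to "proj-b"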
+
+
+def main():
+    try:
+
+        init_contexts()
+
+        # Get the account
+        vcenter_details = get_vcenter_details(os.environ['DEST_ACCOUNT_NAME'])
+
+        # This will contain pc-vm-uuid(source) to pc-vm-uuid(destination)
+        pc_vm_uuid_map = get_vm_source_dest_uuid_map(dest_base_url, dest_pc_auth)
+
+        # This will contain vcenter-instance-uuid(old) to vcenter-instance-uuid(new)
+        calm_vm_uuid_map = {}
+        for _, dpc_vm_uuid in pc_vm_uuid_map.items():
+            dest_vm_data = get_mh_vm(dest_base_url, dest_pc_auth, dpc_vm_uuid)
+            old_instance_uuid = dest_vm_data["metadata"].get("categories", {}).get(DR_KEY, "")
+            new_instance_uuid = dest_vm_data["status"]["resources"]["hypervisor_specific_id"]
+            # skip VMs that are missing either uuid
+            if not (old_instance_uuid and new_instance_uuid):
+                continue
+            calm_vm_uuid_map[old_instance_uuid] = new_instance_uuid
+
+        update_substrates(calm_vm_uuid_map, vcenter_details)
+
+        update_app_project(calm_vm_uuid_map)
+
+    except Exception as e:
+        log.info("Exception: %s" % e)
+        raise
+
+
+if __name__ == '__main__':
+    main()
diff --git a/calm-integrations/calm-dr-vm-tracking-scripts/helper.py b/calm-integrations/calm-dr-vm-tracking-scripts/helper.py
index 92741a7..3719acf 100644
--- a/calm-integrations/calm-dr-vm-tracking-scripts/helper.py
+++ b/calm-integrations/calm-dr-vm-tracking-scripts/helper.py
@@ -5,6 +5,7 @@
 import requests
 import ujson
 import logging
+import json
 
 from aplos.categories.category import Category, CategoryKey
 from aplos.insights.entity_capability import EntityCapability
@@ -18,6 +19,8 @@
 from calm.lib.model.store.db import create_db_connection
 from calm.lib.model.store.db_session import create_session, set_session_type
 from calm.pkg.common.scramble import init_scramble
+from calm.lib.model.store.db import get_insights_db
+from calm.lib.proto import AbacEntityCapability
 
 log = logging.getLogger('eylog')
 logging.basicConfig(level=logging.INFO,
@@ -25,6 +28,9 @@
                     datefmt='%H:%M:%S')
 init_config()
 
+LENGTH = 100
+HEADERS = {'content-type': 'application/json', 'Accept': 'application/json'}
+ESXI_HYPERVISOR_TYPE = "ESX"
 
 # This is needed as when we import calm models, Flags needs be initialized
@@ -178,6 +184,44 @@
     log.info("Successfully moved '{}' application to '{}' project ".format(app_name, new_project_name))
 
 
+def change_project_of_vmware_dr_apps(application_name, new_project_name):
+    """
+    change_project method for the VMware DR apps
+    Raises:
+        Exception: when the given project or application cannot be found
+    Returns:
+        None
+    """
+
+    tenant_uuid = TenantUtils.get_logged_in_tenant()
+    project_handle = ProjectUtil()
+    app_name = application_name
+
+    # Verify if the supplied project name is valid
+    project_proto = project_handle.get_project_by_name(new_project_name)
+    if not project_proto:
+        raise Exception("No project in system with name '{}'".format(new_project_name))
+    new_project_uuid = str(project_proto.uuid)
+
+    # Verify if the supplied application name is valid
+    apps = Application.query(name=app_name, deleted=False)
+    if not apps:
+        raise Exception("No app in system with name '{}'".format(app_name))
+    app = apps[0]
+
+    entity_cap = EntityCapability(kind_name="app", kind_id=str(app.uuid))
+
+    if entity_cap.project_name == new_project_name:
+        log.info("Application '{}' is already in the same project: '{}'".format(app_name, new_project_name))
+        return
+
+    log.info("Moving '{}' application to new project: '{}'".format(app_name, new_project_name))
+    handle_entity_project_change("app", str(app.uuid), tenant_uuid, new_project_name, new_project_uuid)
+    log.info("Successfully changed '{}' application's ownership to new project '{}'".format(app_name, new_project_name))
+    log.info("**" * 30)
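+
+
+# Usage sketch (illustrative; the app and project names are hypothetical):
+#
+#   change_project_of_vmware_dr_apps("my-dr-app", "dr-target-project")
+#
+# The call is a no-op when the app already belongs to the target project.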
+
+
 def change_project_vmware(application_name, new_project_name):
     """
     change_project method for the file
@@ -334,4 +378,216 @@
     category_obj.tenant_uuid = tenant_uuid
     category_obj.initialize(name, value, "Created by CALM", None, True)
     category_obj.save()
-    return category_obj
\ No newline at end of file
+    return category_obj
+
+
+def get_mh_vm(base_url, auth, uuid):
+    method = 'GET'
+    url = base_url + "/mh_vms/{0}".format(uuid)
+    resp = requests.request(
+        method,
+        url,
+        headers=HEADERS,
+        auth=(auth["username"], auth["password"]),
+        verify=False
+    )
+    if resp.ok:
+        resp_json = resp.json()
+        return resp_json
+    else:
+        log.info(resp.content)
+        raise Exception("Failed to get vm '{}'.".format(uuid))
+
+
+def update_mh_vm(base_url, auth, uuid, payload):
+    method = 'PUT'
+    url = base_url + "/mh_vms/{0}".format(uuid)
+    resp = requests.request(
+        method,
+        url,
+        headers=HEADERS,
+        auth=(auth["username"], auth["password"]),
+        verify=False,
+        data=json.dumps(payload)
+    )
+    if resp.ok:
+        resp_json = resp.json()
+        return resp_json
+    else:
+        log.info(resp.content)
+        raise Exception("Failed to update vm '{}'.".format(uuid))
+
+
+def is_category_key_present(base_url, auth, key):
+    method = 'GET'
+    url = base_url + "/categories/{}".format(key)
+    resp = requests.request(
+        method,
+        url,
+        headers=HEADERS,
+        auth=(auth["username"], auth["password"]),
+        verify=False
+    )
+    if resp.ok:
+        return True
+    else:
+        return False
+
+
+def create_category_key(base_url, auth, key):
+    method = 'PUT'
+    url = base_url + "/categories/{}".format(key)
+    payload = {
+        "name": key
+    }
+    resp = requests.request(
+        method,
+        url,
+        data=json.dumps(payload),
+        headers=HEADERS,
+        auth=(auth["username"], auth["password"]),
+        verify=False
+    )
+    if resp.ok:
+        return True
+    else:
+        log.info("Failed to create category key '{}'.".format(key))
+        log.info('Status code: {}'.format(resp.status_code))
+        log.info('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
+        raise Exception("Failed to create category key '{}'.".format(key))
+
+
+def create_category_value(base_url, auth, key, value):
+    method = 'PUT'
+    url = base_url + "/categories/{}/{}".format(key, value)
+    payload = {
+        "value": value,
+        "description": ""
+    }
+    resp = requests.request(
+        method,
+        url,
+        data=json.dumps(payload),
+        headers=HEADERS,
+        auth=(auth["username"], auth["password"]),
+        verify=False
+    )
+    if resp.ok:
+        return True
+    else:
+        log.info("Failed to create category value '{}' for key '{}'.".format(value, key))
+        log.info('Status code: {}'.format(resp.status_code))
+        log.info('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
+        raise Exception("Failed to create category value '{}' for key '{}'.".format(value, key))
+
+
+def add_category_to_vm(base_url, auth, vm_uuid, key, value):
+
+    vm_data = get_mh_vm(base_url, auth, vm_uuid)
+    vm_data.pop("status", None)
+    vm_data["metadata"].pop("categories_mapping", None)
+    vm_data["metadata"]["categories"] = vm_data["metadata"].get("categories", {})
+
+    if vm_data["metadata"]["categories"].get(key, "") == value:
+        log.info("Ignoring VM update of {} as it already has the correct category value".format(vm_uuid))
+        return
+
+    vm_data["metadata"]["categories"][key] = value
+    update_mh_vm(base_url, auth, vm_uuid, vm_data)
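+
+
+# Illustrative flow (comment only; vcenter_uuid and pc_vm_uuid are hypothetical
+# variables): the helpers above wrap the Prism Central v3 categories API and
+# are typically used in sequence:
+#
+#   if not is_category_key_present(base_url, auth, "VM_VCENTER_UUID"):
+#       create_category_key(base_url, auth, "VM_VCENTER_UUID")
+#   create_category_value(base_url, auth, "VM_VCENTER_UUID", vcenter_uuid)
+#   add_category_to_vm(base_url, auth, pc_vm_uuid, "VM_VCENTER_UUID", vcenter_uuid)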
step_execution_status_dest["any_entity_reference_list"][0]["uuid"] + vm_source_dest_uuid_map[src_vm_uuid] = dest_vm_uuid + + return vm_source_dest_uuid_map