From 98804e96ef428b1d23b496015055e8471b30f6b6 Mon Sep 17 00:00:00 2001
From: Abdelilah Essiari
Date: Wed, 13 Mar 2024 18:32:12 -0700
Subject: [PATCH] fix sense/aws delete by initializing the client and make
 fabfed exit with code 1 on failure

- Initialize the SENSE client in do_delete_resource so a destroy-only
  run no longer fails when create never ran in the same process.
- Record the validated private key location back into the provider
  config during setup_environment.
- manage_workflow now exits with status 1 when an apply or destroy
  fails, instead of always returning normally (exit code 0).
---
 fabfed/provider/sense/sense_provider.py |  5 +++++
 tools/fabfed.py                         | 19 +++++++++++--------
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/fabfed/provider/sense/sense_provider.py b/fabfed/provider/sense/sense_provider.py
index bedcbb11..51028e75 100644
--- a/fabfed/provider/sense/sense_provider.py
+++ b/fabfed/provider/sense/sense_provider.py
@@ -29,6 +29,8 @@ def setup_environment(self):
         if not can_read(pkey) or not is_private_key(pkey):
             raise ProviderException(f"{self.name}: unable to read/parse ssh key in {pkey}")
 
+        self.config[SENSE_SLICE_PRIVATE_KEY_LOCATION] = pkey
+
     @property
     def private_key_file_location(self):
         from .sense_constants import SENSE_SLICE_PRIVATE_KEY_LOCATION
@@ -60,9 +62,11 @@ def _handle_peering_config(self, resource):
 
     def _init_client(self):
         if not self.initialized:
+            self.logger.info(f"{self.name}: Initializing sense client")
             from .sense_client import init_client
 
             init_client(self.config)
+            self.logger.info(f"{self.name}: Initialized sense client")
             self.initialized = True
 
     def do_add_resource(self, *, resource: dict):
@@ -188,6 +192,7 @@ def do_create_resource(self, *, resource: dict):
             self.logger.debug(f"Created network: {vars(net)}")
 
     def do_delete_resource(self, *, resource: dict):
+        self._init_client()
         rtype = resource.get(Constants.RES_TYPE)
         assert rtype in self.supported_resources
         label = resource.get(Constants.LABEL)
diff --git a/tools/fabfed.py b/tools/fabfed.py
index 7cdf3ac8..234fc729 100644
--- a/tools/fabfed.py
+++ b/tools/fabfed.py
@@ -94,6 +94,9 @@ def manage_workflow(args):
         except ControllerException as ce:
             logger.error(f"Exceptions while creating resources ... {ce}")
             workflow_failed = True
+        except Exception as e:
+            logger.error(f"Unknown error while creating resources ... {e}")
+            workflow_failed = True
 
         controller_duration = time.time() - controller_duration_start
         providers_duration = 0
@@ -105,9 +108,7 @@ def manage_workflow(args):
 
         states = controller.get_states()
         nodes, networks, services, pending, failed = utils.get_counters(states=states)
-
-        if pending or failed:
-            workflow_failed = True
+        workflow_failed = workflow_failed or pending or failed
 
         if Constants.RECONCILE_STATES:
             states = sutil.reconcile_states(states, args.session)
@@ -134,7 +135,7 @@ def manage_workflow(args):
         logger.info(f"STATS:duration_in_seconds={workflow_duration}")
         logger.info(f"nodes={nodes}, networks={networks}, services={services}, pending={pending}, failed={failed}")
         sutil.save_stats(dict(comment="all durations are in seconds", stats=fabfed_stats), args.session)
-        return
+        sys.exit(1 if workflow_failed else 0)
 
     if args.init:
         config = WorkflowConfig.parse(dir_path=config_dir, var_dict=var_dict)
@@ -168,7 +169,7 @@ def manage_workflow(args):
                                  stitch_info=network.attributes.get(Constants.RES_STITCH_INFO))
             stitch_info_details.append(details)
 
-        stitch_info_details = dict(StitchNetworkDetails = stitch_info_details )
+        stitch_info_details = dict(StitchNetworkDetails=stitch_info_details)
         sutil.dump_objects(objects=stitch_info_details, to_json=args.json)
 
         NetworkInfo = namedtuple("NetworkInfo", "label provider_label")
@@ -178,7 +179,6 @@ def manage_workflow(args):
         stitch_info_map = {}
         stitch_info_network_info_map = {}
 
-
         for network in filter(lambda n: n.is_network and n.attributes.get(Constants.RES_STITCH_INFO), resources):
             stitch_info = network.attributes.get(Constants.RES_STITCH_INFO)
 
@@ -196,7 +196,7 @@ def manage_workflow(args):
             stitch_info_summary = StitchInfoSummary(network_infos=v, stitch_info=stitch_info_map[k])
             stitch_info_summaries.append(stitch_info_summary)
 
-        stitch_info_summaries = dict(StitchInfoSummary = stitch_info_summaries)
+        stitch_info_summaries = dict(StitchInfoSummary=stitch_info_summaries)
         sutil.dump_objects(objects=stitch_info_summaries, to_json=args.json)
         return
 
@@ -251,10 +251,13 @@ def manage_workflow(args):
             logger.error(f"Exceptions while initializing controller .... {e}")
             sys.exit(1)
 
+        destroy_failed = False
+
         try:
             controller.destroy(provider_states=states)
         except ControllerException as e:
             logger.error(f"Exceptions while deleting resources ...{e}")
+            destroy_failed = True
         except KeyboardInterrupt as kie:
             logger.error(f"Keyboard Interrupt while deleting resources ... {kie}")
             sys.exit(1)
@@ -293,7 +296,7 @@ def manage_workflow(args):
                                  provider_stats=provider_stats)
         logger.info(f"STATS:duration_in_seconds={workflow_duration}")
         sutil.save_stats(dict(comment="all durations are in seconds", stats=fabfed_stats), args.session)
-        return
+        sys.exit(1 if destroy_failed else 0)
 
 
 def manage_sessions(args):