diff --git a/.github/workflows/flexbe_ci.yml b/.github/workflows/flexbe_ci.yml
index b898b64..d7acadb 100644
--- a/.github/workflows/flexbe_ci.yml
+++ b/.github/workflows/flexbe_ci.yml
@@ -13,15 +13,15 @@ jobs:
         #  - os: ubuntu-22.04
         #    ros: humble
         #    python: python3
-        #    ci_branch: ros2-devel
+        #    ci_branch: humble-pre-release
           - os: ubuntu-22.04
             ros: iron
             python: python3
-            ci_branch: ros2-devel
+            ci_branch: rolling-pre-release
           - os: ubuntu-22.04
             ros: rolling
             python: python3
-            ci_branch: ros2-devel
+            ci_branch: rolling-pre-release
 
     runs-on: ${{ matrix.os }}
     env:
diff --git a/README.md b/README.md
index 82cbf12..1018311 100644
--- a/README.md
+++ b/README.md
@@ -11,10 +11,14 @@ The user interface features a runtime control interface as well as a graphical e
 
 Please refer to the FlexBE Homepage ([flexbe.github.io](http://flexbe.github.io)) for further information, tutorials, application examples, and much more.
 
-![FlexBE CI](https://github.com/FlexBE/flexbe_behavior_engine/workflows/FlexBE%20CI/badge.svg?branch=ros2-devel)
+You may also want to check out the quick start tutorial demonstrations at [FlexBE Turtlesim Demo](https://github.com/FlexBE/flexbe_turtlesim_demo).
+
+![FlexBE CI](https://github.com/FlexBE/flexbe_behavior_engine/workflows/FlexBE%20CI/badge.svg?branch=rolling-pre-release)
 
 Humble ![ROS Build Farm](https://build.ros2.org/job/Hdev__flexbe_behavior_engine__ubuntu_jammy_amd64/badge/icon)
+Iron ![ROS Build Farm](https://build.ros2.org/job/Idev__flexbe_behavior_engine__ubuntu_jammy_amd64/badge/icon)
+Rolling ![ROS Build Farm](https://build.ros2.org/job/Rdev__flexbe_behavior_engine__ubuntu_jammy_amd64/badge/icon)
 
 ## Installation
diff --git a/flexbe_behavior_engine/CHANGELOG.rst b/flexbe_behavior_engine/CHANGELOG.rst
index c7a66b3..7adcfd3 100644
--- a/flexbe_behavior_engine/CHANGELOG.rst
+++ b/flexbe_behavior_engine/CHANGELOG.rst
@@ -1,6 +1,11 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Changelog for package flexbe_behavior_engine
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Forthcoming
+-----------
+* flake8 cleanup
+* support state id and concurrency handling
+
 2.3.3 (2023-08-09)
 ------------------
 
diff --git a/flexbe_core/CHANGELOG.rst b/flexbe_core/CHANGELOG.rst
index 599d810..e51c06e 100644
--- a/flexbe_core/CHANGELOG.rst
+++ b/flexbe_core/CHANGELOG.rst
@@ -1,6 +1,24 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Changelog for package flexbe_core
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Forthcoming
+-----------
+* add options to remove service callers and action clients
+* proxy publisher dictionary usage
+* use modern style names with deprecated flag
+* add sync lock to proxies
+* added code to remove subscriptions and publishers with sync lock
+* use deque for msg buffer; add lock to prevent modifications during callback (e.g.
when thread starts or finishes) +* add hash for StateMap outcome index to standardize handling +* reinitialize existing state machine instead of rebuilding on sync (1000x faster) +* update with standardized topic handling +* update ui version handling +* OperatableStateMachine is now a pseudo manually transitionable state (TODO -separate logic to shadow state design) +* add is_controlled check to avoid attempts at duplicate subscriptions and cleanup +* onboard side coded to send new BehaviorSync and 'mirror/outcome' +* adding state_id handling; pre-building ContainerStructure to set IDs +* flake8, pep257 and codestyle checks + 2.3.3 (2023-08-09) ------------------ * destroy sub/pub/client in executor thread @@ -27,6 +45,7 @@ Changelog for package flexbe_core * clean up some spam to FlexBE app console * include package name in behavior request (requires flexbe_app 3.1+) to allow duplicate behavior names in packages + 2.2.0 (2023-06-29) ------------------ * Modify to used behavior_id (checksum) and behavior_key consistently diff --git a/flexbe_core/flexbe_core/__init__.py b/flexbe_core/flexbe_core/__init__.py index 5a2c0ac..f59d542 100644 --- a/flexbe_core/flexbe_core/__init__.py +++ b/flexbe_core/flexbe_core/__init__.py @@ -45,6 +45,8 @@ from .logger import Logger # noqa: F401 from .state_logger import StateLogger # noqa: F401 +MIN_UI_VERSION = '4.0.0' # Minimum FlexBE App or UI version required to interact with this version of flexbe_core + # pylint: disable=R0903 diff --git a/flexbe_core/flexbe_core/behavior.py b/flexbe_core/flexbe_core/behavior.py index 0e8ef23..f35f416 100644 --- a/flexbe_core/flexbe_core/behavior.py +++ b/flexbe_core/flexbe_core/behavior.py @@ -181,6 +181,12 @@ def confirm(self): self._state_machine.confirm(self.name, self.beh_id) + # def define_structure(self): + # """ + # Calculate all state ids and prepare the ContainerStructure message + # """ + # self._state_machine.define_structure() + def execute(self): """ Execute the behavior. 
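The flexbe_core changelog above notes the switch to a deque-based message buffer guarded by a sync lock. A minimal sketch of that pattern follows (hypothetical names; the actual ProxySubscriberCached API differs):

    # Sketch only: FIFO message buffer protected by a lock so a callback cannot
    # modify it while another thread (e.g., one starting or finishing) reads it.
    from collections import deque
    from threading import Lock

    class BufferedTopic:
        """Illustrative stand-in for the buffered topic handling in flexbe_core."""

        def __init__(self):
            self._buffer = deque()   # FIFO of received messages
            self._lock = Lock()      # serializes callback vs. consumer access

        def callback(self, msg):
            with self._lock:
                self._buffer.append(msg)

        def has_buffered(self):
            with self._lock:
                return len(self._buffer) > 0

        def get_from_buffer(self):
            """Pop the oldest buffered message, or None if the buffer is empty."""
            with self._lock:
                return self._buffer.popleft() if self._buffer else None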
@@ -220,16 +226,18 @@ def prepare_for_switch(self, state):
             states[1].replace_state(state)  # add to new state machine
         self.requested_state_path = state.path  # set start after switch
 
-    def get_current_state(self):
-        return self._state_machine.get_deep_state()
+    def get_current_states(self):
+        return self._state_machine.get_deep_states()
 
     def get_locked_state(self):
-        state = self._state_machine.get_deep_state()
-        while state is not None:
-            if state.is_locked():
-                return state
-
-            state = state._parent
+        """Return the first state designated as locked."""
+        states = self._state_machine.get_deep_states()
+        for state in states:
+            while state is not None:
+                if state.is_locked():
+                    return state
+
+                state = state._parent
         return None
 
     @classmethod
diff --git a/flexbe_core/flexbe_core/core/__init__.py b/flexbe_core/flexbe_core/core/__init__.py
index 96059d9..de9d56f 100644
--- a/flexbe_core/flexbe_core/core/__init__.py
+++ b/flexbe_core/flexbe_core/core/__init__.py
@@ -46,6 +46,7 @@
 from .operatable_state import OperatableState  # noqa: F401
 from .event_state import EventState  # noqa: F401
+from .state_map import StateMap  # noqa: F401
 from .user_data import UserData  # noqa: F401
 
 __all__ = [
@@ -63,5 +64,6 @@
     'PreemptableState',
     'OperatableState',
     'EventState',
+    'StateMap',
     'UserData'
 ]
diff --git a/flexbe_core/flexbe_core/core/concurrency_container.py b/flexbe_core/flexbe_core/core/concurrency_container.py
index 64b6bff..00f7a67 100644
--- a/flexbe_core/flexbe_core/core/concurrency_container.py
+++ b/flexbe_core/flexbe_core/core/concurrency_container.py
@@ -34,12 +34,17 @@
 It synchronizes its current state with the mirror and supports some control mechanisms.
 """
-from flexbe_core.logger import Logger
-from flexbe_core.core.user_data import UserData
 from flexbe_core.core.event_state import EventState
+from flexbe_core.core.lockable_state_machine import LockableStateMachine
+from flexbe_core.core.operatable_state import OperatableState
+from flexbe_core.core.operatable_state_machine import OperatableStateMachine
 from flexbe_core.core.priority_container import PriorityContainer
+from flexbe_core.core.topics import Topics
+from flexbe_core.core.user_data import UserData
+from flexbe_core.logger import Logger
+from flexbe_core.state_logger import StateLogger
 
-from flexbe_core.core.operatable_state_machine import OperatableStateMachine
+from flexbe_msgs.msg import CommandFeedback, OutcomeRequest
 
 
 class ConcurrencyContainer(OperatableStateMachine):
@@ -53,6 +58,9 @@ def __init__(self, conditions=None, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._conditions = conditions if conditions else {}
         self._returned_outcomes = {}
+        self._current_state = None
+        self._type = OperatableStateMachine.ContainerType.ConcurrencyContainer.value
+        self._manual_transition_requested = None
 
     @property
     def sleep_duration(self):
@@ -63,28 +71,111 @@ def sleep_duration(self):
         return sleep_dur
 
+    @property
+    def current_state(self):
+        """Return current state of Concurrency container, which is itself and a list of active states."""
+        return self
+
+    @property
+    def current_state_label(self):
+        """Return current state name of Concurrency container, which is itself."""
+        return self.name
+
+    def get_required_autonomy(self, outcome, state):
+        try:
+            assert state in self._current_state, "get required autonomy in ConcurrencyContainer - state doesn't match!"
+ return self._autonomy[state.name][outcome] + except Exception as exc: + Logger.error(f"Failure to retrieve autonomy for '{self.name}' in CC - " + f" current state label='{self.name}' state='{state.name}' outcome='{outcome}'.") + Logger.localerr(f"{type(exc)} - {exc}\n\n {self._current_state}") + Logger.localerr(f"{self._autonomy}") + def _execute_current_state(self): # execute all states that are done with sleeping and determine next sleep duration self._inner_sync_request = False # clear prior request for lower level state + self._current_state = [] # Concurrency container has multiple active states so use list + + self._manual_transition_requested = None + # Logger.localinfo(f"-concurrency container {self.name} is_controlled={self._is_controlled}" + # f" has transition request={self._sub.has_buffered(Topics._CMD_TRANSITION_TOPIC)}") + if self._is_controlled and self._sub.has_buffered(Topics._CMD_TRANSITION_TOPIC): + # Special handling in concurrency container - can be either CC or one of several internal states. + command_msg = self._sub.get_from_buffer(Topics._CMD_TRANSITION_TOPIC) + + if command_msg.target == self.name: + self._force_transition = True + outcome = self.outcomes[command_msg.outcome] + self._manual_transition_requested = outcome + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="transition", + args=[command_msg.target, self.name])) + Logger.localwarn(f"--> Manually triggered outcome {outcome} of concurrency container {self.name}") + self.on_exit(self.userdata, + states=[s for s in self._states if (s.name not in self._returned_outcomes + or self._returned_outcomes[s.name] is None)]) + self._returned_outcomes = {} + self._current_state = None + self._last_outcome = outcome + return outcome + else: + Logger.localinfo(f"concurrency container {self.name} ") + self._manual_transition_requested = command_msg + for state in self._states: if state.name in self._returned_outcomes and self._returned_outcomes[state.name] is not None: continue # already done with executing + if self._manual_transition_requested is not None and self._manual_transition_requested.target == state.name: + # Transition request applies to this state + # @TODO - Should we be using path not name here? 
+ command_msg = self._manual_transition_requested + self._manual_transition_requested = None # Reset at this level + + if 0 <= command_msg.outcome < len(state.outcomes): + state._force_transition = True + outcome = state.outcomes[command_msg.outcome] + state._manual_transition_requested = outcome + self._returned_outcomes[state.name] = outcome + with UserData(reference=self._userdata, remap=self._remappings[state.name], + input_keys=state.input_keys, output_keys=state.output_keys) as userdata: + state.on_exit(userdata) + + # ConcurrencyContainer bypasses normal operatable state handling of manual request, so do that here + state._publish_outcome(outcome) + + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="transition", + args=[command_msg.target, state.name])) + Logger.localerr(f"--> Manually triggered outcome {outcome} ({command_msg.outcome}) " + f"of state {state.name} from inside concurrency {self.name}") + continue + else: + Logger.localerr(f"--> Invalid outcome {command_msg.outcome} request for state {state.name} " + f"from inside concurrency {self.name}\n{state.outcomes}") + if (PriorityContainer.active_container is not None and not all(a == s for a, s in zip(PriorityContainer.active_container.split('/'), state.path.split('/')))): if isinstance(state, EventState): + # Base state not a container state._notify_skipped() - elif state.get_deep_state() is not None: - state.get_deep_state()._notify_skipped() + continue # other state has priority + + # this state must be a container + deep_states = state.get_deep_states() + if deep_states is not None: + for dpst in deep_states: + dpst._notify_skipped() + continue # other state has priority if state.sleep_duration <= 0: # ready to execute out = self._execute_single_state(state) self._returned_outcomes[state.name] = out - if out: - # Track any state with outcome as the current state - self._current_state = state + + # Track any state that remains as being currently active + self._current_state.append(state) # we want to pass sync requests back up to parent, self._inner_sync_request = self._inner_sync_request or state._inner_sync_request @@ -111,9 +202,36 @@ def _execute_current_state(self): self._returned_outcomes = {} # right now, going out of a concurrency container may break sync # thus, as a quick fix, explicitly request sync again on any output - self._inner_sync_request = True + # self._inner_sync_request = True self._current_state = None - Logger.localwarn('ConcurrencyContainer %s returning outcome %s (request inner sync)' % (self.name, str(outcome))) + + if self._is_controlled: + # reset previously requested outcome if applicable + if self._last_requested_outcome is not None and outcome is None: + self._pub.publish(Topics._OUTCOME_REQUEST_TOPIC, OutcomeRequest(outcome=255, target=self.path)) + self._last_requested_outcome = None + + # request outcome because autonomy level is too low + if (not self._force_transition and self.parent is not None + and (not self.parent.is_transition_allowed(self.name, outcome) + or outcome is not None and self.is_breakpoint)): + if outcome != self._last_requested_outcome: + self._pub.publish(Topics._OUTCOME_REQUEST_TOPIC, + OutcomeRequest(outcome=self.outcomes.index(outcome), + target=self.path)) + Logger.localinfo("<-- Want result: %s > %s" % (self.name, outcome)) + StateLogger.log('flexbe.operator', self, type='request', request=outcome, + autonomy=self.parent.autonomy_level, + required=self.parent.get_required_autonomy(outcome, self)) + self._last_requested_outcome = outcome + 
outcome = None + elif outcome is not None and outcome in self.outcomes: + # autonomy level is high enough, report the executed transition + self._publish_outcome(outcome) + self._force_transition = False + + self._last_outcome = outcome + # Logger.localinfo(f"ConcurrencyContainer '{self.name}' returning outcome '{outcome}' (request inner sync)") return outcome def _execute_single_state(self, state, force_exit=False): @@ -136,21 +254,8 @@ def _execute_single_state(self, state, force_exit=False): return result - def _enable_ros_control(self): - state = self._states[0] - if isinstance(state, EventState): - state._enable_ros_control() - if isinstance(state, OperatableStateMachine): - state._enable_ros_control() - - def _disable_ros_control(self): - state = self._states[0] - if isinstance(state, EventState): - state._disable_ros_control() - if isinstance(state, OperatableStateMachine): - state._disable_ros_control() - def on_enter(self, userdata): # pylint: disable=W0613 + super().on_enter(userdata) for state in self._states: # Force on_enter at state level (userdata passed by _execute_single_state) state._entering = True # force state to handle enter on first execute @@ -162,3 +267,30 @@ def on_exit(self, userdata, states=None): continue # skip states that already exited themselves self._execute_single_state(state, force_exit=True) self._current_state = None + + def get_deep_states(self): + """ + Recursively look for the currently executing states. + + Traverse all state machines down to the terminal child state that is not a container. + + EXCEPT for ConcurrencyContainers. Those are both active state and container. + + @return: The list of active states (not state machine) + """ + deep_states = [self] # Concurrency acts as both state and container for this purpose + if isinstance(self._current_state, list): + for state in self._current_state: + # Internal states (after skipping concurrency container self) + if isinstance(state, LockableStateMachine): + deep_states.extend(state.get_deep_states()) + else: + deep_states.append(state) + # Logger.localinfo(f"Concurrent get_deep_states: {self.name} {[state.path for state in deep_states]}") + return deep_states + elif self._current_state is not None: + Logger.localerr(f"ConcurrentContainer.get_deep_states {self.name} - current state is NOT a list!") + raise TypeError(f"ConcurrentContainer.get_deep_states {self.name} - current state is NOT a list!") + # Otherwise, either haven't fully entered, or all have returned outcomes + + return deep_states diff --git a/flexbe_core/flexbe_core/core/event_state.py b/flexbe_core/flexbe_core/core/event_state.py index 8ab10ba..6c41790 100644 --- a/flexbe_core/flexbe_core/core/event_state.py +++ b/flexbe_core/flexbe_core/core/event_state.py @@ -36,6 +36,7 @@ from flexbe_core.core.preemptable_state import PreemptableState from flexbe_core.core.priority_container import PriorityContainer from flexbe_core.core.operatable_state import OperatableState +from flexbe_core.core.topics import Topics from flexbe_core.logger import Logger from flexbe_core.state_logger import StateLogger @@ -58,25 +59,22 @@ def __init__(self, *args, **kwargs): self._skipped = False self._paused = False self._last_active_container = None - - self._feedback_topic = 'flexbe/command_feedback' - self._repeat_topic = 'flexbe/command/repeat' - self._pause_topic = 'flexbe/command/pause' + self._last_outcome = None def _event_execute(self, *args, **kwargs): - if self._is_controlled and self._sub.has_msg(self._pause_topic): - msg = 
self._sub.get_last_msg(self._pause_topic) - self._sub.remove_last_msg(self._pause_topic) + if self._is_controlled and self._sub.has_msg(Topics._CMD_PAUSE_TOPIC): + msg = self._sub.get_last_msg(Topics._CMD_PAUSE_TOPIC) + self._sub.remove_last_msg(Topics._CMD_PAUSE_TOPIC) if msg.data: Logger.localinfo("--> Pausing in state %s", self.name) - self._pub.publish(self._feedback_topic, CommandFeedback(command="pause")) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="pause")) self._last_active_container = PriorityContainer.active_container # claim priority to propagate pause event PriorityContainer.active_container = self.path self._paused = True else: Logger.localinfo("--> Resuming in state %s", self.name) - self._pub.publish(self._feedback_topic, CommandFeedback(command="resume")) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="resume")) PriorityContainer.active_container = self._last_active_container self._last_active_container = None self._paused = False @@ -87,7 +85,9 @@ def _event_execute(self, *args, **kwargs): if self._entering: self._entering = False + self._last_outcome = None self.on_enter(*args, **kwargs) + if self._skipped and not PreemptableState.preempt: self._skipped = False self.on_resume(*args, **kwargs) @@ -96,16 +96,17 @@ def _event_execute(self, *args, **kwargs): outcome = self.__execute(*args, **kwargs) repeat = False - if self._is_controlled and self._sub.has_msg(self._repeat_topic): + if self._is_controlled and self._sub.has_msg(Topics._CMD_REPEAT_TOPIC): Logger.localinfo("--> Repeating state %s", self.name) - self._sub.remove_last_msg(self._repeat_topic) - self._pub.publish(self._feedback_topic, CommandFeedback(command="repeat")) + self._sub.remove_last_msg(Topics._CMD_REPEAT_TOPIC) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="repeat")) repeat = True if repeat or outcome is not None and not PreemptableState.preempt: self._entering = True self.on_exit(*args, **kwargs) + self._last_outcome = outcome return outcome def _notify_skipped(self): @@ -115,18 +116,21 @@ def _notify_skipped(self): super()._notify_skipped() def _enable_ros_control(self): - super()._enable_ros_control() - self._pub.createPublisher(self._feedback_topic, CommandFeedback) - self._sub.subscribe(self._repeat_topic, Empty, inst_id=id(self)) - self._sub.subscribe(self._pause_topic, Bool, inst_id=id(self)) + if not self._is_controlled: + super()._enable_ros_control() + self._pub.create_publisher(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback) + self._sub.subscribe(Topics._CMD_REPEAT_TOPIC, Empty, inst_id=id(self)) + self._sub.subscribe(Topics._CMD_PAUSE_TOPIC, Bool, inst_id=id(self)) def _disable_ros_control(self): - super()._disable_ros_control() - self._sub.unsubscribe_topic(self._repeat_topic, inst_id=id(self)) - self._sub.unsubscribe_topic(self._pause_topic, inst_id=id(self)) - self._last_active_container = None - if self._paused: - PriorityContainer.active_container = None + if self._is_controlled: + super()._disable_ros_control() + self._pub.remove_publisher(Topics._CMD_FEEDBACK_TOPIC) + self._sub.unsubscribe_topic(Topics._CMD_REPEAT_TOPIC, inst_id=id(self)) + self._sub.unsubscribe_topic(Topics._CMD_PAUSE_TOPIC, inst_id=id(self)) + self._last_active_container = None + if self._paused: + PriorityContainer.active_container = None # Events # (just implement the ones you need) diff --git a/flexbe_core/flexbe_core/core/lockable_state.py b/flexbe_core/flexbe_core/core/lockable_state.py index 76daff5..a108547 100644 --- 
a/flexbe_core/flexbe_core/core/lockable_state.py +++ b/flexbe_core/flexbe_core/core/lockable_state.py @@ -36,6 +36,7 @@ from flexbe_core.logger import Logger from flexbe_core.core.manually_transitionable_state import ManuallyTransitionableState +from flexbe_core.core.topics import Topics class LockableState(ManuallyTransitionableState): @@ -55,19 +56,15 @@ def __init__(self, *args, **kwargs): self._locked = False self._stored_outcome = None - self._feedback_topic = 'flexbe/command_feedback' - self._lock_topic = 'flexbe/command/lock' - self._unlock_topic = 'flexbe/command/unlock' - def _lockable_execute(self, *args, **kwargs): - if self._is_controlled and self._sub.has_msg(self._lock_topic): - msg = self._sub.get_last_msg(self._lock_topic) - self._sub.remove_last_msg(self._lock_topic) + if self._is_controlled and self._sub.has_msg(Topics._CMD_LOCK_TOPIC): + msg = self._sub.get_last_msg(Topics._CMD_LOCK_TOPIC) + self._sub.remove_last_msg(Topics._CMD_LOCK_TOPIC) self._execute_lock(msg.data) - if self._is_controlled and self._sub.has_msg(self._unlock_topic): - msg = self._sub.get_last_msg(self._unlock_topic) - self._sub.remove_last_msg(self._unlock_topic) + if self._is_controlled and self._sub.has_msg(Topics._CMD_UNLOCK_TOPIC): + msg = self._sub.get_last_msg(Topics._CMD_UNLOCK_TOPIC) + self._sub.remove_last_msg(Topics._CMD_UNLOCK_TOPIC) self._execute_unlock(msg.data) # locked, so execute until we want to transition @@ -105,8 +102,8 @@ def _execute_lock(self, target): else: found_target = self._parent.lock(target) # provide feedback about lock - self._pub.publish(self._feedback_topic, CommandFeedback(command="lock", - args=[target, target if found_target else ""])) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="lock", args=[target, target if found_target else ""])) if not found_target: Logger.logwarn(f"Wanted to lock {target}, but could not find it in current path {self.path}.") else: @@ -120,23 +117,26 @@ def _execute_unlock(self, target): else: found_target = self._parent.unlock(target) # provide feedback about unlock - self._pub.publish(self._feedback_topic, CommandFeedback(command="unlock", - args=[target, target if found_target else ""])) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="unlock", args=[target, target if found_target else ""])) if not found_target: Logger.logwarn(f"Wanted to unlock {target}, but could not find it in current path {self.path}.") else: Logger.localinfo(f"--> Unlocking in state {target}") def _enable_ros_control(self): - super()._enable_ros_control() - self._pub.createPublisher(self._feedback_topic, CommandFeedback) - self._sub.subscribe(self._lock_topic, String, inst_id=id(self)) - self._sub.subscribe(self._unlock_topic, String, inst_id=id(self)) + if not self._is_controlled: + super()._enable_ros_control() + self._pub.create_publisher(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback) + self._sub.subscribe(Topics._CMD_LOCK_TOPIC, String, inst_id=id(self)) + self._sub.subscribe(Topics._CMD_UNLOCK_TOPIC, String, inst_id=id(self)) def _disable_ros_control(self): - super()._disable_ros_control() - self._sub.unsubscribe_topic(self._lock_topic, inst_id=id(self)) - self._sub.unsubscribe_topic(self._unlock_topic, inst_id=id(self)) + if self._is_controlled: + super()._disable_ros_control() + self._pub.remove_publisher(Topics._CMD_FEEDBACK_TOPIC) + self._sub.unsubscribe_topic(Topics._CMD_LOCK_TOPIC, inst_id=id(self)) + self._sub.unsubscribe_topic(Topics._CMD_UNLOCK_TOPIC, inst_id=id(self)) def is_locked(self): return 
self._locked diff --git a/flexbe_core/flexbe_core/core/lockable_state_machine.py b/flexbe_core/flexbe_core/core/lockable_state_machine.py index fa71861..1af9101 100644 --- a/flexbe_core/flexbe_core/core/lockable_state_machine.py +++ b/flexbe_core/flexbe_core/core/lockable_state_machine.py @@ -49,17 +49,6 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._locked = False - def get_deep_state(self): - """ - Look for the current state (traversing all state machines down to the real state). - - @return: The current state (not state machine) - """ - container = self - while isinstance(container._current_state, LockableStateMachine): - container = container._current_state - return container._current_state - def _is_internal_transition(self, transition_target): return transition_target in self._labels diff --git a/flexbe_core/flexbe_core/core/manually_transitionable_state.py b/flexbe_core/flexbe_core/core/manually_transitionable_state.py index b08e716..c64e29b 100644 --- a/flexbe_core/flexbe_core/core/manually_transitionable_state.py +++ b/flexbe_core/flexbe_core/core/manually_transitionable_state.py @@ -34,6 +34,7 @@ from flexbe_msgs.msg import CommandFeedback, OutcomeRequest from flexbe_core.core.ros_state import RosState +from flexbe_core.core.topics import Topics from flexbe_core.logger import Logger @@ -51,15 +52,13 @@ def __init__(self, *args, **kwargs): self._force_transition = False self._manual_transition_requested = None - self._feedback_topic = 'flexbe/command_feedback' - self._transition_topic = 'flexbe/command/transition' def _manually_transitionable_execute(self, *args, **kwargs): self._manual_transition_requested = None - if self._is_controlled and self._sub.has_buffered(self._transition_topic): - command_msg = self._sub.get_from_buffer(self._transition_topic) - self._pub.publish(self._feedback_topic, CommandFeedback(command="transition", - args=[command_msg.target, self.name])) + if self._is_controlled and self._sub.has_buffered(Topics._CMD_TRANSITION_TOPIC): + command_msg = self._sub.get_from_buffer(Topics._CMD_TRANSITION_TOPIC) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="transition", args=[command_msg.target, self.name])) if command_msg.target != self.name: Logger.logwarn("Requested outcome for state %s but active state is %s" % (command_msg.target, self.name)) @@ -74,11 +73,14 @@ def _manually_transitionable_execute(self, *args, **kwargs): return self.__execute(*args, **kwargs) def _enable_ros_control(self): - super()._enable_ros_control() - self._pub.createPublisher(self._feedback_topic, CommandFeedback) - self._sub.subscribe(self._transition_topic, OutcomeRequest, inst_id=id(self)) - self._sub.enable_buffer(self._transition_topic) + if not self._is_controlled: + super()._enable_ros_control() + self._pub.create_publisher(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback) + self._sub.subscribe(Topics._CMD_TRANSITION_TOPIC, OutcomeRequest, inst_id=id(self)) + self._sub.enable_buffer(Topics._CMD_TRANSITION_TOPIC) def _disable_ros_control(self): - super()._disable_ros_control() - self._sub.unsubscribe_topic(self._transition_topic, inst_id=id(self)) + if self._is_controlled: + super()._disable_ros_control() + self._pub.remove_publisher(Topics._CMD_FEEDBACK_TOPIC) + self._sub.unsubscribe_topic(Topics._CMD_TRANSITION_TOPIC, inst_id=id(self)) diff --git a/flexbe_core/flexbe_core/core/operatable_state.py b/flexbe_core/flexbe_core/core/operatable_state.py index ece8cb0..35c683f 100644 --- 
a/flexbe_core/flexbe_core/core/operatable_state.py +++ b/flexbe_core/flexbe_core/core/operatable_state.py @@ -31,11 +31,13 @@ """OperatableState.""" -from std_msgs.msg import UInt8, String +from std_msgs.msg import UInt32, String from flexbe_msgs.msg import OutcomeRequest from flexbe_core.core.preemptable_state import PreemptableState +from flexbe_core.core.topics import Topics +from flexbe_core.core.state_map import StateMap from flexbe_core.logger import Logger from flexbe_core.state_logger import StateLogger @@ -57,48 +59,56 @@ def __init__(self, *args, **kwargs): self._last_requested_outcome = None - self._outcome_topic = 'flexbe/mirror/outcome' - self._request_topic = 'flexbe/outcome_request' - self._debug_topic = 'flexbe/debug/current_state' - def _operatable_execute(self, *args, **kwargs): outcome = self.__execute(*args, **kwargs) if self._is_controlled: # reset previously requested outcome if applicable if self._last_requested_outcome is not None and outcome is None: - self._pub.publish(self._request_topic, OutcomeRequest(outcome=255, target=self.path)) + self._pub.publish(Topics._OUTCOME_REQUEST_TOPIC, OutcomeRequest(outcome=255, target=self.path)) self._last_requested_outcome = None # request outcome because autonomy level is too low if not self._force_transition and (not self.parent.is_transition_allowed(self.name, outcome) or outcome is not None and self.is_breakpoint): if outcome != self._last_requested_outcome: - self._pub.publish(self._request_topic, OutcomeRequest(outcome=self.outcomes.index(outcome), - target=self.path)) + self._pub.publish(Topics._OUTCOME_REQUEST_TOPIC, + OutcomeRequest(outcome=self.outcomes.index(outcome), target=self.path)) Logger.localinfo("<-- Want result: %s > %s" % (self.name, outcome)) StateLogger.log('flexbe.operator', self, type='request', request=outcome, autonomy=self.parent.autonomy_level, - required=self.parent.get_required_autonomy(outcome)) + required=self.parent.get_required_autonomy(outcome, self)) self._last_requested_outcome = outcome outcome = None # autonomy level is high enough, report the executed transition elif outcome is not None and outcome in self.outcomes: - outcome_index = self.outcomes.index(outcome) - Logger.localinfo("State result: %s > %s (%d)" % (self.name, outcome, outcome_index)) - self._pub.publish(self._outcome_topic, UInt8(data=outcome_index)) - self._pub.publish(self._debug_topic, String(data="%s > %s" % (self.path, outcome))) - if self._force_transition: - StateLogger.log('flexbe.operator', self, type='forced', forced=outcome, - requested=self._last_requested_outcome) - self._last_requested_outcome = None - - self._force_transition = False + self._publish_outcome(outcome) + self._force_transition = False return outcome + def _publish_outcome(self, outcome): + """Update the UI and logs about this outcome.""" + # 0 outcome status denotes no outcome, not index so add +1 for valid outcome (subtract in mirror) + outcome_index = self.outcomes.index(outcome) + Logger.localinfo(f"State result: {self.name} > {outcome}") + self._pub.publish(Topics._OUTCOME_TOPIC, UInt32(data=StateMap.hash(self, outcome_index))) + self._pub.publish(Topics._DEBUG_TOPIC, String(data="%s > %s" % (self.path, outcome))) + if self._force_transition: + StateLogger.log('flexbe.operator', self, type='forced', forced=outcome, + requested=self._last_requested_outcome) + self._last_requested_outcome = None + def _enable_ros_control(self): - super()._enable_ros_control() - self._pub.createPublisher(self._outcome_topic, UInt8) - 
self._pub.createPublisher(self._debug_topic, String) - self._pub.createPublisher(self._request_topic, OutcomeRequest) + if not self._is_controlled: + super()._enable_ros_control() + self._pub.create_publisher(Topics._OUTCOME_TOPIC, UInt32) + self._pub.create_publisher(Topics._DEBUG_TOPIC, String) + self._pub.create_publisher(Topics._OUTCOME_REQUEST_TOPIC, OutcomeRequest) + + def _disable_ros_control(self): + if self._is_controlled: + super()._disable_ros_control() + self._pub.remove_publisher(Topics._OUTCOME_TOPIC) + self._pub.remove_publisher(Topics._DEBUG_TOPIC) + self._pub.remove_publisher(Topics._OUTCOME_REQUEST_TOPIC) diff --git a/flexbe_core/flexbe_core/core/operatable_state_machine.py b/flexbe_core/flexbe_core/core/operatable_state_machine.py index 982d58b..cc1aef0 100644 --- a/flexbe_core/flexbe_core/core/operatable_state_machine.py +++ b/flexbe_core/flexbe_core/core/operatable_state_machine.py @@ -30,18 +30,21 @@ """OperatableStateMachine.""" +from enum import Enum -import zlib -from std_msgs.msg import Empty, UInt8, Int32 -from flexbe_msgs.msg import Container, ContainerStructure, BehaviorSync, CommandFeedback +from std_msgs.msg import Empty, Int32, UInt32, UInt8, String -from flexbe_core.core.state_machine import StateMachineError from flexbe_core.core.operatable_state import OperatableState from flexbe_core.core.preemptable_state_machine import PreemptableStateMachine +from flexbe_core.core.ros_state import RosState +from flexbe_core.core.state_map import StateMap +from flexbe_core.core.topics import Topics from flexbe_core.core.user_data import UserData from flexbe_core.logger import Logger from flexbe_core.state_logger import StateLogger +from flexbe_msgs.msg import BehaviorSync, CommandFeedback, Container, ContainerStructure, OutcomeRequest + class OperatableStateMachine(PreemptableStateMachine): """ @@ -52,15 +55,35 @@ class OperatableStateMachine(PreemptableStateMachine): autonomy_level = 3 + class ContainerType(Enum): + """Define ContainerTypes used in ContainerStructure messages.""" + + State = 0 + OperatableStateMachine = 1 + PriorityContainer = 2 + ConcurrencyContainer = 3 + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.id = None self._autonomy = {} self._inner_sync_request = False self._last_exception = None + self._state_map = None + self._structure = None + self._type = OperatableStateMachine.ContainerType.OperatableStateMachine.value - # construction + # Allow state machines to accept forced transitions + self._last_requested_outcome = None + self._force_transition = None + self._manual_transition_requested = None + + @property + def is_breakpoint(self): + """Check if this state defined as a breakpoint.""" + return self.path in RosState._breakpoints + # construction @staticmethod def add(label, state, transitions, autonomy=None, remapping=None): """ @@ -84,49 +107,73 @@ def add(label, state, transitions, autonomy=None, remapping=None): PreemptableStateMachine.add(label, state, transitions, remapping) self._autonomy[label] = autonomy + def define_structure(self): + """Calculate all state ids and prepare the ContainerStructure message.""" + self._state_map = StateMap() + self._structure = self._build_structure_msg() + def _build_structure_msg(self): """Create a message to describe the structure of this state machine.""" structure_msg = ContainerStructure() - container_msg = self._add_to_structure_msg(structure_msg) + container_msg = self._add_to_structure_msg(structure_msg, self._state_map) container_msg.outcomes = self.outcomes 
         structure_msg.behavior_id = self.id
         return structure_msg
 
-    def _add_to_structure_msg(self, structure_msg):
+    def _add_to_structure_msg(self, structure_msg, state_map):
         """
         Add this state machine and all children to the structure message.
 
         @type structure_msg: ContainerStructure
         @param structure_msg: The message that will finally contain the structure message.
+
+        @type state_map: StateMap
+        @param state_map: map of state ids (hash based on path) to instance
         """
+        state_map.add_state(self.path, self)
+
         # add self to message
-        container_msg = Container()
-        container_msg.path = self.path
+        container_msg = Container(state_id=self.state_id, type=self._type, path=self.path)
         container_msg.children = [state.name for state in self._states]
         structure_msg.containers.append(container_msg)
         # add children to message
         for state in self._states:
-            # create and add children
-            if isinstance(state, OperatableStateMachine):
-                state_msg = state._add_to_structure_msg(structure_msg)
-            else:
-                state_msg = Container(path=state.path)
-                structure_msg.containers.append(state_msg)
-            # complete structure info for children
-            state_msg.outcomes = state.outcomes
-            state_msg.transitions = [self._transitions[state.name][outcome] for outcome in state.outcomes]
-            state_msg.autonomy = [self._autonomy[state.name][outcome] for outcome in state.outcomes]
+            try:
+                # create and add children
+                if isinstance(state, OperatableStateMachine):
+                    state_msg = state._add_to_structure_msg(structure_msg, state_map)
+                else:
+                    # Terminal states
+                    state_map.add_state(state.path, state)  # Update the state IDs
+                    state_msg = Container(state_id=state.state_id, type=state.type, path=state.path)
+                    structure_msg.containers.append(state_msg)
+                # complete structure info for children
+                state_msg.outcomes = state.outcomes
+                state_msg.transitions = [self._transitions[state.name][outcome] for outcome in state.outcomes]
+                state_msg.autonomy = [self._autonomy[state.name][outcome] for outcome in state.outcomes]
+            except Exception as exc:  # pylint: disable=W0703
+                Logger.logerr(f"Error building container structure for {state.name}!")
+                Logger.localerr(f"{type(exc)} - {exc}")
         return container_msg
 
     def get_latest_status(self):
         """Return the latest execution information as a BehaviorSync message."""
-        with self._status_lock:
-            path = self._last_deep_state_path
-            beh_id = self.id
-        msg = BehaviorSync()
-        msg.behavior_id = beh_id if beh_id is not None else 0
-        msg.current_state_checksum = zlib.adler32(path.encode()) & 0x7fffffff if path is not None else 0
+        msg = BehaviorSync()
+        with self._status_lock:
+            active_states = self._last_deep_states_list
+        msg.behavior_id = self.id if self.id is not None else 0
+
+        if active_states is not None:
+            for active in active_states:
+                if active is not None:
+                    outcome_index = 0
+                    if active._last_outcome is not None:
+                        try:
+                            outcome_index = active._outcomes.index(active._last_outcome)
+                        except Exception:  # pylint: disable=W0703
+                            Logger.localerr(f"Invalid outcome='{active._last_outcome}' for '{active}' - ignore outcome!")
+
+                    msg.current_state_checksums.append(StateMap.hash(active, outcome_index))
         return msg
 
     # execution
@@ -144,8 +191,56 @@ def _execute_current_state(self):
             import traceback  # pylint: disable=C0415
             Logger.localinfo(traceback.format_exc().replace("%", "%%"))  # Guard against exeception including format!
+ if self._is_controlled: + # reset previously requested outcome if applicable + if self._last_requested_outcome is not None and outcome is None: + self._pub.publish(Topics._OUTCOME_REQUEST_TOPIC, OutcomeRequest(outcome=255, target=self.path)) + self._last_requested_outcome = None + + # request outcome because autonomy level is too low + if not self._force_transition and self.parent is not None: + # This check is not relevant to top-level state machines + if (not self.parent.is_transition_allowed(self.name, outcome) + or outcome is not None and self.is_breakpoint): + if outcome != self._last_requested_outcome: + self._pub.publish(Topics._OUTCOME_REQUEST_TOPIC, + OutcomeRequest(outcome=self.outcomes.index(outcome), + target=self.path)) + Logger.localinfo("<-- Want result: %s > %s" % (self.name, outcome)) + StateLogger.log('flexbe.operator', self, type='request', request=outcome, + autonomy=self.parent.autonomy_level, + required=self.parent.get_required_autonomy(outcome, self)) + self._last_requested_outcome = outcome + outcome = None + + # autonomy level is high enough, report the executed transition + elif outcome is not None and outcome in self.outcomes: + self._publish_outcome(outcome) + self._force_transition = False + + self._last_outcome = outcome return outcome + def _publish_outcome(self, outcome): + """Update the UI and logs about this outcome.""" + # 0 outcome status denotes no outcome, not index so add +1 for valid outcome (subtract in mirror) + try: + outcome_index = self.outcomes.index(outcome) + except Exception as exc: # pylint: disable=W0703 + outcome_index = 0 + Logger.localerr("State Machine outcome error : %s > %s (%d) (%d) (%s)" + % (self.name, outcome, outcome_index, self._state_id, self.__class__.__name__)) + raise exc + + Logger.localinfo("State Machine result: %s > %s (%d) (%d) (%s)" + % (self.name, outcome, outcome_index, self._state_id, self.__class__.__name__)) + self._pub.publish(Topics._OUTCOME_TOPIC, UInt32(data=StateMap.hash(self, outcome_index))) + self._pub.publish(Topics._DEBUG_TOPIC, String(data="%s > %s" % (self.path, outcome))) + if self._force_transition: + StateLogger.log('flexbe.operator', self, type='forced', forced=outcome, + requested=self._last_requested_outcome) + self._last_requested_outcome = None + def process_sync_request(self): """ Provide explicit sync as back-up functionality. 
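The "+1 for valid outcome ... (subtract in mirror)" comments above imply that each published UInt32 packs a masked state id with an outcome code in the low byte, where 0 means "no outcome yet". A hedged sketch of the decode side (illustrative names only; the actual mirror code may differ):

    # Assumes the 0x7FFFFF00 mask defined in state_map.py: the high bits
    # identify the state, the low byte carries outcome index + 1.
    def unpack_state_outcome(hash_value):
        state_id = hash_value & 0x7FFFFF00
        outcome_code = hash_value & 0xFF
        outcome_index = outcome_code - 1 if outcome_code > 0 else None  # undo the +1
        return state_id, outcome_index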
@@ -154,49 +249,47 @@
         since it requires additional 8 byte + header update bandwith and time to restart mirror
         """
         if self._inner_sync_request:
-            if self.id is None:
-                Logger.localerr("Sync requested - but why processing here - self.id is None!")
-
-            msg = BehaviorSync()
-            msg.behavior_id = self.id
-            try:
-                deep_state = self.get_deep_state()
-                if deep_state is not None:
-                    try:
-                        current_label = self.current_state_label
-                    except StateMachineError:  # pylint: disable=W0703
-                        current_label = "None"
-
-                    Logger.localinfo(f"Sync request processed by {self.id} {self.name} with "
-                                     f"current state={current_label} (deep state = {deep_state.name})")
-                    msg.current_state_checksum = zlib.adler32(deep_state.path.encode()) & 0x7fffffff
-
-                else:
-                    Logger.localwarn(f"Inner sync requested for {self.name} : - no active deep state!'")
-            except Exception as exc:  # pylint: disable=W0703
-                Logger.localerr(f"Inner sync requested by {self.id} - encountered error {type(exc)}:\n {exc}")
             self._inner_sync_request = False
-            self._pub.publish('flexbe/mirror/sync', msg)
-            self._pub.publish('flexbe/command_feedback', CommandFeedback(command="sync", args=[]))
-            Logger.localinfo("<-- Sent synchronization message for mirror.")
+            self._pub.publish(Topics._MIRROR_SYNC_TOPIC, self.get_latest_status())
+            self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="sync", args=[]))
+            Logger.localinfo("<-- Sent synchronization message to mirror.")
         else:
             Logger.error('Inner sync processed for %s - but no sync request flag?' % (self.name))
 
     def is_transition_allowed(self, label, outcome):
         return self._autonomy[label].get(outcome, -1) < OperatableStateMachine.autonomy_level
 
-    def get_required_autonomy(self, outcome):
-        return self._autonomy[self.current_state_label][outcome]
+    def get_required_autonomy(self, outcome, state):
+        try:
+            assert self.current_state_label == state.name, "get required autonomy in OSM - state doesn't match!"
+ return self._autonomy[self.current_state_label][outcome] + except Exception: # pylint: disable=W0703 + Logger.error(f"Failure to retrieve autonomy for '{self.name}' - " + f" current state label='{self.name}' outcome='{outcome}'.") + Logger.localerr(f"{self._autonomy}") def destroy(self): - Logger.localinfo(f'Destroy state machine {self.name}: {self.id} ...') - self._notify_stop() + Logger.localinfo(f"Destroy state machine '{self.name}': {self.id} inst_id={id(self)} ...") + self._notify_stop() # Recursively stop the states + + Logger.localinfo(' disable top-level ROS control ...') self._disable_ros_control() - self._sub.unsubscribe_topic('flexbe/command/autonomy', inst_id=id(self)) - self._sub.unsubscribe_topic('flexbe/command/sync', inst_id=id(self)) - self._sub.unsubscribe_topic('flexbe/command/attach', inst_id=id(self)) - self._sub.unsubscribe_topic('flexbe/request_mirror_structure', inst_id=id(self)) + + Logger.localinfo(f" unsubscribe top-level state machine '{self.name}' topics ...") + self._sub.unsubscribe_topic(Topics._CMD_ATTACH_TOPIC, inst_id=id(self)) + self._sub.unsubscribe_topic(Topics._CMD_AUTONOMY_TOPIC, inst_id=id(self)) + self._sub.unsubscribe_topic(Topics._CMD_SYNC_TOPIC, inst_id=id(self)) + self._sub.unsubscribe_topic(Topics._REQUEST_STRUCTURE_TOPIC, inst_id=id(self)) + + Logger.localinfo(f" remove top-level state machine '{self.name}' publishers ...") + self._pub.remove_publisher(Topics._CMD_FEEDBACK_TOPIC) + self._pub.remove_publisher(Topics._DEBUG_TOPIC) + self._pub.remove_publisher(Topics._MIRROR_STRUCTURE_TOPIC) + self._pub.remove_publisher(Topics._MIRROR_SYNC_TOPIC) + self._pub.remove_publisher(Topics._OUTCOME_TOPIC) + self._pub.remove_publisher(Topics._OUTCOME_REQUEST_TOPIC) + + Logger.localinfo(" state logger shutdown ...") StateLogger.shutdown() def confirm(self, name, beh_id): @@ -215,46 +308,55 @@ def confirm(self, name, beh_id): self.set_name(name) self.id = beh_id + self.define_structure() + Logger.localinfo(f"State machine '{self.name}' ({self.id}) (inst_id={id(self)}) confirmed and structure defined.") + Logger.localinfo(f'--> Set up pub/sub for behavior {self.name}: {self.id} ...') - # Update mirror with currently active state (high bandwidth mode) - self._pub.createPublisher('flexbe/mirror/sync', BehaviorSync) - # Sends the current structure to the mirror - self._pub.createPublisher('flexbe/mirror/structure', ContainerStructure) # Gives feedback about executed commands to the GUI - self._pub.createPublisher('flexbe/command_feedback', CommandFeedback) + self._pub.create_publisher(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback) + # transition information for debugging + self._pub.create_publisher(Topics._DEBUG_TOPIC, String) + # Sends the current structure to the mirror + self._pub.create_publisher(Topics._MIRROR_STRUCTURE_TOPIC, ContainerStructure) + # Update mirror with currently active state (high bandwidth mode) + self._pub.create_publisher(Topics._MIRROR_SYNC_TOPIC, BehaviorSync) + # Transition outcome information used by mirror to track onboard state + self._pub.create_publisher(Topics._OUTCOME_TOPIC, UInt32) + # Pass hints to the UI + self._pub.create_publisher(Topics._OUTCOME_REQUEST_TOPIC, OutcomeRequest) - self._sub.subscribe('flexbe/command/autonomy', UInt8, self._set_autonomy_level, inst_id=id(self)) - self._sub.subscribe('flexbe/command/sync', Empty, self._sync_callback, inst_id=id(self)) - self._sub.subscribe('flexbe/command/attach', UInt8, self._attach_callback, inst_id=id(self)) - self._sub.subscribe('flexbe/request_mirror_structure', Int32, 
self._mirror_structure_callback, inst_id=id(self)) + self._sub.subscribe(Topics._CMD_AUTONOMY_TOPIC, UInt8, self._set_autonomy_level, inst_id=id(self)) + self._sub.subscribe(Topics._CMD_ATTACH_TOPIC, UInt8, self._attach_callback, inst_id=id(self)) + self._sub.subscribe(Topics._CMD_SYNC_TOPIC, Empty, self._sync_callback, inst_id=id(self)) + self._sub.subscribe(Topics._REQUEST_STRUCTURE_TOPIC, Int32, self._mirror_structure_callback, inst_id=id(self)) StateLogger.initialize(name) StateLogger.log('flexbe.initialize', None, behavior=name, autonomy=OperatableStateMachine.autonomy_level) if OperatableStateMachine.autonomy_level != 255: self._enable_ros_control() - Logger.localinfo(f'--> Wait for behavior {self.name}: {self.id} publishers to activate ...') + Logger.localinfo(f"--> Wait for behavior '{self.name}': {self.id} publishers to activate ...") self.wait(seconds=0.25) # no clean way to wait for publisher to be ready... - Logger.localinfo(f'--> Notify behavior {self.name}: {self.id} states to start ...') + Logger.localinfo(f"--> Notify behavior '{self.name}': {self.id} states to start ...") self._notify_start() - Logger.localinfo(f'--> behavior {self.name}: {self.id} confirmation complete!') + Logger.localinfo(f"--> behavior '{self.name}': {self.id} confirmation complete!") # operator callbacks def _set_autonomy_level(self, msg): """Set the current autonomy level.""" if OperatableStateMachine.autonomy_level != msg.data: - Logger.localinfo(f'--> Request autonomy changed to {msg.data} on {self.name}') + Logger.localinfo(f"--> Request autonomy changed to {msg.data} on '{self.name}'") if msg.data < 0: - Logger.localinfo(f'--> Negative autonomy level={msg.data} - Preempt {self.name}!') + Logger.localinfo(f"--> Negative autonomy level={msg.data} - Preempt '{self.name}'!") self._preempt_cb(msg) else: OperatableStateMachine.autonomy_level = msg.data - self._pub.publish('flexbe/command_feedback', CommandFeedback(command="autonomy", args=[])) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="autonomy", args=[])) def _sync_callback(self, msg): - Logger.localinfo(f"--> Synchronization requested ... ({self.id}) {self.name}") + Logger.localwarn(f"--> Synchronization requested ... ({self.id}) '{self.name}' ") self._inner_sync_request = True # Flag to process at the end of spin loop def _attach_callback(self, msg): @@ -267,19 +369,24 @@ def _attach_callback(self, msg): # send command feedback cfb = CommandFeedback(command="attach") cfb.args.append(self.name) - self._pub.publish('flexbe/command_feedback', cfb) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, cfb) Logger.localinfo("<-- Sent attach confirm.") def _mirror_structure_callback(self, msg): - Logger.localinfo(f"--> Creating behavior structure for mirror id={msg.data} ...") - self._pub.publish('flexbe/mirror/structure', self._build_structure_msg()) - Logger.localinfo("<-- Sent behavior structure to mirror.") - # enable control of states since a mirror is listening - self._enable_ros_control() + if self._structure: + Logger.localinfo(f"--> Sending behavior structure to mirror id={msg.data} ...") + self._pub.publish(Topics._MIRROR_STRUCTURE_TOPIC, self._structure) + self._inner_sync_request = True + # enable control of states since a mirror is listening + self._enable_ros_control() + else: + Logger.logwarn(f"No structure defined for '{self.name}'! 
- nothing sent to mirror.") # handle state events def _notify_start(self): + super()._notify_start() + for state in self._states: if isinstance(state, OperatableState): state.on_start() @@ -287,16 +394,19 @@ def _notify_start(self): state._notify_start() def _notify_stop(self): - self.on_stop() + Logger.localinfo(f"Notify stop for {self.name} {self.path} ({id(self)}) ") + for state in self._states: if isinstance(state, OperatableState): state.on_stop() if isinstance(state, OperatableStateMachine): - state.on_stop() state._notify_stop() if state._is_controlled: state._disable_ros_control() + super()._notify_stop() + self._structure = None # Flag for destruction + def on_exit(self, userdata): if self._current_state is not None: udata = UserData(reference=self.userdata, diff --git a/flexbe_core/flexbe_core/core/preemptable_state.py b/flexbe_core/flexbe_core/core/preemptable_state.py index afd67ae..c07df7e 100644 --- a/flexbe_core/flexbe_core/core/preemptable_state.py +++ b/flexbe_core/flexbe_core/core/preemptable_state.py @@ -36,6 +36,7 @@ from flexbe_msgs.msg import CommandFeedback from flexbe_core.core.lockable_state import LockableState +from flexbe_core.core.topics import Topics from flexbe_core.logger import Logger @@ -46,7 +47,6 @@ class PreemptableState(LockableState): If preempted, the state will not be executed anymore and return the outcome preempted. """ - _preempted_name = 'preempted' preempt = False def __init__(self, *args, **kwargs): @@ -56,13 +56,10 @@ def __init__(self, *args, **kwargs): PreemptableState.preempt = False - self._feedback_topic = 'flexbe/command_feedback' - self._preempt_topic = 'flexbe/command/preempt' - def _preemptable_execute(self, *args, **kwargs): - if self._is_controlled and self._sub.has_msg(self._preempt_topic): - self._sub.remove_last_msg(self._preempt_topic) - self._pub.publish(self._feedback_topic, CommandFeedback(command="preempt")) + if self._is_controlled and self._sub.has_msg(Topics._CMD_PREEMPT_TOPIC): + self._sub.remove_last_msg(Topics._CMD_PREEMPT_TOPIC) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="preempt")) PreemptableState.preempt = True Logger.localinfo("--> Behavior will be preempted") @@ -76,17 +73,20 @@ def _preemptable_execute(self, *args, **kwargs): def _notify_skipped(self): # make sure we dont miss a preempt even if not being executed - if self._is_controlled and self._sub.has_msg(self._preempt_topic): - self._sub.remove_last_msg(self._preempt_topic) - self._pub.publish(self._feedback_topic, CommandFeedback(command="preempt")) + if self._is_controlled and self._sub.has_msg(Topics._CMD_PREEMPT_TOPIC): + self._sub.remove_last_msg(Topics._CMD_PREEMPT_TOPIC) + self._pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="preempt")) PreemptableState.preempt = True def _enable_ros_control(self): - super()._enable_ros_control() - self._pub.createPublisher(self._feedback_topic, CommandFeedback) - self._sub.subscribe(self._preempt_topic, Empty, inst_id=id(self)) - PreemptableState.preempt = False + if not self._is_controlled: + super()._enable_ros_control() + self._pub.create_publisher(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback) + self._sub.subscribe(Topics._CMD_PREEMPT_TOPIC, Empty, inst_id=id(self)) + PreemptableState.preempt = False def _disable_ros_control(self): - super()._disable_ros_control() - self._sub.unsubscribe_topic(self._preempt_topic, inst_id=id(self)) + if self._is_controlled: + super()._disable_ros_control() + self._sub.unsubscribe_topic(Topics._CMD_PREEMPT_TOPIC, inst_id=id(self)) + 
self._pub.remove_publisher(Topics._CMD_FEEDBACK_TOPIC) diff --git a/flexbe_core/flexbe_core/core/preemptable_state_machine.py b/flexbe_core/flexbe_core/core/preemptable_state_machine.py index a44d6a0..ea7af2d 100644 --- a/flexbe_core/flexbe_core/core/preemptable_state_machine.py +++ b/flexbe_core/flexbe_core/core/preemptable_state_machine.py @@ -31,16 +31,14 @@ """A state machine that can be preempted.""" import threading -import zlib import rclpy from std_msgs.msg import Empty -from flexbe_msgs.msg import BehaviorSync from flexbe_core.core.lockable_state_machine import LockableStateMachine from flexbe_core.core.preemptable_state import PreemptableState +from flexbe_core.core.topics import Topics from flexbe_core.logger import Logger -from flexbe_core.proxy import ProxySubscriberCached class PreemptableStateMachine(LockableStateMachine): @@ -54,23 +52,22 @@ class PreemptableStateMachine(LockableStateMachine): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - # always listen to preempt so that the behavior can be stopped even if unsupervised - self._preempt_topic = 'flexbe/command/preempt' - self._sub = ProxySubscriberCached({self._preempt_topic: Empty}, inst_id=id(self)) - self._sub.set_callback(self._preempt_topic, self._preempt_cb, inst_id=id(self)) self._status_lock = threading.Lock() - self._last_deep_state_name = None - self._last_deep_state_path = None + self._last_deep_states_list = None + self._last_outcome = None + + def _notify_start(self): + # always listen to preempt so that the behavior can be stopped even if unsupervised (e.g not ROS controlled via OCS) + self._sub.subscribe(Topics._CMD_PREEMPT_TOPIC, Empty, self._preempt_cb, inst_id=id(self)) + + def _notify_stop(self): + self._sub.unsubscribe_topic(Topics._CMD_PREEMPT_TOPIC, inst_id=id(self)) def _preempt_cb(self, msg): if not self._is_controlled: Logger.localinfo(f'Preempting {self.name}!') PreemptableState.preempt = True - def on_stop(self): - # No other SM classes have on_stop, so no call to super().on_stop - self._sub.unsubscribe_topic(self._preempt_topic, inst_id=id(self)) - @staticmethod def add(label, state, transitions=None, remapping=None): transitions[PreemptableState._preempted_name] = PreemptableStateMachine._preempted_name @@ -86,17 +83,16 @@ def spin(self, userdata=None): outcome = self.execute(userdata) # Store the information for safely passing to heartbeat thread - deep_state = self.get_deep_state() - if deep_state: - if deep_state.name != self._last_deep_state_name: - with self._status_lock: - self._last_deep_state_path = str(deep_state.path) - self._last_deep_state_name = str(deep_state.name) - else: - if self._last_deep_state_name is not None: - with self._status_lock: - self._last_deep_state_path = None - self._last_deep_state_name = None + deep_states = self.get_deep_states() + assert isinstance(deep_states, list), f"Expecting a list here, not {deep_states}" + if deep_states != self._last_deep_states_list: + # Logger.localinfo(f"New deep states for '{self.name}' len={len(deep_states)} " + # f"deep states: {[dpst.path for dpst in deep_states if dpst is not None]}") + with self._status_lock: + self._last_deep_states_list = deep_states + # else: + # Logger.localinfo(f"Same old deep states for '{self.name}' len={len(deep_states)} - " + # f"deep states: {[dpst.name for dpst in deep_states]}") if self._inner_sync_request: # Top-level state machine with sync request @@ -106,7 +102,7 @@ def spin(self, userdata=None): Logger.loginfo(f"PreemptableStateMachine {self.name} spin() - done with 
outcome={outcome}") break - self.sleep() + self.wait(seconds=self.sleep_duration) return outcome @@ -116,16 +112,8 @@ def get_latest_status(self): Note: Mirror uses derived version that cleans up mirror paths """ - with self._status_lock: - path = self._last_deep_state_path - - msg = BehaviorSync() - msg.behavior_id = -1 - if path is not None: - msg.current_state_checksum = zlib.adler32(path.encode()) & 0x7fffffff - else: - msg.current_state_checksum = -1 - return msg + raise NotImplementedError("Do not call this version directly - " + "either OperatableStateMachine or Mirror should override") @classmethod def process_sync_request(cls): diff --git a/flexbe_core/flexbe_core/core/priority_container.py b/flexbe_core/flexbe_core/core/priority_container.py index 1cf00b6..d7f2924 100644 --- a/flexbe_core/flexbe_core/core/priority_container.py +++ b/flexbe_core/flexbe_core/core/priority_container.py @@ -41,6 +41,7 @@ class PriorityContainer(OperatableStateMachine): def __init__(self, conditions=None, *args, **kwargs): super().__init__(*args, **kwargs) self._parent_active_container = None + self._type = OperatableStateMachine.ContainerType.PriorityContainer.value def execute(self, *args, **kwargs): if (PriorityContainer.active_container is None diff --git a/flexbe_core/flexbe_core/core/ros_state_machine.py b/flexbe_core/flexbe_core/core/ros_state_machine.py index 84a7c44..7486d0a 100644 --- a/flexbe_core/flexbe_core/core/ros_state_machine.py +++ b/flexbe_core/flexbe_core/core/ros_state_machine.py @@ -31,6 +31,7 @@ """A state machine to interface with ROS.""" from rclpy.duration import Duration + from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached from flexbe_core.core.state_machine import StateMachine @@ -53,17 +54,30 @@ def __init__(self, *args, **kwargs): self._pub = ProxyPublisher() self._sub = ProxySubscriberCached() - def wait(self, seconds=None): - """Wait for designated ROS clock time to keep APPROXIMATELY on scheduled tic rate.""" + def _notify_start(self): + """Call when state machine starts up.""" + + def _notify_stop(self): + """Call when state machine shuts down.""" + + def wait(self, seconds=None, context=None): + """ + Wait for designated ROS clock time to keep APPROXIMATELY on scheduled tic rate. 
+ + @param seconds - floating point seconds to sleep + @param context - rclpy Context, normally None for default context, but specific context used in testing + """ if seconds is not None and seconds > 0: - self._node.get_clock().sleep_for(Duration(seconds=seconds)) + self._node.get_clock().sleep_for(Duration(seconds=seconds), context=context) def _enable_ros_control(self): - self._is_controlled = True - for state in self._states: - state._enable_ros_control() + if not self._is_controlled: + self._is_controlled = True + for state in self._states: + state._enable_ros_control() def _disable_ros_control(self): - self._is_controlled = False - for state in self._states: - state._disable_ros_control() + if self._is_controlled: + self._is_controlled = False + for state in self._states: + state._disable_ros_control() diff --git a/flexbe_core/flexbe_core/core/state.py b/flexbe_core/flexbe_core/core/state.py index bcabb1e..6d5d6a4 100644 --- a/flexbe_core/flexbe_core/core/state.py +++ b/flexbe_core/flexbe_core/core/state.py @@ -44,6 +44,8 @@ def _remove_duplicates(input_list): class State: """Basic FlexBE State.""" + _preempted_name = 'preempted' # Define name here, but handle logic in derived class + def __init__(self, *args, **kwargs): self._outcomes = _remove_duplicates(kwargs.get('outcomes', [])) io_keys = kwargs.get('io_keys', []) @@ -52,7 +54,9 @@ def __init__(self, *args, **kwargs): # properties of instances of a state machine self._name = None self._parent = None + self._state_id = None # Assigned with structure after all states added to behavior self._inner_sync_request = False # Any state can generate request, but should be rare + self._type = 0 # Basic states are type 0, containers have non-zero type def __str__(self): return self._name @@ -98,6 +102,14 @@ def set_parent(self, value): self._parent = value + @property + def state_id(self): + return self._state_id + @property def path(self): return "" if self.parent is None else self.parent.path + "/" + self.name + + @property + def type(self): + return self._type diff --git a/flexbe_core/flexbe_core/core/state_machine.py b/flexbe_core/flexbe_core/core/state_machine.py index 3c0cc7e..43b2681 100644 --- a/flexbe_core/flexbe_core/core/state_machine.py +++ b/flexbe_core/flexbe_core/core/state_machine.py @@ -108,7 +108,8 @@ def spin(self, userdata=None): if outcome is not None: break - self.sleep() + self.wait(seconds=self.sleep_duration) + return outcome def execute(self, userdata): @@ -117,15 +118,10 @@ def execute(self, userdata): self._current_state = self.initial_state self._userdata = userdata if userdata is not None else UserData() self._userdata(add_from=self._own_userdata) + # Logger.localinfo(f"Entering StateMachine {self.name} ({self._state_id}) initial state='{self._current_state.name}'") outcome = self._execute_current_state() return outcome - def sleep(self): - if self._current_state is not None: - self.wait(seconds=self._current_state.sleep_duration) - else: - self.wait(seconds=self.sleep_duration) - def _execute_current_state(self): with UserData(reference=self._userdata, remap=self._remappings[self._current_state.name], input_keys=self._current_state.input_keys, output_keys=self._current_state.output_keys @@ -145,7 +141,11 @@ def _execute_current_state(self): self._current_state = self._labels.get(target) if self._current_state is None: + Logger.localinfo(f" SM '{self.name}' ({self.id}) returning '{target}' ") return target + # else: + # Logger.localinfo(f" SM '{self.name}' ({self.id}) updated current state to " + # 
f"'{self._current_state.name}' ({self._current_state._state_id}) given outcome='{target}' ") return None @@ -185,7 +185,22 @@ def sleep_duration(self): if self._current_state is not None: return self._current_state.sleep_duration - return 0. + return 0.00005 # return some minimal wait + + def get_deep_states(self): + """ + Recursively look for the currently executing states. + + Traverse all state machines down to the terminal child state that is not a container. + (Except concurrency containers, which override this method) + + @return: The list of active states (not state machine) + """ + if isinstance(self._current_state, StateMachine): + return self._current_state.get_deep_states() + + # Base case is current_state is not a state machine + return [self._current_state] if self._current_state is not None else [] # Return as a list # consistency checks diff --git a/flexbe_core/flexbe_core/core/state_map.py b/flexbe_core/flexbe_core/core/state_map.py new file mode 100644 index 0000000..9fdbc85 --- /dev/null +++ b/flexbe_core/flexbe_core/core/state_map.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python + +# Copyright 2023 Christopher Newport University +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the Christopher Newport University nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import zlib +from flexbe_core.logger import Logger + + +class StateMap: + """Standardize hashing of states and provide for storing lookup from id to state instance.""" + + __HASH_BITS = 31 # Keep in signed 32-bit range + # 2**31 - 1 - 256 + __HASH_MASK = 0x7FFFFF00 # 23-bits keeps in signed range, and allows for 256 output encoding + + def __init__(self): + self._state_map = {} + self._num_collision_processed = 0 + + def __str__(self): + return (f"State map with {len(self._state_map)} entries" + "." 
if self._num_collision_processed == 0 else f"State map with {len(self._state_map)} entries with {self._num_collision_processed} collisions!")
+
+    def __getitem__(self, index):
+        """Get existing state if possible, or return None."""
+        if index in self._state_map:
+            return self._state_map[index]
+        else:
+            return None
+
+    @classmethod
+    def _hash_path(cls, path):
+        """
+        Convert unique string path to state to numeric ID.
+
+        The hash mask is used to:
+          1) Keep as a positive integer value
+          2) Mask off the lower 8 bits for use as outcome value for status updates
+
+        Using adler32 for now, but if this causes too many collisions, we may
+        consider going to MurmurHash3 (mmh3) library.
+        """
+        return zlib.adler32(path.encode()) & cls.__HASH_MASK
+
+    def get_path_hash(self, path):
+        """Return hash value for path that is in state map."""
+        hash_path = path
+        hash_val = self._hash_path(hash_path)
+        collisions = 0
+        while hash_val not in self._state_map:
+            collisions += 1
+            if collisions > 20:
+                return None
+            hash_path += path
+            hash_val = self._hash_path(hash_path)
+        return hash_val
+
+    def add_state(self, path, state):
+        """Define state id hash and store in map to state instance."""
+        hash_path = path
+        if state._state_id is None or state._state_id == -1:
+            state._state_id = self._hash_path(hash_path)
+            collisions = 0
+            while state._state_id in self._state_map:
+                collisions += 1
+                if collisions > 20:
+                    raise KeyError(f"Unable to avoid collisions in StateMap with {path}")
+                hash_path += path
+                state._state_id = self._hash_path(hash_path)
+            self._state_map[state._state_id] = state
+            self._num_collision_processed += collisions
+        else:
+            if state._state_id in self._state_map:
+                Logger.error(f"State '{path}' : id={state._state_id} is already in map!")
+                raise KeyError(f"Existing state in StateMap with {path}")
+            self._state_map[state._state_id] = state
+
+    def get_state(self, state_id):
+        """Return reference to state given id."""
+        if state_id in self._state_map:
+            return self._state_map[state_id]
+
+        Logger.error(f"State id={state_id} is not in the state map!")
+        return None
+
+    @classmethod
+    def unhash(cls, hash_code):
+        """
+        Convert numeric ID to state id and outcome index.
+
+        @return tuple of state id, outcome index
+        """
+        state_id = hash_code & cls.__HASH_MASK
+
+        # remove outcome offset to return true index
+        # https://stackoverflow.com/questions/31151107/how-do-i-do-a-bitwise-not-operation-in-python
+        outcome_index = (hash_code & ((1 << cls.__HASH_BITS) - 1 - cls.__HASH_MASK)) - 1
+
+        if outcome_index == -1:
+            # No outcome, just a status update
+            return state_id, None
+
+        return state_id, outcome_index
+
+    @classmethod
+    def hash(cls, state, outcome_index):
+        """Convert state id and outcome to hashed identifier for outcome reports."""
+        return state._state_id + 1 + outcome_index
diff --git a/flexbe_core/flexbe_core/core/topics.py b/flexbe_core/flexbe_core/core/topics.py
new file mode 100644
index 0000000..3198f83
--- /dev/null
+++ b/flexbe_core/flexbe_core/core/topics.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+# Copyright 2023 Christopher Newport University
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
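A hedged, self-contained illustration of the hash/unhash round trip implemented by StateMap above; the constants mirror StateMap's private values, and the example path is arbitrary:

import zlib

HASH_BITS = 31
HASH_MASK = 0x7FFFFF00  # keep positive, reserve low 8 bits for the outcome index

path = '/behavior/container/my_state'
state_id = zlib.adler32(path.encode()) & HASH_MASK

# Encode "state_id fired outcome #2" the way StateMap.hash() does:
outcome_index = 2
code = state_id + 1 + outcome_index

# Decode it the way StateMap.unhash() does:
decoded_id = code & HASH_MASK
decoded_outcome = (code & ((1 << HASH_BITS) - 1 - HASH_MASK)) - 1

assert decoded_id == state_id and decoded_outcome == outcome_index
print(f'state_id=0x{state_id:08x} outcome={decoded_outcome}')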
+# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the Christopher Newport University nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from std_msgs.msg import Bool, Empty, String, UInt32, UInt8 + +from flexbe_msgs.action import BehaviorExecution + +from flexbe_msgs.msg import BehaviorLog, BehaviorRequest, BehaviorSelection, BehaviorSync +from flexbe_msgs.msg import BEStatus, CommandFeedback +from flexbe_msgs.msg import ContainerStructure, OutcomeRequest + + +class Topics: + """Standardize handling of FlexBE topics.""" + + _BEHAVIOR_LOGGING_TOPIC = 'flexbe/log' # Logger topic + _BEHAVIOR_UPDATE_TOPIC = 'flexbe/behavior_update' + _CMD_AUTONOMY_TOPIC = 'flexbe/command/autonomy' # OCS request autonomy level + _CMD_ATTACH_TOPIC = 'flexbe/command/attach' # OCS request attach UI and Mirror to running behavior + _CMD_FEEDBACK_TOPIC = 'flexbe/command_feedback' # Feedback to OCS of command outcome + _CMD_LOCK_TOPIC = 'flexbe/command/lock' # OCS request to lock state + _CMD_PAUSE_TOPIC = 'flexbe/command/pause' # OCS request to pause execution + _CMD_PREEMPT_TOPIC = 'flexbe/command/preempt' # OCS request behavior preempt + _CMD_REPEAT_TOPIC = 'flexbe/command/repeat' # OCS request to repeat execution of state + _CMD_SYNC_TOPIC = 'flexbe/command/sync' # OCS request synchronization + _CMD_TRANSITION_TOPIC = 'flexbe/command/transition' # OCS request transition + _CMD_UNLOCK_TOPIC = 'flexbe/command/unlock' # OCS request to unlock state + _DEBUG_TOPIC = 'flexbe/debug/current_state' # Used for logging state entry/exit + _LAUNCHER_HEARTBEAT_TOPIC = 'flexbe/launcher/heartbeat' # Clock seconds active + _MIRROR_HEARTBEAT_TOPIC = 'flexbe/mirror/heartbeat' # Clock seconds active + _MIRROR_PREEMPT_TOPIC = 'flexbe/mirror/preempt' # Preempt + _MIRROR_STRUCTURE_TOPIC = 'flexbe/mirror/structure' # Pass behavior structure back to mirror + _MIRROR_SYNC_TOPIC = 'flexbe/mirror/sync' # Trigger mirror to re-synchronize with onboard + _ONBOARD_HEARTBEAT_TOPIC = 'flexbe/heartbeat' # Onboard behavior executive is alive + _ONBOARD_STATUS_TOPIC = 'flexbe/status' # Onboard behavior engine status + _OUTCOME_REQUEST_TOPIC = 'flexbe/outcome_request' # OCS request outcome + _OUTCOME_TOPIC = 'flexbe/mirror/outcome' # State outcomes used in mirror to track status + _REQUEST_BEHAVIOR_TOPIC = "flexbe/request_behavior" + _REQUEST_STRUCTURE_TOPIC = 'flexbe/request_mirror_structure' # Request state machine structure from onboard + _START_BEHAVIOR_TOPIC = 
'flexbe/start_behavior' # OCS or launcher command to start behavior + _STATE_LOGGER_TOPIC = 'flexbe/state_logger' + _UI_VERSION_TOPIC = 'flexbe/ui_version' # OCS Version topic + + # Action interfaces + _EXECUTE_BEHAVIOR_ACTION = 'flexbe/execute_behavior' + + _topic_types = {_BEHAVIOR_LOGGING_TOPIC: BehaviorLog, + _BEHAVIOR_UPDATE_TOPIC: String, + _CMD_ATTACH_TOPIC: UInt8, + _CMD_AUTONOMY_TOPIC: UInt8, + _CMD_FEEDBACK_TOPIC: CommandFeedback, + _CMD_LOCK_TOPIC: String, + _CMD_PAUSE_TOPIC: Bool, + _CMD_PREEMPT_TOPIC: Empty, + _CMD_REPEAT_TOPIC: Empty, + _CMD_SYNC_TOPIC: Empty, + _CMD_TRANSITION_TOPIC: OutcomeRequest, + _CMD_UNLOCK_TOPIC: String, + _DEBUG_TOPIC: String, + _EXECUTE_BEHAVIOR_ACTION: BehaviorExecution, + _LAUNCHER_HEARTBEAT_TOPIC: UInt32, + _MIRROR_HEARTBEAT_TOPIC: UInt32, + _MIRROR_PREEMPT_TOPIC: Empty, + _MIRROR_STRUCTURE_TOPIC: ContainerStructure, + _MIRROR_SYNC_TOPIC: BehaviorSync, + _ONBOARD_HEARTBEAT_TOPIC: BehaviorSync, + _ONBOARD_STATUS_TOPIC: BEStatus, + _OUTCOME_REQUEST_TOPIC: OutcomeRequest, + _OUTCOME_TOPIC: UInt32, + _REQUEST_BEHAVIOR_TOPIC: BehaviorRequest, + _REQUEST_STRUCTURE_TOPIC: UInt32, + _START_BEHAVIOR_TOPIC: BehaviorSelection, + _UI_VERSION_TOPIC: String + } + + @staticmethod + def get_type(topic): + return Topics._topic_types[topic] diff --git a/flexbe_core/flexbe_core/logger.py b/flexbe_core/flexbe_core/logger.py index 54afc90..aa01663 100644 --- a/flexbe_core/flexbe_core/logger.py +++ b/flexbe_core/flexbe_core/logger.py @@ -33,6 +33,7 @@ from rclpy.node import Node +from flexbe_core.core.topics import Topics from flexbe_msgs.msg import BehaviorLog @@ -45,15 +46,13 @@ class Logger: REPORT_ERROR = BehaviorLog.ERROR REPORT_DEBUG = BehaviorLog.DEBUG - LOGGING_TOPIC = 'flexbe/log' - _pub = None _node = None @staticmethod def initialize(node: Node): Logger._node = node - Logger._pub = node.create_publisher(BehaviorLog, Logger.LOGGING_TOPIC, 100) + Logger._pub = node.create_publisher(BehaviorLog, Topics._BEHAVIOR_LOGGING_TOPIC, 100) @staticmethod def log(text: str, severity: int): @@ -74,15 +73,15 @@ def local(text: str, severity: int): if severity == Logger.REPORT_INFO: Logger._node.get_logger().info(text) elif severity == Logger.REPORT_WARN: - Logger._node.get_logger().warning(text) + Logger._node.get_logger().warning(f"\033[93m{text}\033[0m") elif severity == Logger.REPORT_HINT: - Logger._node.get_logger().info('\033[94mBehavior Hint: %s\033[0m' % text) + Logger._node.get_logger().info(f"\033[94mBehavior Hint: {text}\033[0m") elif severity == Logger.REPORT_ERROR: - Logger._node.get_logger().error(text) + Logger._node.get_logger().error(f"\033[91m{text}\033[0m") elif severity == Logger.REPORT_DEBUG: - Logger._node.get_logger().debug(text) + Logger._node.get_logger().debug(f"\033[92m{text}\033[0m") else: - Logger._node.get_logger().debug(text + ' (unknown log level %s)' % str(severity)) + Logger._node.get_logger().debug(f"\033[95m{text}\033[91m(unknown log level {str(severity)})\033[0m") # NOTE: Below text strings can only have single % symbols if they are being treated # as format strings with appropriate arguments (otherwise replace with %% for simple string without args) diff --git a/flexbe_core/flexbe_core/proxy/proxy_action_client.py b/flexbe_core/flexbe_core/proxy/proxy_action_client.py index 22f37ca..a48b981 100644 --- a/flexbe_core/flexbe_core/proxy/proxy_action_client.py +++ b/flexbe_core/flexbe_core/proxy/proxy_action_client.py @@ -29,7 +29,7 @@ """A proxy for calling actions provides a single point for all state action interfaces.""" from functools 
import partial
-from threading import Timer
+from threading import Timer, Lock

 from rclpy.action import ActionClient

@@ -48,6 +48,8 @@ class ProxyActionClient:
     _result = {}
     _feedback = {}

+    _client_sync_lock = Lock()
+
     @staticmethod
     def initialize(node):
         """Initialize ROS setup for proxy action client."""
@@ -56,17 +58,16 @@ def initialize(node):

     @staticmethod
     def shutdown():
-        """Shuts this proxy down by reseting all action clients."""
+        """Shuts this proxy down by resetting all action clients."""
         try:
             print(f"Shutdown proxy action clients with {len(ProxyActionClient._clients)} topics ...")
-            for topic, client in ProxyActionClient._clients.items():
+            for topic, client_dict in ProxyActionClient._clients.items():
                 try:
                     ProxyActionClient._clients[topic] = None
-                    ProxyActionClient._node.destroy_client(client)
+                    ProxyActionClient._node.destroy_client(client_dict['client'])
                 except Exception as exc:  # pylint: disable=W0703
                     Logger.error(f"Something went wrong during shutdown of proxy action client for {topic}!\n{str(exc)}")

-            print("Shutdown proxy action clients ...")
             ProxyActionClient._result.clear()
             ProxyActionClient._feedback.clear()
             ProxyActionClient._cancel_current_goal.clear()
@@ -88,10 +89,15 @@ def __init__(self, topics=None, wait_duration=10):
         """
         if topics is not None:
             for topic, action_type in topics.items():
-                ProxyActionClient.setupClient(topic, action_type, wait_duration)
+                ProxyActionClient.setup_client(topic, action_type, wait_duration)

     @classmethod
     def setupClient(cls, topic, action_type, wait_duration=10):
+        Logger.localerr("Deprecated: Use ProxyActionClient.setup_client instead!")
+        cls.setup_client(topic, action_type, wait_duration=wait_duration)
+
+    @classmethod
+    def setup_client(cls, topic, action_type, wait_duration=None):
         """
         Set up an action client for calling it later.

@@ -104,24 +110,36 @@ def setupClient(cls, topic, action_type, wait_duration=10):
         @type wait_duration: int
         @param wait_duration: Defines how long to wait for the given client if it is not available right now.
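A hedged usage sketch of the new reference-counted setup (assumes ProxyActionClient.initialize(node) was already called by the behavior engine; the BehaviorExecution action and the topic name are reused here purely as example values):

from flexbe_core.proxy import ProxyActionClient
from flexbe_msgs.action import BehaviorExecution

TOPIC = 'flexbe/execute_behavior'

# Two states registering the same action interface now share one ActionClient.
ProxyActionClient.setup_client(TOPIC, BehaviorExecution, wait_duration=1.0)
ProxyActionClient.setup_client(TOPIC, BehaviorExecution)  # count -> 2, no new client

# On cleanup each state releases its reference; the underlying ActionClient is
# only destroyed (in the executor thread) once the count drops below 1.
ProxyActionClient.remove_client(TOPIC)  # count -> 1, client kept
ProxyActionClient.remove_client(TOPIC)  # count -> 0, destruction scheduled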
""" - if topic not in ProxyActionClient._clients: - ProxyActionClient._clients[topic] = ActionClient(ProxyActionClient._node, action_type, topic) - ProxyActionClient._check_topic_available(topic, wait_duration) - - else: - if action_type is not ProxyActionClient._clients[topic]._action_type: - if action_type.__name__ == ProxyActionClient._clients[topic]._action_type.__name__: - Logger.localinfo(f'Existing action client for {topic}' - f' with same action type name, but different instance - re-create client!') - - # Destroy the existing client in executor thread - client = ProxyActionClient._clients[topic] - ProxyActionClient._node.executor.create_task(ProxyActionClient.destroy_client, client, topic) + with cls._client_sync_lock: + if topic not in ProxyActionClient._clients: + ProxyActionClient._clients[topic] = {'client': ActionClient(ProxyActionClient._node, action_type, topic), + 'count': 1} - ProxyActionClient._clients[topic] = ActionClient(ProxyActionClient._node, action_type, topic) - ProxyActionClient._check_topic_available(topic, wait_duration) + else: + if action_type is not ProxyActionClient._clients[topic]['client']._action_type: + if action_type.__name__ == ProxyActionClient._clients[topic]['client']._action_type.__name__: + if ProxyActionClient._clients[topic]['count'] == 1: + Logger.localinfo(f'Existing action client for {topic}' + f' with same action type name, but different instance - re-create client!') + else: + Logger.localwarn(f"Existing action client for {topic} with {ProxyActionClient._clients[topic]['count']} references\n" + f" with same action type name, but different instance\n" + f" just re-create client with 1 reference - but be warned!") + + # Destroy the existing client in executor thread + client = ProxyActionClient._clients[topic]['client'] + ProxyActionClient._node.executor.create_task(ProxyActionClient.destroy_client, client, topic) + + ProxyActionClient._clients[topic] = {'client': ActionClient(ProxyActionClient._node, + action_type, topic), + 'count': 1} + else: + raise TypeError("Trying to replace existing action client with different action type") else: - raise TypeError("Trying to replace existing action client with different action type") + ProxyActionClient._clients[topic]['count'] = ProxyActionClient._clients[topic]['count'] + 1 + + if isinstance(wait_duration, (float, int)): + ProxyActionClient._check_topic_available(topic, wait_duration) @classmethod def send_goal(cls, topic, goal, wait_duration=0.0): @@ -139,6 +157,12 @@ def send_goal(cls, topic, goal, wait_duration=0.0): """ if not ProxyActionClient._check_topic_available(topic, wait_duration=wait_duration): raise ValueError(f'Cannot send goal for action client {topic}: Topic not available.') + + client_dict = ProxyActionClient._clients.get(topic) + if client_dict is None: + raise ValueError(f'Cannot send goal for action client {topic}: Client is not initialized.') + client = client_dict['client'] + # reset previous results ProxyActionClient._result[topic] = None ProxyActionClient._feedback[topic] = None @@ -146,12 +170,12 @@ def send_goal(cls, topic, goal, wait_duration=0.0): ProxyActionClient._has_active_goal[topic] = True ProxyActionClient._current_goal[topic] = None - if not isinstance(goal, ProxyActionClient._clients[topic]._action_type.Goal): + if not isinstance(goal, client._action_type.Goal): if goal.__class__.__name__ == ProxyActionClient._clients[topic]._action_type.Goal.__name__: # This is the case if the same class is imported multiple times # To avoid rclpy TypeErrors, we will 
automatically convert to the base type # used in the original service/publisher clients - new_goal = ProxyActionClient._clients[topic]._action_type.Goal() + new_goal = client._action_type.Goal() Logger.localinfo(f" converting goal {str(type(new_goal))} vs. {str(type(goal))}") assert new_goal.__slots__ == goal.__slots__, f"Message attributes for {topic} do not match!" for attr in goal.__slots__: @@ -164,11 +188,10 @@ def send_goal(cls, topic, goal, wait_duration=0.0): new_goal = goal # send goal - ProxyActionClient._clients[topic].wait_for_server() - future = ProxyActionClient._clients[topic].send_goal_async( - new_goal, - feedback_callback=lambda f: ProxyActionClient._feedback_callback(topic, f) - ) + client.wait_for_server() + future = client.send_goal_async(new_goal, + feedback_callback=lambda f: ProxyActionClient._feedback_callback(topic, f) + ) future.add_done_callback(partial(ProxyActionClient._done_callback, topic=topic)) @@ -196,10 +219,15 @@ def is_available(cls, topic): @type topic: string @param topic: The topic of interest. """ - client = ProxyActionClient._clients.get(topic) - if client is None: + client_dict = ProxyActionClient._clients.get(topic) + if client_dict is None: Logger.logerr("Action client '%s' is not yet registered, need to add it first!" % topic) return False + + client = client_dict['client'] + if client is None: + Logger.logerr("Action client '%s' is not yet initialized, need to add it first!" % topic) + return False return client.server_is_ready() @@ -273,7 +301,11 @@ def get_state(cls, topic): @type topic: string @param topic: The topic of interest. """ - return ProxyActionClient._clients[topic].get_state() + client_dict = ProxyActionClient._clients.get(topic) + if client_dict is None: + return None + + return client_dict['client'].get_state() @classmethod def is_active(cls, topic): @@ -311,8 +343,8 @@ def _check_topic_available(cls, topic, wait_duration=0.1): @type wait_duration: int @param wait_duration: Defines how long to wait for the given client if it is not available right now. """ - client = ProxyActionClient._clients.get(topic) - if client is None: + client_dict = ProxyActionClient._clients.get(topic) + if client_dict is None: Logger.logerr("Action client '%s' is not yet registered, need to add it first!" % topic) return False @@ -320,6 +352,7 @@ def _check_topic_available(cls, topic, wait_duration=0.1): tmr = Timer(.5, ProxyActionClient._print_wait_warning, [topic]) tmr.start() + client = client_dict['client'] available = client.wait_for_server(wait_duration) warning_sent = False @@ -343,15 +376,42 @@ def _check_topic_available(cls, topic, wait_duration=0.1): def _print_wait_warning(cls, topic): Logger.logwarn(f"Waiting for action client/server for '{topic}'") + @classmethod + def remove_client(cls, topic): + """ + Remove action client from proxy. + + @type topic: string + @param topic: The topic to publish on. 
+        """
+        client = None
+        count = -1
+        with cls._client_sync_lock:
+            if topic in ProxyActionClient._clients:
+                ProxyActionClient._clients[topic]['count'] = ProxyActionClient._clients[topic]['count'] - 1
+                count = ProxyActionClient._clients[topic]['count']
+                if count < 1:
+                    client = ProxyActionClient._clients[topic]['client']
+                    ProxyActionClient._clients.pop(topic)
+
+                    if topic in ProxyActionClient._result:
+                        ProxyActionClient._result.pop(topic)
+
+                    if topic in ProxyActionClient._feedback:
+                        ProxyActionClient._feedback.pop(topic)
+
+        if client is not None:
+            Logger.localdebug(f"Action client for '{topic}' has {count} references remaining.")
+            ProxyActionClient._node.executor.create_task(ProxyActionClient.destroy_client, client, topic)
+        else:
+            Logger.localdebug(f"Action client for '{topic}' remains with {count} references!")
+
     @classmethod
     def destroy_client(cls, client, topic):
         """Handle client destruction from within the executor threads."""
         try:
-            if ProxyActionClient._node.destroy_client(client):
-                Logger.localinfo(f'Destroyed the proxy action client for {topic} ({id(client)})!')
-            else:
-                Logger.localwarn(f'Some issue destroying the proxy action client for {topic}!')
             del client
+            Logger.localinfo(f'Destroyed the proxy action client for {topic}!')
         except Exception as exc:  # pylint: disable=W0703
             Logger.error("Something went wrong destroying proxy action client"
                          f" for {topic}!\n  {type(exc)} - {str(exc)}")
diff --git a/flexbe_core/flexbe_core/proxy/proxy_publisher.py b/flexbe_core/flexbe_core/proxy/proxy_publisher.py
index 4789516..39ebb09 100644
--- a/flexbe_core/flexbe_core/proxy/proxy_publisher.py
+++ b/flexbe_core/flexbe_core/proxy/proxy_publisher.py
@@ -33,7 +33,7 @@
 Provides a single point for communications for all states in behavior
 """
-from threading import Timer, Event
+from threading import Timer, Event, Lock

 from flexbe_core.logger import Logger
 from flexbe_core.proxy.qos import QOS_DEFAULT
@@ -44,6 +44,7 @@ class ProxyPublisher:
     _node = None
     _topics = {}
+    _publisher_sync_lock = Lock()

     @staticmethod
     def initialize(node):
@@ -53,18 +54,17 @@ def initialize(node):

     @staticmethod
     def shutdown():
-        """Shuts this proxy down by reseting all publishers."""
+        """Shuts this proxy down by resetting all publishers."""
         try:
             print(f"Shutdown proxy publisher with {len(ProxyPublisher._topics)} topics ...")
             for topic, pub in ProxyPublisher._topics.items():
                 try:
                     ProxyPublisher._topics[topic] = None
-                    ProxyPublisher._node.destroy_publisher(pub)
+                    ProxyPublisher._node.destroy_publisher(pub['publisher'])
                 except Exception as exc:  # pylint: disable=W0703
                     Logger.error(f"Something went wrong during shutdown of proxy publisher for {topic}!\n%s %s",
                                  type(exc), str(exc))

-            print("Shutdown proxy publisher ...")
             ProxyPublisher._topics.clear()

         except Exception as exc:  # pylint: disable=W0703
@@ -87,10 +87,15 @@ def __init__(self, topics=None, qos=None, **kwargs):
         """
         if topics is not None:
             for topic, msg_type in topics.items():
-                ProxyPublisher.createPublisher(topic, msg_type, qos, **kwargs)
+                ProxyPublisher.create_publisher(topic, msg_type, qos, **kwargs)

     @classmethod
     def createPublisher(cls, topic, msg_type, qos=None, **kwargs):
+        Logger.localerr("Deprecated: Use ProxyPublisher.create_publisher instead!")
+        cls.create_publisher(topic, msg_type, qos, **kwargs)
+
+    @classmethod
+    def create_publisher(cls, topic, msg_type, qos=None, **kwargs):
         """
         Add a new publisher to the proxy.
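The publisher proxy gains the same reference counting; a hedged sketch of the intended life cycle (assumes ProxyPublisher.initialize(node) already ran; the topic name is one of the standard FlexBE topics, used here only as an example):

from std_msgs.msg import String

from flexbe_core.proxy import ProxyPublisher

TOPIC = 'flexbe/behavior_update'

ProxyPublisher.create_publisher(TOPIC, String)   # count -> 1, publisher created
ProxyPublisher.create_publisher(TOPIC, String)   # count -> 2, existing one reused

msg = String()
msg.data = 'hello'
ProxyPublisher.publish(TOPIC, msg)

ProxyPublisher.remove_publisher(TOPIC)           # count -> 1, publisher kept
ProxyPublisher.remove_publisher(TOPIC)           # count -> 0, destroyed in executor thread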
@@ -103,25 +108,56 @@ def createPublisher(cls, topic, msg_type, qos=None, **kwargs): if '_latch' in kwargs or '_queue_size' in kwargs: Logger.warning('DEPRECATED use of arguments in publisher') - if topic not in ProxyPublisher._topics: - qos = qos or QOS_DEFAULT - ProxyPublisher._topics[topic] = ProxyPublisher._node.create_publisher(msg_type, topic, qos) - else: - if msg_type is not ProxyPublisher._topics[topic].msg_type: - # Change in required msg_type for topic name - update publisher with new type - if msg_type.__name__ == ProxyPublisher._topics[topic].msg_type.__name__: - # Same message type name, so likely due to reloading Python module on behavior change - Logger.localinfo(f'Existing publisher for {topic} with same message type name,' - ' but different instance - re-create publisher!') - ProxyPublisher._node.executor.create_task(ProxyPublisher.destroy_publisher, - ProxyPublisher._topics[topic], topic) - qos = qos or QOS_DEFAULT - ProxyPublisher._topics[topic] = ProxyPublisher._node.create_publisher(msg_type, topic, qos) + with cls._publisher_sync_lock: + if topic not in ProxyPublisher._topics: + qos = qos or QOS_DEFAULT + ProxyPublisher._topics[topic] = {'publisher': ProxyPublisher._node.create_publisher(msg_type, topic, qos), + 'count': 1} + else: + if msg_type is not ProxyPublisher._topics[topic]['publisher'].msg_type: + # Change in required msg_type for topic name - update publisher with new type + if msg_type.__name__ == ProxyPublisher._topics[topic]['publisher'].msg_type.__name__: + # Same message type name, so likely due to reloading Python module on behavior change + Logger.localwarn(f'Existing publisher for {topic} with same message type name,' + ' but different instance - re-create publisher!') + + pub = ProxyPublisher._topics[topic]['publisher'] + ProxyPublisher._node.executor.create_task(ProxyPublisher.destroy_publisher, pub, topic) + + qos = qos or QOS_DEFAULT + ProxyPublisher._topics[topic]['publisher'] = ProxyPublisher._node.create_publisher(msg_type, topic, qos) + ProxyPublisher._topics[topic]['count'] = 1 + else: + Logger.info(f"Mis-matched msg_types ({msg_type.__name__} vs." + f" {ProxyPublisher._topics[topic]['publisher'].msg_type.__name__}) for {topic}" + f" (possibly due to reload of behavior)!") + raise TypeError(f"Trying to replace existing publisher with different msg type for {topic}") else: - Logger.info(f'Mis-matched msg_types ({msg_type.__name__} vs.' - f' {ProxyPublisher._topics[topic].msg_type.__name__}) for {topic}' - f' (possibly due to reload of behavior)!') - raise TypeError("Trying to replace existing publisher with different msg type") + ProxyPublisher._topics[topic]['count'] = ProxyPublisher._topics[topic]['count'] + 1 + + @classmethod + def remove_publisher(cls, topic): + """ + Remove publisher from proxy. + + @type topic: string + @param topic: The topic to publish on. 
+ """ + pub = None + count = -1 + with cls._publisher_sync_lock: + if topic in ProxyPublisher._topics: + ProxyPublisher._topics[topic]['count'] = ProxyPublisher._topics[topic]['count'] - 1 + count = ProxyPublisher._topics[topic]['count'] + if count < 1: + pub = ProxyPublisher._topics[topic]['publisher'] + ProxyPublisher._topics.pop(topic) + + if pub is not None: + Logger.localdebug(f"Publisher for '{topic}' has {count} references remaining ({id(pub)}).") + ProxyPublisher._node.executor.create_task(ProxyPublisher.destroy_publisher, pub, topic) + else: + Logger.localdebug(f"Publisher for '{topic}' remains with {count} references ({id(pub)})!") @classmethod def is_available(cls, topic): @@ -148,30 +184,34 @@ def publish(cls, topic, msg): Logger.warning('ProxyPublisher: topic %s not yet registered!' % topic) return - if not isinstance(msg, ProxyPublisher._topics[topic].msg_type): + with cls._publisher_sync_lock: + msg_type = ProxyPublisher._topics[topic]['publisher'].msg_type + + if not isinstance(msg, msg_type): # Change in required msg_type for topic name - update publisher with new type - if msg.__class__.__name__ == ProxyPublisher._topics[topic].msg_type.__name__: + if msg.__class__.__name__ == msg_type.__name__: # This is the case if the same class is imported multiple times # To avoid rclpy TypeErrors, we will automatically convert to the base type # used in the original publisher Logger.localinfo('Publish - converting datatype for ' '%s!\n%s: %s/%s' % (topic, str(msg.__class__.__name__), str(id(msg.__class__)), - str(id(ProxyPublisher._topics[topic].msg_type)))) + str(id(msg_type)))) - new_msg = ProxyPublisher._topics[topic].msg_type() + new_msg = msg_type() assert new_msg.__slots__ == msg.__slots__, f"Message attributes for {topic} do not match!" for attr in msg.__slots__: setattr(new_msg, attr, getattr(msg, attr)) else: raise TypeError(f"Invalid request type {msg.__class__.__name__}" - f" (vs. {ProxyPublisher._topics[topic].msg_type.__name__}) for topic {topic}") + f" (vs. {msg_type.__name__}) for topic {topic}") else: # Same class definition instance as stored new_msg = msg try: - ProxyPublisher._topics[topic].publish(new_msg) + with cls._publisher_sync_lock: + ProxyPublisher._topics[topic]['publisher'].publish(new_msg) except Exception as exc: # pylint: disable=W0703 Logger.warning('Something went wrong when publishing to %s!\n%s: %s' % (topic, str(type(exc)), str(exc))) import traceback # pylint: disable=C0415 @@ -189,7 +229,7 @@ def number_of_subscribers(cls, topic): if pub is None: Logger.error("Publisher %s not yet registered, need to add it first!" 
% topic) return -1 - return pub.get_subscription_count() + return pub['publisher'].get_subscription_count() @classmethod def wait_for_any(cls, topic, timeout=5.0): @@ -209,7 +249,7 @@ def wait_for_any(cls, topic, timeout=5.0): tmr = Timer(.5, ProxyPublisher._print_wait_warning, [topic]) tmr.start() - available = ProxyPublisher._wait_for_subscribers(pub, timeout) + available = ProxyPublisher._wait_for_subscribers(pub['publisher'], timeout) warning_sent = False try: tmr.cancel() @@ -249,7 +289,7 @@ def destroy_publisher(cls, pub, topic): """Handle publisher destruction from within the executor threads.""" try: if ProxyPublisher._node.destroy_publisher(pub): - Logger.localinfo(f'Destroyed the proxy publisher for {topic} ({id(pub)})!') + Logger.localinfo(f'Destroyed the proxy publisher for {topic}!') else: Logger.localwarn(f'Some issue destroying the proxy publisher for {topic}!') del pub diff --git a/flexbe_core/flexbe_core/proxy/proxy_service_caller.py b/flexbe_core/flexbe_core/proxy/proxy_service_caller.py index e0ad212..043abbc 100644 --- a/flexbe_core/flexbe_core/proxy/proxy_service_caller.py +++ b/flexbe_core/flexbe_core/proxy/proxy_service_caller.py @@ -31,7 +31,7 @@ """A proxy for providing single service connection for FlexBE behaviors.""" -from threading import Timer +from threading import Timer, Lock import rclpy @@ -45,6 +45,7 @@ class ProxyServiceCaller: _services = {} _results = {} + _service_sync_lock = Lock() @staticmethod def initialize(node): @@ -54,7 +55,7 @@ def initialize(node): @staticmethod def shutdown(): - """Shut down this proxy by reseting all service callers.""" + """Shut down this proxy by resetting all service callers.""" try: print(f"Shutdown proxy service caller with {len(ProxyServiceCaller._services)} topics ...") for topic, service in ProxyServiceCaller._services.items(): @@ -69,7 +70,6 @@ def shutdown(): Logger.error("Something went wrong during shutdown of proxy service" f" caller for {topic}!\n {type(exc)} - {exc}") - print("Shutdown proxy service caller ...") ProxyServiceCaller._results.clear() except Exception as exc: # pylint: disable=W0703 @@ -87,10 +87,15 @@ def __init__(self, topics=None, wait_duration=10): """ if topics is not None: for topic, srv_type in topics.items(): - ProxyServiceCaller.setupService(topic, srv_type, wait_duration) + ProxyServiceCaller.setup_service(topic, srv_type, wait_duration) @classmethod def setupService(cls, topic, srv_type, wait_duration=10): + Logger.localerr("Deprecated: Use ProxyServiceCaller.setup_service instead!") + cls.setup_service(topic, srv_type, wait_duration) + + @classmethod + def setup_service(cls, topic, srv_type, wait_duration=None): """ Set up a service caller for calling it later. @@ -100,28 +105,39 @@ def setupService(cls, topic, srv_type, wait_duration=10): @type srv_type: service class @param srv_type: The type of messages of this service. - @type wait_duration: float + @type wait_duration: float or None @param wait_duration: Defines how long to wait for the given service if it is not available right now. 
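A hedged sketch of the corresponding service-caller usage (assumes ProxyServiceCaller.initialize(node) already ran and an example_interfaces AddTwoInts server is available; the 'add_two_ints' topic is illustrative):

from example_interfaces.srv import AddTwoInts

from flexbe_core.proxy import ProxyServiceCaller

TOPIC = 'add_two_ints'

# Passing wait_duration=None (the new default) skips the availability check;
# pass a number of seconds to block until the server is up.
ProxyServiceCaller.setup_service(TOPIC, AddTwoInts, wait_duration=1.0)

request = AddTwoInts.Request()
request.a, request.b = 2, 3
response = ProxyServiceCaller.call(TOPIC, request)

ProxyServiceCaller.remove_client(TOPIC)  # release this reference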
""" - if topic not in ProxyServiceCaller._services: - Logger.localinfo(f'Set up ProxyServiceCaller for new topic {topic} ...') - ProxyServiceCaller._services[topic] = ProxyServiceCaller._node.create_client(srv_type, topic) - if isinstance(wait_duration, float): - ProxyServiceCaller._check_service_available(topic, wait_duration) - - else: - if srv_type is not ProxyServiceCaller._services[topic].srv_type: - if srv_type.__name__ == ProxyServiceCaller._services[topic].srv_type.__name__: - Logger.localinfo(f'Existing service for {topic} with same message type name,' - f' but different instance - re-create service!') - ProxyServiceCaller._node.executor.create_task(ProxyServiceCaller.destroy_service, - ProxyServiceCaller._services[topic], topic) - - ProxyServiceCaller._services[topic] = ProxyServiceCaller._node.create_client(srv_type, topic) - if isinstance(wait_duration, float): - ProxyServiceCaller._check_service_available(topic, wait_duration) + with cls._service_sync_lock: + if topic not in ProxyServiceCaller._services: + Logger.localinfo(f'Set up ProxyServiceCaller for new topic {topic} ...') + ProxyServiceCaller._services[topic] = {'service': ProxyServiceCaller._node.create_client(srv_type, topic), + 'count': 1} + else: + if srv_type is not ProxyServiceCaller._services[topic]['service'].srv_type: + if srv_type.__name__ == ProxyServiceCaller._services[topic]['service'].srv_type.__name__: + if ProxyServiceCaller._services[topic]['count'] == 1: + Logger.localinfo(f'Existing service for {topic} with same message type name,' + f' but different instance - re-create service!') + else: + Logger.localwarn(f"Existing service for {topic} with " + f"{ProxyServiceCaller._services[topic]['count']} references\n" + f" with same service type name, but different instance\n" + f" just re-create client with 1 reference - but be warned!") + + ProxyServiceCaller._node.executor.create_task(ProxyServiceCaller.destroy_service, + ProxyServiceCaller._services[topic]['service'], topic) + + ProxyServiceCaller._services[topic] = {'service': ProxyServiceCaller._node.create_client(srv_type, + topic), + 'count': 1} + else: + raise TypeError("Trying to replace existing service caller with different service msg type") else: - raise TypeError("Trying to replace existing service caller with different service msg type") + ProxyServiceCaller._services[topic]['count'] = ProxyServiceCaller._services[topic]['count'] + 1 + + if isinstance(wait_duration, (float, int)): + ProxyServiceCaller._check_service_available(topic, wait_duration) @classmethod def is_available(cls, topic, wait_duration=1.0): @@ -152,14 +168,18 @@ def call(cls, topic, request, wait_duration=1.0): if not ProxyServiceCaller._check_service_available(topic, wait_duration): raise ValueError('Cannot call service client %s: Topic not available.' % topic) # call service (forward any exceptions) + client_dict = ProxyServiceCaller._services.get(topic) + if client_dict is None: + raise ValueError('Cannot call service client %s: Topic not available.' 
% topic)
+        client = client_dict['service']

-        if not isinstance(request, ProxyServiceCaller._services[topic].srv_type.Request):
-            if request.__class__.__name__ == ProxyServiceCaller._services[topic].srv_type.Request.__name__:
+        if not isinstance(request, client.srv_type.Request):
+            if request.__class__.__name__ == client.srv_type.Request.__name__:
                 # This is the case if the same class is imported multiple times
                 # To avoid rclpy TypeErrors, we will automatically convert to the base type
                 # used in the service clients
-                new_request = ProxyServiceCaller._services[topic].srv_type.Request()
+                new_request = client.srv_type.Request()
                 for attr, val in vars(new_request).items():
                     # Validate that attributes are in common
                     assert hasattr(request, attr), "Types must share common attributes!"
                     setattr(new_request, attr, val)
             else:
                 raise TypeError(f"Invalid request type {request.__class__.__name__}"
-                                f" (vs. {ProxyServiceCaller._services[topic].srv_type.Request.__name__}) "
+                                f" (vs. {client.srv_type.Request.__name__}) "
                                 f"for topic {topic}")
         else:
             # Same class definition instance as stored
@@ -175,7 +195,7 @@

         Logger.loginfo("Client about to call service")

-        return ProxyServiceCaller._services[topic].call(new_request)
+        return client.call(new_request)

     @classmethod
     def call_async(cls, topic, request, wait_duration=1.0):
@@ -197,24 +217,29 @@
             raise ValueError('Cannot call service client %s: Topic not available.' % topic)

         # call service (forward any exceptions)
-        if not isinstance(request, ProxyServiceCaller._services[topic].srv_type.Request):
-            if request.__class__.__name__ == ProxyServiceCaller._services[topic].srv_type.Request.__name__:
+        client_dict = ProxyServiceCaller._services.get(topic)
+        if client_dict is None:
+            raise ValueError('Cannot call service client %s: Topic not available.' % topic)
+        client = client_dict['service']
+
+        if not isinstance(request, client.srv_type.Request):
+            if request.__class__.__name__ == client.srv_type.Request.__name__:
                 # This is the case if the same class is imported multiple times
                 # To avoid rclpy TypeErrors, we will automatically convert to the base type
                 # used in the original service clients
-                new_request = ProxyServiceCaller._services[topic].srv_type.Request()
+                new_request = client.srv_type.Request()
                 assert new_request.__slots__ == request.__slots__, f"Message attributes for {topic} do not match!"
                 for attr in request.__slots__:
                     setattr(new_request, attr, getattr(request, attr))
             else:
                 raise TypeError(f"Invalid request type {request.__class__.__name__} "
-                                f"(vs. {ProxyServiceCaller._services[topic].srv_type.Request.__name__}) for topic {topic}")
+                                f"(vs. {client.srv_type.Request.__name__}) for topic {topic}")
         else:
             # Same class definition instance as stored
             new_request = request

-        ProxyServiceCaller._results[topic] = ProxyServiceCaller._services[topic].call_async(new_request)
+        ProxyServiceCaller._results[topic] = client.call_async(new_request)

     @classmethod
     def done(cls, topic):
@@ -251,11 +276,12 @@ def _check_service_available(cls, topic, wait_duration=1):

         @type wait_duration: float
         @param wait_duration: Defines how long to wait for the given service if it is not available right now.
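A hedged standalone illustration of the slot-by-slot request conversion used above; std_msgs String stands in for a message class that was imported twice (e.g., after a behavior reload), so the names below are illustrative only:

from std_msgs.msg import String as StringA
from std_msgs.msg import String as StringB  # stand-in for a re-imported duplicate class

incoming = StringB(data='reloaded')

# Copy attributes into an instance of the class the client was created with,
# which sidesteps isinstance() failures between duplicate class objects.
converted = StringA()
assert converted.__slots__ == incoming.__slots__, 'Message attributes do not match!'
for attr in incoming.__slots__:
    setattr(converted, attr, getattr(incoming, attr))

print(converted.data)  # 'reloaded'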
""" - client = ProxyServiceCaller._services.get(topic) - if client is None: + client_dict = ProxyServiceCaller._services.get(topic) + if client_dict is None: Logger.error(f"Service client {topic} not yet registered, need to add it first!") return False + client = client_dict['service'] if not isinstance(wait_duration, float): Logger.localinfo(f"Check for service {topic} requires floating point wait_duration in seconds (change to 0.001)!") wait_duration = 0.001 @@ -291,6 +317,32 @@ def _check_service_available(cls, topic, wait_duration=1): def _print_wait_warning(cls, topic): Logger.warning("Waiting for service %s..." % (topic)) + @classmethod + def remove_client(cls, topic): + """ + Remove service caller client from proxy. + + @type topic: string + @param topic: The topic to publish on. + """ + srv = None + count = -1 + with cls._service_sync_lock: + if topic in ProxyServiceCaller._services: + ProxyServiceCaller._services[topic]['count'] = ProxyServiceCaller._services[topic]['count'] - 1 + count = ProxyServiceCaller._services[topic]['count'] + if count < 1: + srv = ProxyServiceCaller._services[topic]['service'] + ProxyServiceCaller._services.pop(topic) + if topic in ProxyServiceCaller._results: + ProxyServiceCaller._results.pop(topic) + if srv is not None: + Logger.localdebug(f"Service caller client for '{topic}' has {count} references remaining.") + ProxyServiceCaller._node.executor.create_task(ProxyServiceCaller.destroy_service, srv, topic) + else: + Logger.localdebug(f"Service caller for '{topic}' remains with {count} references!") + + @classmethod def destroy_service(cls, srv, topic): """Handle service client destruction from within the executor threads.""" diff --git a/flexbe_core/flexbe_core/proxy/proxy_subscriber_cached.py b/flexbe_core/flexbe_core/proxy/proxy_subscriber_cached.py index c0e4929..3e9b62e 100644 --- a/flexbe_core/flexbe_core/proxy/proxy_subscriber_cached.py +++ b/flexbe_core/flexbe_core/proxy/proxy_subscriber_cached.py @@ -34,8 +34,8 @@ """ from functools import partial -from collections import defaultdict - +from collections import defaultdict, deque +from threading import Lock from flexbe_core.logger import Logger from flexbe_core.proxy.qos import QOS_DEFAULT @@ -47,6 +47,8 @@ class ProxySubscriberCached: _topics = {} _persistant_topics = [] + _subscription_lock = Lock() # Prevent modifications during processing + @staticmethod def initialize(node): """Initialize ROS setup for proxy subscriber.""" @@ -57,24 +59,25 @@ def initialize(node): def shutdown(): """Shut down this proxy by unregistering all subscribers.""" try: - print(f"Shutdown proxy subscriber with {len(ProxySubscriberCached._topics)} topics ...") - for topic, topic_dict in ProxySubscriberCached._topics.items(): - try: - ProxySubscriberCached._topics[topic] = None - topic_dict['callbacks'] = {} - ProxySubscriberCached._node.destroy_subscription(topic_dict['subscription']) - except Exception as exc: # pylint: disable=W0703 - Logger.error(f"Something went wrong during shutdown of proxy subscriber for " - f"{topic}!\n{type(exc)} - {exc}") - - print("Shutdown proxy subscriber ...") - ProxySubscriberCached._topics.clear() - ProxySubscriberCached._persistant_topics.clear() + with ProxySubscriberCached._subscription_lock: + print(f"Shutdown proxy subscriber with {len(ProxySubscriberCached._topics)} topics ...", flush=True) + for topic, topic_dict in ProxySubscriberCached._topics.items(): + try: + ProxySubscriberCached._topics[topic] = None + topic_dict['callbacks'] = {} + 
ProxySubscriberCached._node.destroy_subscription(topic_dict['subscription']) + except Exception as exc: # pylint: disable=W0703 + Logger.error(f"Something went wrong during shutdown of proxy subscriber for " + f"{topic}!\n{type(exc)} - {exc}") + + ProxySubscriberCached._topics.clear() + ProxySubscriberCached._persistant_topics.clear() + print("Shutdown proxy subscriber - finished!") except Exception as exc: # pylint: disable=W0703 print(f'Something went wrong during shutdown of proxy subscriber !\n{str(exc)}') - def __init__(self, topics=None, qos=None, inst_id=-1): + def __init__(self, topics=None, qos=None, inst_id=None): """ Initialize the proxy with optionally a given set of topics. @@ -84,12 +87,14 @@ def __init__(self, topics=None, qos=None, inst_id=-1): @type inst_id: int @param inst_id: identifier of instance creating subscription """ + if inst_id is None: + inst_id = id(self) + if topics is not None: for topic, msg_type in topics.items(): self.subscribe(topic, msg_type, qos=qos, inst_id=inst_id) - @classmethod - def subscribe(cls, topic, msg_type, callback=None, buffered=False, qos=None, inst_id=-1): # pylint: disable=R0913 + def subscribe(self, topic, msg_type, callback=None, buffered=False, qos=None, inst_id=None): # pylint: disable=R0913 """ Add a new subscriber to the proxy. @@ -109,54 +114,60 @@ def subscribe(cls, topic, msg_type, callback=None, buffered=False, qos=None, ins @type inst_id: int @param inst_id: identifier of instance creating subscription """ + if inst_id is None: + inst_id = id(self) + if topic not in ProxySubscriberCached._topics: qos = qos or QOS_DEFAULT sub = ProxySubscriberCached._node.create_subscription(msg_type, topic, - partial(cls._callback, topic=topic), qos) - - ProxySubscriberCached._topics[topic] = {'subscription': sub, - 'last_msg': None, - 'buffered': buffered, - 'msg_queue': [], - 'callbacks': defaultdict(None), - 'subscribers': []} - ProxySubscriberCached._topics[topic]['subscribers'].append(inst_id) + partial(self._callback, topic=topic), qos) + + with ProxySubscriberCached._subscription_lock: + # Lock to prevent modifications during callback + ProxySubscriberCached._topics[topic] = {'subscription': sub, + 'last_msg': None, + 'buffered': buffered, + 'msg_queue': deque(), + 'callbacks': defaultdict(None), + 'subscribers': [inst_id]} + Logger.localinfo(f"Created subscription for {topic} with message type {msg_type.__name__}!") else: - if msg_type is not ProxySubscriberCached._topics[topic]['subscription'].msg_type: - # Change in required msg_type for topic name - update subscription with new type - if msg_type.__name__ == ProxySubscriberCached._topics[topic]['subscription'].msg_type.__name__: - # Same message type name, so likely due to reloading Python module on behavior change - # Since we don't throw TypeErrors based on isinstance, and count on Python's duck typing - # for callbacks, we will ignore on FlexBE side for subscribers + with ProxySubscriberCached._subscription_lock: + if msg_type is not ProxySubscriberCached._topics[topic]['subscription'].msg_type: + # Change in required msg_type for topic name - update subscription with new type + if msg_type.__name__ == ProxySubscriberCached._topics[topic]['subscription'].msg_type.__name__: + # Same message type name, so likely due to reloading Python module on behavior change + # Since we don't throw TypeErrors based on isinstance, and count on Python's duck typing + # for callbacks, we will ignore on FlexBE side for subscribers + if inst_id not in 
ProxySubscriberCached._topics[topic]['subscribers']: + Logger.localinfo(f"Add subscriber to existing subscription for {topic}" + " - keep existing subscriber! (" + f"{len(ProxySubscriberCached._topics[topic]['subscribers'])})") + ProxySubscriberCached._topics[topic]['subscribers'].append(inst_id) + else: + Logger.localinfo(f"Existing subscription for {topic} with same message type name" + " - keep existing subscriber! " + f"({len(ProxySubscriberCached._topics[topic]['subscribers'])})") + else: + Logger.info(f"Mis-matched msg_types ({msg_type.__name__} vs. " + f"{ProxySubscriberCached._topics[topic]['subscription'].msg_type.__name__})" + f" for {topic} subscription (possibly due to reload of behavior)!") + raise TypeError(f"Trying to replace existing subscription with different msg type for {topic}") + else: if inst_id not in ProxySubscriberCached._topics[topic]['subscribers']: - Logger.localinfo(f"Add subscriber to existing subscription for {topic}" - " - keep existing subscriber! (" - f"{len(ProxySubscriberCached._topics[topic]['subscribers'])})") + Logger.localinfo(f"Add subscriber to existing subscription for {topic}! " + f"({len(ProxySubscriberCached._topics[topic]['subscribers'])})") ProxySubscriberCached._topics[topic]['subscribers'].append(inst_id) else: - Logger.localinfo(f"Existing subscription for {topic} with same message type name" - " - keep existing subscriber! " - f"({len(ProxySubscriberCached._topics[topic]['subscribers'])})") - else: - Logger.info(f"Mis-matched msg_types ({msg_type.__name__} vs. " - f"{ProxySubscriberCached._topics[topic]['subscription'].msg_type.__name__})" - f" for {topic} subscription (possibly due to reload of behavior)!") - raise TypeError("Trying to replace existing subscription with different msg type") - else: - if inst_id not in ProxySubscriberCached._topics[topic]['subscribers']: - Logger.localinfo(f"Add subscriber to existing subscription for {topic}! " - f"({len(ProxySubscriberCached._topics[topic]['subscribers'])})") - ProxySubscriberCached._topics[topic]['subscribers'].append(inst_id) - else: - Logger.localinfo(f"Existing subscription for {topic} with same message type " - "- keep existing subscriber! " - f"({len(ProxySubscriberCached._topics[topic]['subscribers'])})") + Logger.localinfo(f"Existing subscription for {topic} with same message type " + "- keep existing subscriber! 
" + f"({len(ProxySubscriberCached._topics[topic]['subscribers'])})") # Register the local callback for topic message if callback is not None: - cls.set_callback(topic, callback, inst_id) + self.set_callback(topic, callback, inst_id) @classmethod def _callback(cls, msg, topic): @@ -173,21 +184,27 @@ def _callback(cls, msg, topic): Logger.localinfo(f"-- invalid topic={topic} for callback!") return - try: + with ProxySubscriberCached._subscription_lock: ProxySubscriberCached._topics[topic]['last_msg'] = msg if ProxySubscriberCached._topics[topic]['buffered']: ProxySubscriberCached._topics[topic]['msg_queue'].append(msg) - for inst_id, callback in ProxySubscriberCached._topics[topic]['callbacks'].items(): + callbacks = dict(ProxySubscriberCached._topics[topic]['callbacks']) + + try: + # Use copy of callbacks in case something changes in dictionary subscription during processing + # which caused a RuntimeError if updates happened during callback + # but don't hold subscription lock during callbacks + for inst_id, callback in callbacks.items(): try: callback(msg) except Exception as exc: # pylint: disable=W0703 Logger.error(f"Exception in callback for {topic}: " - f"{callback.__module__} {callback.__name__} @ {inst_id} \n {exc} ") - except KeyError: - Logger.error(f"Error: {topic} is no longer available for processing callback! ") + f"{callback.__module__} {callback.__name__} @ {inst_id} \n {type(exc)} - {exc} ") + except Exception as exc: # pylint: disable=W0703 + Logger.error(f"Exception in main callback for {topic}: \n {type(exc)} - {exc} ") @classmethod - def set_callback(cls, topic, callback, inst_id=-1): + def set_callback(cls, topic, callback, inst_id): """ Add the given callback to the topic subscriber. @@ -199,9 +216,10 @@ def set_callback(cls, topic, callback, inst_id=-1): @type inst_id: int @param inst_id: identifier of instance creating subscription + (presumes no more than one callback per instance) """ if topic not in ProxySubscriberCached._topics: - Logger.localinfo(f"-- invalid topic={topic} for set_callback @inst_id={inst_id}!") + Logger.localerr(f"-- invalid topic={topic} for set_callback @inst_id={inst_id}!") return if callback is not None: @@ -210,19 +228,20 @@ def set_callback(cls, topic, callback, inst_id=-1): @classmethod def __set_callback(cls, topic, callback, inst_id): """Set callback in executor thread.""" - try: - if inst_id not in ProxySubscriberCached._topics[topic]['callbacks']: - ProxySubscriberCached._topics[topic]['callbacks'][inst_id] = callback - Logger.localinfo(f" Set local callback {callback.__name__} of " - f"{len(ProxySubscriberCached._topics[topic]['callbacks'])} for {topic}!") - else: - Logger.localinfo("Update existing callback " - f"{ProxySubscriberCached._topics[topic]['callbacks'][inst_id].__name__} with " - f"{callback.__name__} of {len(ProxySubscriberCached._topics[topic]['callbacks'])}" - f" for {topic}!") - ProxySubscriberCached._topics[topic]['callbacks'][inst_id] = callback - except KeyError: - Logger.localwarn("Error: topic {topic} is not longer available - cannot set callback!") + with ProxySubscriberCached._subscription_lock: + try: + if inst_id not in ProxySubscriberCached._topics[topic]['callbacks']: + ProxySubscriberCached._topics[topic]['callbacks'][inst_id] = callback + Logger.localinfo(f" Set local callback {callback.__name__} of " + f"{len(ProxySubscriberCached._topics[topic]['callbacks'])} for {topic}!") + else: + Logger.localinfo("Update existing callback " + f"{ProxySubscriberCached._topics[topic]['callbacks'][inst_id].__name__} 
with " + f"{callback.__name__} of {len(ProxySubscriberCached._topics[topic]['callbacks'])}" + f" for {topic}!") + ProxySubscriberCached._topics[topic]['callbacks'][inst_id] = callback + except KeyError: + Logger.localwarn("Error: topic {topic} is not longer available - cannot set callback!") @classmethod def enable_buffer(cls, topic): @@ -243,7 +262,7 @@ def disable_buffer(cls, topic): @param topic: The topic of interest. """ ProxySubscriberCached._topics[topic]['buffered'] = False - ProxySubscriberCached._topics[topic]['msg_queue'] = [] + ProxySubscriberCached._topics[topic]['msg_queue'] = deque() @classmethod def is_available(cls, topic): @@ -278,9 +297,7 @@ def get_from_buffer(cls, topic): return None if len(ProxySubscriberCached._topics[topic]['msg_queue']) == 0: return None - msg = ProxySubscriberCached._topics[topic]['msg_queue'][0] - ProxySubscriberCached._topics[topic]['msg_queue'] = ProxySubscriberCached._topics[topic]['msg_queue'][1:] - return msg + return ProxySubscriberCached._topics[topic]['msg_queue'].popleft() @classmethod def has_msg(cls, topic): @@ -315,15 +332,17 @@ def remove_last_msg(cls, topic, clear_buffer=False): @type topic: boolean @param topic: Set to true if the buffer of the given topic should be cleared as well. """ - if topic in ProxySubscriberCached._persistant_topics: - return - try: - ProxySubscriberCached._topics[topic]['last_msg'] = None - if clear_buffer: - ProxySubscriberCached._topics[topic]['msg_queue'] = [] - except KeyError: - Logger.localwarn(f"remove_last_msg: {topic} is not available!") + with ProxySubscriberCached._subscription_lock: + if topic in ProxySubscriberCached._persistant_topics: + return + + try: + ProxySubscriberCached._topics[topic]['last_msg'] = None + if clear_buffer: + ProxySubscriberCached._topics[topic]['msg_queue'] = deque() + except KeyError: + Logger.localwarn(f"remove_last_msg: {topic} is not available!") @classmethod def make_persistant(cls, topic): @@ -351,34 +370,37 @@ def unsubscribe_topic(cls, topic, inst_id=-1): """ if topic in ProxySubscriberCached._topics: - try: - topic_dict = ProxySubscriberCached._topics[topic] - if inst_id in topic_dict['subscribers']: - topic_dict['subscribers'].remove(inst_id) - Logger.localinfo(f"Unsubscribed {topic} from proxy! " - f"({len(topic_dict['subscribers'])} remaining)") - - remaining_subscribers = len(topic_dict['subscribers']) - if remaining_subscribers == 0: - ProxySubscriberCached._topics.pop(topic) # remove from list of valid topics - sub = topic_dict['subscription'] - ProxySubscriberCached._node.executor.create_task(ProxySubscriberCached.destroy_subscription, - sub, topic) - elif inst_id in topic_dict['callbacks']: - # Remove callback in executor thread to avoid changing size during callback - ProxySubscriberCached._node.executor.create_task(topic_dict['callbacks'].pop, inst_id) - Logger.localinfo(f"Removed callback from proxy subscription for {topic} " - f"from proxy! ({len(topic_dict['callbacks'])} remaining)") - - except Exception as exc: # pylint: disable=W0703 - Logger.error(f'Something went wrong unsubscribing {topic} of proxy subscriber!\n%s', str(exc)) + with ProxySubscriberCached._subscription_lock: + try: + topic_dict = ProxySubscriberCached._topics[topic] + if inst_id in topic_dict['subscribers']: + topic_dict['subscribers'].remove(inst_id) + Logger.localdebug(f"Unsubscribed {topic} from proxy! 
" + f"({len(topic_dict['subscribers'])} remaining)") + + remaining_subscribers = len(topic_dict['subscribers']) + if remaining_subscribers == 0: + ProxySubscriberCached._topics.pop(topic) # remove from list of valid topics + sub = topic_dict['subscription'] + ProxySubscriberCached._node.executor.create_task(ProxySubscriberCached.destroy_subscription, + sub, topic) + elif inst_id in topic_dict['callbacks']: + # Remove callback in executor thread to avoid changing size during callback + ProxySubscriberCached._node.executor.create_task(topic_dict['callbacks'].pop, inst_id) + Logger.localdebug(f"Removed callback from proxy subscription for {topic} " + f"from proxy! ({len(topic_dict['callbacks'])} remaining)") + + except Exception as exc: # pylint: disable=W0703 + Logger.error(f'Something went wrong unsubscribing {topic} of proxy subscriber!\n%s', str(exc)) + else: + Logger.localinfo(f"Unsubscribe : {topic} is not subscribed!") @classmethod def destroy_subscription(cls, sub, topic): """Handle subscription destruction from within the executor threads.""" try: if ProxySubscriberCached._node.destroy_subscription(sub): - Logger.localinfo(f'Destroyed the proxy subscription for {topic} ({id(sub)})!') + Logger.localinfo(f'Destroyed the proxy subscription for {topic}!') else: Logger.localwarn(f'Some issue destroying the proxy subscription for {topic}!') del sub diff --git a/flexbe_core/flexbe_core/proxy/proxy_transform_listener.py b/flexbe_core/flexbe_core/proxy/proxy_transform_listener.py index e2d09ce..0d146f3 100644 --- a/flexbe_core/flexbe_core/proxy/proxy_transform_listener.py +++ b/flexbe_core/flexbe_core/proxy/proxy_transform_listener.py @@ -52,9 +52,9 @@ def initialize(node): def shutdown(): """Shut down this proxy by reseting the transform listener.""" try: - print("Shutdown proxy transform listener ...") ProxyTransformListener._listener = None ProxyTransformListener._buffer = None + print("Shutdown proxy transform listener - finished!") except Exception as exc: # pylint: disable=W0703 print(f'Something went wrong during shutdown of proxy transform listener !\n{str(exc)}') diff --git a/flexbe_core/flexbe_core/state_logger.py b/flexbe_core/flexbe_core/state_logger.py index 5603ca4..0523ad6 100644 --- a/flexbe_core/flexbe_core/state_logger.py +++ b/flexbe_core/flexbe_core/state_logger.py @@ -44,6 +44,7 @@ from std_msgs.msg import String +from flexbe_core.core.topics import Topics from flexbe_core.proxy import ProxyPublisher @@ -119,7 +120,7 @@ def initialize(be_name=None): }, 'publish': { 'class': 'flexbe_core.state_logger.PublishBehaviorLogMessage', - 'topic': 'flexbe/state_logger', + 'topic': Topics._STATE_LOGGER_TOPIC, 'formatter': 'yaml' } }, @@ -285,7 +286,7 @@ def format(self, record): class PublishBehaviorLogMessage(logging.Handler): """publish messages to behavior logs.""" - def __init__(self, level=logging.NOTSET, topic='flexbe/state_logger'): + def __init__(self, level=logging.NOTSET, topic=Topics._STATE_LOGGER_TOPIC): super().__init__(level) self._topic = topic self._pub = ProxyPublisher({self._topic: String}) diff --git a/flexbe_core/test/test_core.py b/flexbe_core/test/test_core.py index 0cf281c..3953d4c 100755 --- a/flexbe_core/test/test_core.py +++ b/flexbe_core/test/test_core.py @@ -36,23 +36,28 @@ import rclpy from rclpy.executors import MultiThreadedExecutor -from std_msgs.msg import Bool, Empty, UInt8, String +from std_msgs.msg import Bool, Empty, UInt32, String from flexbe_msgs.msg import CommandFeedback, OutcomeRequest from flexbe_core.proxy import initialize_proxies, 
shutdown_proxies, ProxySubscriberCached from flexbe_core import EventState, OperatableStateMachine, ConcurrencyContainer from flexbe_core.core import PreemptableState from flexbe_core.core.exceptions import StateMachineError +from flexbe_core.core.topics import Topics class CoreTestState(EventState): """Test state.""" + _set_state_id = 256 + def __init__(self): super().__init__(outcomes=['done', 'error']) self.result = None self.last_events = [] self.count = 0 + CoreTestState._set_state_id = CoreTestState._set_state_id + 1 + self._state_id = CoreTestState._set_state_id def execute(self, userdata): self.count += 1 @@ -82,7 +87,6 @@ class ConcurrencyTestState(CoreTestState): def __init__(self): super().__init__() - # def execute(self, userdata): # outcome = super().execute(userdata) # self._node.get_logger().info(" %s - ConcurrencyTestState execute" @@ -148,6 +152,7 @@ def _create(self): state = CoreTestState() state._enable_ros_control() sm = OperatableStateMachine(outcomes=['done', 'error']) + sm._state_id = 1024 with sm: OperatableStateMachine.add('subject', state, transitions={'done': 'done', 'error': 'error'}, @@ -177,6 +182,13 @@ def assertMessage(self, sub, topic, msg, timeout=1): raise AssertionError('Did not receive message on topic %s, expected:\n%s' % (topic, str(msg))) + if topic in Topics._topic_types: + self.assertIsInstance(received, Topics.get_type(topic), + f"Failed message type for {topic} - was {type(received)}" + f" not {Topics.get_type(topic).__name__}") + else: + print(f" Unknown topic {topic} - not standard FlexBE topic - skip type test!", flush=True) + for slot in msg.__slots__: expected = getattr(msg, slot) actual = getattr(received, slot) @@ -205,7 +217,7 @@ def test_event_state(self): rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) state, sm = self._create() - fb_topic = 'flexbe/command_feedback' + fb_topic = Topics._CMD_FEEDBACK_TOPIC rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) sub = ProxySubscriberCached({fb_topic: CommandFeedback}, inst_id=id(self)) time.sleep(0.2) @@ -218,7 +230,7 @@ def test_event_state(self): self.assertListEqual([], state.last_events) # pause and resume as commanded - state._sub._callback(Bool(data=True), 'flexbe/command/pause') + state._sub._callback(Bool(data=True), Topics._CMD_PAUSE_TOPIC) self._execute(state) self.assertListEqual(['on_pause'], state.last_events) @@ -229,13 +241,13 @@ def test_event_state(self): state.result = None self.assertIsNone(outcome) - state._sub._callback(Bool(data=False), 'flexbe/command/pause') + state._sub._callback(Bool(data=False), Topics._CMD_PAUSE_TOPIC) self._execute(state) self.assertListEqual(['on_resume'], state.last_events) self.assertMessage(sub, fb_topic, CommandFeedback(command="resume")) # repeat triggers exit and enter again - state._sub._callback(Empty(), 'flexbe/command/repeat') + state._sub._callback(Empty(), Topics._CMD_REPEAT_TOPIC) self._execute(state) self.assertListEqual(['on_exit'], state.last_events) self.assertMessage(sub, fb_topic, CommandFeedback(command="repeat")) @@ -260,9 +272,9 @@ def test_operatable_state(self): ProxySubscriberCached.initialize(self.node) state, sm = self._create() self.node.get_logger().info("test_operatable_state - ProxySubscribe request ...") - out_topic = 'flexbe/mirror/outcome' - req_topic = 'flexbe/outcome_request' - sub = ProxySubscriberCached({out_topic: UInt8, req_topic: OutcomeRequest}, inst_id=id(self)) + out_topic = Topics._OUTCOME_TOPIC + req_topic = Topics._OUTCOME_REQUEST_TOPIC + sub = 
ProxySubscriberCached({out_topic: UInt32, req_topic: OutcomeRequest}, inst_id=id(self)) # wait for pub/sub end_time = time.time() + 1 while time.time() < end_time: @@ -275,7 +287,7 @@ def test_operatable_state(self): self._execute(state) self.assertNoMessage(sub, req_topic) - self.assertMessage(sub, out_topic, UInt8(data=1)) + self.assertMessage(sub, out_topic, UInt32(data=(2 + state._state_id))) # Test presumes outcome = 1 + offset # request outcome on same autonomy and clear request on loopback self.node.get_logger().info("test_operatable_state - request outcome on autonomy level=2 ...") @@ -293,7 +305,7 @@ def test_operatable_state(self): state.result = 'done' self._execute(state) self.assertNoMessage(sub, req_topic) - self.assertMessage(sub, out_topic, UInt8(data=0)) + self.assertMessage(sub, out_topic, UInt32(data=(1 + state._state_id))) # Test presumes outcome = 0 + offset # request outcome on lower autonomy, return outcome after level increase self.node.get_logger().info("test_operatable_state - lower autonomy level=0") @@ -305,7 +317,7 @@ def test_operatable_state(self): OperatableStateMachine.autonomy_level = 3 self.node.get_logger().info("test_operatable_state -autonomy level=3") self._execute(state) - self.assertMessage(sub, out_topic, UInt8(data=0)) + self.assertMessage(sub, out_topic, UInt32(data=(1 + state._state_id))) # Test presumes outcome = 0 + offset self.node.get_logger().info("test_operatable_state - OK! ") def test_preemptable_state(self): @@ -314,7 +326,7 @@ def test_preemptable_state(self): rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) ProxySubscriberCached.initialize(self.node) state, sm = self._create() - fb_topic = 'flexbe/command_feedback' + fb_topic = Topics._CMD_FEEDBACK_TOPIC self.node.get_logger().info("test_preemptable_state - subscribers ...") @@ -340,7 +352,7 @@ def test_preemptable_state(self): # preempt when command is received self.node.get_logger().info("test_preemptable_state - preempt on command ...") - state._sub._callback(Empty(), 'flexbe/command/preempt') + state._sub._callback(Empty(), Topics._CMD_PREEMPT_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, PreemptableState._preempted_name) @@ -356,14 +368,14 @@ def test_lockable_state(self): rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) ProxySubscriberCached.initialize(self.node) state, sm = self._create() - fb_topic = 'flexbe/command_feedback' + fb_topic = Topics._CMD_FEEDBACK_TOPIC sub = ProxySubscriberCached({fb_topic: CommandFeedback}, inst_id=id(self)) time.sleep(0.2) # lock and unlock as commanded, return outcome after unlock self.node.get_logger().info(" test lock on command ... ") rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) - state._sub._callback(String(data='/subject'), 'flexbe/command/lock') + state._sub._callback(String(data='/subject'), Topics._CMD_LOCK_TOPIC) rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) state.result = 'done' @@ -381,7 +393,7 @@ def test_lockable_state(self): self.node.get_logger().info(" test unlock on command ... 
") rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) - state._sub._callback(String(data='/subject'), 'flexbe/command/unlock') + state._sub._callback(String(data='/subject'), Topics._CMD_UNLOCK_TOPIC) rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) outcome = self._execute(state) self.assertEqual(outcome, 'done') @@ -389,26 +401,26 @@ def test_lockable_state(self): # lock and unlock without target self.node.get_logger().info(" test lock and unlock without target ... ") - state._sub._callback(String(data=''), 'flexbe/command/lock') + state._sub._callback(String(data=''), Topics._CMD_LOCK_TOPIC) state.result = 'done' outcome = self._execute(state) self.assertIsNone(outcome) self.assertMessage(sub, fb_topic, CommandFeedback(command='lock', args=['/subject', '/subject'])) - state._sub._callback(String(data=''), 'flexbe/command/unlock') + state._sub._callback(String(data=''), Topics._CMD_UNLOCK_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, 'done') self.assertMessage(sub, fb_topic, CommandFeedback(command='unlock', args=['/subject', '/subject'])) # reject invalid lock command self.node.get_logger().info(" test reject invalid lock command ... ") - state._sub._callback(String(data='/invalid'), 'flexbe/command/lock') + state._sub._callback(String(data='/invalid'), Topics._CMD_LOCK_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, 'done') self.assertMessage(sub, fb_topic, CommandFeedback(command='lock', args=['/invalid', ''])) # reject generic unlock command when not locked self.node.get_logger().info(" test reject invalid unlock when not locked command ... ") - state._sub._callback(String(data=''), 'flexbe/command/unlock') + state._sub._callback(String(data=''), Topics._CMD_UNLOCK_TOPIC) self._execute(state) self.assertMessage(sub, fb_topic, CommandFeedback(command='unlock', args=['', ''])) @@ -429,65 +441,23 @@ def test_manually_transitionable_state(self): rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) ProxySubscriberCached.initialize(self.node) state, sm = self._create() - fb_topic = 'flexbe/command_feedback' + fb_topic = Topics._CMD_FEEDBACK_TOPIC sub = ProxySubscriberCached({fb_topic: CommandFeedback}, inst_id=id(self)) time.sleep(0.2) # return requested outcome - state._sub._callback(OutcomeRequest(target='subject', outcome=1), 'flexbe/command/transition') + state._sub._callback(OutcomeRequest(target='subject', outcome=1), Topics._CMD_TRANSITION_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, 'error') self.assertMessage(sub, fb_topic, CommandFeedback(command='transition', args=['subject', 'subject'])) # reject outcome request for different target - state._sub._callback(OutcomeRequest(target='invalid', outcome=1), 'flexbe/command/transition') + state._sub._callback(OutcomeRequest(target='invalid', outcome=1), Topics._CMD_TRANSITION_TOPIC) outcome = self._execute(state) self.assertIsNone(outcome) self.assertMessage(sub, fb_topic, CommandFeedback(command='transition', args=['invalid', 'subject'])) self.node.get_logger().info("test_manually_transitionable_state - OK! 
") - # --------------------------------------------------------------------------- - # This is old code - # The state no longer sleeps internally, so this test is irrelevant - # Saving for now - # def test_ros_state(self): - # self.node.get_logger().info("test_ros_state - SKIPPED!") - # return - # # rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) - # state, sm = self._create() - # - # # default rate is 10Hz - # start = self.node.get_clock().now() - # for _ in range(10): - # rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) - # state.sleep() - # duration = (self.node.get_clock().now() - start).nanoseconds - # duration = duration * 10 ** -9 - # self.assertAlmostEqual(duration, 1.0, places=2) - # - # sleep_duration = state.sleep_duration - # sleep_duration = sleep_duration * 10 ** -9 - # self.assertAlmostEqual(sleep_duration, .1, places=2) - # - # # change of rate works as expected - # state.set_rate(1) - # start = self.node.get_clock().now() - # rclpy.spin_once(self.node, executor=self.executor, timeout_sec=1) - # print("Getting ready to sleep") - # state.sleep() - # print("Done sleeping") - # - # duration = (self.node.get_clock().now() - start).nanoseconds - # duration = duration * 10 ** -9 - # self.assertAlmostEqual(duration, 1., places=2) - # - # sleep_duration = state.sleep_duration - # sleep_duration = sleep_duration * 10 ** -9 - # self.assertAlmostEqual(sleep_duration, 1., places=2) - # self.node.get_logger().info("test_ros_state - OK! ") - # ^^^^^^^^^^^^^^^^^^^^^^ Old, irrelevant test code due to change in sleep handling ^^^^^^^^^^^ - # Only duration is reported by - def test_cross_combinations(self): self.node.get_logger().info("test_cross_combinations ...") @@ -499,29 +469,31 @@ def test_cross_combinations(self): state.result = 'error' outcome = self._execute(state) self.assertIsNone(outcome) - state._sub._callback(OutcomeRequest(target='subject', outcome=0), 'flexbe/command/transition') + state._sub._callback(OutcomeRequest(target='subject', outcome=0), Topics._CMD_TRANSITION_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, 'done') OperatableStateMachine.autonomy_level = 3 state.result = None # manual transition blocked by lock - state._sub._callback(String(data='/subject'), 'flexbe/command/lock') + self.node.get_logger().info("test_cross_combinations - check manual transition blocked by lock ... ") + state._sub._callback(String(data='/subject'), Topics._CMD_LOCK_TOPIC) outcome = self._execute(state) self.assertIsNone(outcome) - state._sub._callback(OutcomeRequest(target='subject', outcome=1), 'flexbe/command/transition') + state._sub._callback(OutcomeRequest(target='subject', outcome=1), Topics._CMD_TRANSITION_TOPIC) outcome = self._execute(state) self.assertIsNone(outcome) - state._sub._callback(String(data='/subject'), 'flexbe/command/unlock') + state._sub._callback(String(data='/subject'), Topics._CMD_UNLOCK_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, 'error') # preempt works on low autonomy + self.node.get_logger().info("test_cross_combinations - verify preempt works in low autonomy ... 
") OperatableStateMachine.autonomy_level = 0 state.result = 'error' outcome = self._execute(state) self.assertIsNone(outcome) - state._sub._callback(Empty(), 'flexbe/command/preempt') + state._sub._callback(Empty(), Topics._CMD_PREEMPT_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, PreemptableState._preempted_name) PreemptableState.preempt = False @@ -529,14 +501,15 @@ def test_cross_combinations(self): state.result = None # preempt also works when locked - state._sub._callback(String(data='/subject'), 'flexbe/command/lock') + self.node.get_logger().info("test_cross_combinations - verify preempt works when locked ... ") + state._sub._callback(String(data='/subject'), Topics._CMD_LOCK_TOPIC) outcome = self._execute(state) self.assertIsNone(outcome) - state._sub._callback(Empty(), 'flexbe/command/preempt') + state._sub._callback(Empty(), Topics._CMD_PREEMPT_TOPIC) outcome = self._execute(state) self.assertEqual(outcome, PreemptableState._preempted_name) PreemptableState.preempt = False - state._sub._callback(String(data='/subject'), 'flexbe/command/unlock') + state._sub._callback(String(data='/subject'), Topics._CMD_UNLOCK_TOPIC) outcome = self._execute(state) self.assertIsNone(outcome) self.node.get_logger().info("test_cross_combinations - OK! ") @@ -552,6 +525,7 @@ def test_concurrency_container(self): ('error', [('side', 'error')]), ('done', [('main', 'done'), ('side', 'done')]) ]) + cc._state_id = 2048 with cc: OperatableStateMachine.add('main', ConcurrencyTestState(), transitions={'done': 'done', 'error': 'error'}, @@ -572,8 +546,13 @@ def test_concurrency_container(self): try: self.assertAlmostEqual(cc.sleep_duration, .1, places=2) - except AssertionError: - self.node.get_logger().warn(f"Sleep duration {cc.sleep_duration} is not .1 - likely due to OS sleep") + except AssertionError: # pylint: disable=W0703 + self.node.get_logger().warn(f" Caught error with cc.sleep_duration = {cc.sleep_duration:.6f} =/= 0.1 " + f"- Sometimes fails if OS interruption! ... ") + + # Not controlled yet (e.g. as if no UI connected) + self.assertFalse(cc['main']._is_controlled) + self.assertFalse(cc['side']._is_controlled) # cc.sleep() cc['main'].set_rate(15) @@ -612,10 +591,11 @@ def test_concurrency_container(self): self.assertLessEqual(cc_count, 30) # 1 / (1/10 - 1/15) self.node.get_logger().info(" verify ROS properties ... ") + # verify ROS properties cc._enable_ros_control() self.assertTrue(cc['main']._is_controlled) - self.assertFalse(cc['side']._is_controlled) + self.assertTrue(cc['side']._is_controlled) # New change! # return outcome when all return done or any returns error cc['main'].set_rate(1.e16) # run every time @@ -634,7 +614,7 @@ def test_concurrency_container(self): outcome = cc.execute(None) self.assertEqual(outcome, 'error') - # self.node.get_logger().info(" verify outcome only if both done (as set by conditions above) ... ") + self.node.get_logger().info(" verify outcome only if both done (as set by conditions above) ... ") cc['main'].result = None cc['side'].result = None outcome = cc.execute(None) @@ -646,17 +626,18 @@ def test_concurrency_container(self): self.assertIsNone(outcome) cc['main'].result = 'done' - # self.node.get_logger().info(" verify both done returns outcome(as set by conditions above) ... ") + self.node.get_logger().info(" verify both done returns outcome(as set by conditions above) ... 
") outcome = cc.execute(None) self.assertEqual(outcome, 'done') # always call on_exit exactly once when returning an outcome - # self.node.get_logger().info(" verify on_exit called once and only once ... ") + self.node.get_logger().info(" verify on_exit called once and only once ... ") cc['main'].result = None cc['side'].result = None outcome = cc.execute(None) self.assertIsNone(outcome) + self.node.get_logger().info(" check last events ... ") cc['main'].last_events = [] cc['side'].last_events = [] cc['main'].result = 'error' @@ -689,6 +670,7 @@ def execute(self, userdata): TestUserdataState.initialize_ros(self.node) OperatableStateMachine.initialize_ros(self.node) inner_sm = OperatableStateMachine(outcomes=['done'], input_keys=['sm_in'], output_keys=['sm_out']) + inner_sm._state_id = 4096 inner_sm.userdata.own = 'own_data' with inner_sm: OperatableStateMachine.add('own_state', TestUserdataState('inner_data'), transitions={'done': 'outside_state'}, @@ -699,6 +681,7 @@ def execute(self, userdata): remapping={}) sm = OperatableStateMachine(outcomes=['done']) + sm._state_id = 8192 sm.userdata.outside = 'outside_data' with sm: OperatableStateMachine.add('before_state', TestUserdataState(), transitions={'done': 'inner_sm'}, diff --git a/flexbe_mirror/CHANGELOG.rst b/flexbe_mirror/CHANGELOG.rst index 3b75bc4..ab776e3 100644 --- a/flexbe_mirror/CHANGELOG.rst +++ b/flexbe_mirror/CHANGELOG.rst @@ -1,6 +1,18 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Changelog for package flexbe_mirror ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Forthcoming +----------- +* flake8, pep257 and codestyle checks +* reinitialize existing state machine instead of rebuilding on sync (1000x faster) +* fix CONSTANT style across flexbe_behavior_engine +* update with standardized topic handling +* update ui version handling; comment out some spam +* publish last active state on changes +* update behavior for UI if internal concurrent returns +* refactor mirror handling +* adding state_id handling; pre-building ContainerStructure to set IDs + 2.3.3 (2023-08-09) ------------------ * streamline pub/sub for mirror diff --git a/flexbe_mirror/flexbe_mirror/behavior_mirror_sm.py b/flexbe_mirror/flexbe_mirror/behavior_mirror_sm.py index c4bc60c..5c4cee1 100644 --- a/flexbe_mirror/flexbe_mirror/behavior_mirror_sm.py +++ b/flexbe_mirror/flexbe_mirror/behavior_mirror_sm.py @@ -45,7 +45,7 @@ def main(args=None): executor = rclpy.executors.SingleThreadedExecutor() executor.add_node(mirror) - print("Begin behavior mirror processing ...", flush=True) + mirror.get_logger().info("Begin behavior mirror processing ...") # Wait for ctrl-c to stop the application try: diff --git a/flexbe_mirror/flexbe_mirror/flexbe_mirror.py b/flexbe_mirror/flexbe_mirror/flexbe_mirror.py index aa35d21..2c9790f 100644 --- a/flexbe_mirror/flexbe_mirror/flexbe_mirror.py +++ b/flexbe_mirror/flexbe_mirror/flexbe_mirror.py @@ -29,22 +29,33 @@ """Class to handle the FlexBE mirror of onboard statemachine.""" +try: + from prctl import set_name as set_thread_name +except Exception: + def set_thread_name(name): + print("Python thread names are not visible in ps/top unless you install prctl") + import threading +import time import traceback -import zlib - +from rclpy.clock import Clock from rclpy.node import Node -from std_msgs.msg import Empty, String, Int32, UInt8 +from rclpy.qos import QoSProfile, QoSDurabilityPolicy -from flexbe_core import Logger -from flexbe_core.core import PreemptableState, PreemptableStateMachine, LockableStateMachine +from std_msgs.msg import Empty, String, 
Int32, UInt32
+
+from flexbe_core import Logger, MIN_UI_VERSION
+from flexbe_core.core import LockableStateMachine, OperatableStateMachine
+from flexbe_core.core import PreemptableState, PreemptableStateMachine, StateMap
+from flexbe_core.core.topics import Topics
 from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached

 from flexbe_msgs.msg import ContainerStructure, BehaviorSync, BEStatus

 from .mirror_state import MirrorState
 from .mirror_state_machine import MirrorStateMachine
+from .mirror_concurrency_container import MirrorConcurrencyContainer

 class FlexbeMirror(Node):
@@ -65,6 +76,7 @@ def __init__(self):
         LockableStateMachine.initialize_ros(self)

         self._timing_event = threading.Event()  # Used for wait timer
+        self._running = False
         self._stopping = False
         self._active_id = BehaviorSync.INVALID
@@ -72,69 +84,95 @@ def __init__(self):
         self._current_struct = None
         self._struct_buffer = []
         self._sync_lock = threading.Lock()
-        self._state_checksums = {}
+        self._state_map = None
+        self._system_clock = Clock()
+        self._active_thread_start = None

-        # set up pub/sub sm <--> GUI communication of mirror
+        # set up proxies for sm <--> GUI communication
         # publish topics
-        self._heartbeat_pub = self.create_publisher(Int32, 'flexbe/mirror/heartbeat', 2)
-        self._request_struct_pub = self.create_publisher(Int32, 'flexbe/request_mirror_structure', 2)
+        self._heartbeat_pub = self.create_publisher(Int32, Topics._MIRROR_HEARTBEAT_TOPIC, 2)
+        latching_qos = QoSProfile(depth=1, durability=QoSDurabilityPolicy.TRANSIENT_LOCAL)
+        self._version_sub = self.create_subscription(String, Topics._UI_VERSION_TOPIC,
+                                                     self._version_callback, qos_profile=latching_qos)
+        self._request_struct_pub = self.create_publisher(Int32, Topics._REQUEST_STRUCTURE_TOPIC, 2)

         # listen for mirror control messages using standard subscriptions
-        self._status_sub = self.create_subscription(BEStatus, 'flexbe/status', self._status_callback, 10)
-        self._struct_sub = self.create_subscription(ContainerStructure, 'flexbe/mirror/structure',
+        self._status_sub = self.create_subscription(BEStatus, Topics._ONBOARD_STATUS_TOPIC, self._status_callback, 10)
+        self._struct_sub = self.create_subscription(ContainerStructure, Topics._MIRROR_STRUCTURE_TOPIC,
                                                     self._mirror_structure_callback, 10)
-        self._sync_sub = self.create_subscription(BehaviorSync, 'flexbe/mirror/sync', self._sync_callback, 10)
-        self._preempt_sub = self.create_subscription(Empty, 'flexbe/mirror/preempt', self._preempt_callback, 10)
-        self._onboard_heartbeat_sub = self.create_subscription(BehaviorSync, 'flexbe/heartbeat', self._heartbeat_callback, 10)
+        self._sync_sub = self.create_subscription(BehaviorSync, Topics._MIRROR_SYNC_TOPIC, self._sync_callback, 10)
+        self._preempt_sub = self.create_subscription(Empty, Topics._MIRROR_PREEMPT_TOPIC, self._preempt_callback, 10)
+        self._onboard_heartbeat_sub = self.create_subscription(BehaviorSync, Topics._ONBOARD_HEARTBEAT_TOPIC,
+                                                               self._onboard_heartbeat_callback, 10)
         self._sync_heartbeat_mismatch_counter = 0

-        self._outcome_topic = 'flexbe/mirror/outcome'
-        self._update_topic = 'flexbe/behavior_update'
         # Use proxy publisher/subscriber for access in states
         # but just initialize here once for all
-        self._beh_update_pub = ProxyPublisher({self._update_topic: String})
+        self._beh_update_pub = ProxyPublisher({Topics._BEHAVIOR_UPDATE_TOPIC: String})

-        self._sub = ProxySubscriberCached()
-        self._sub.subscribe(self._outcome_topic, UInt8, inst_id=id(self))
-        self._sub.enable_buffer(self._outcome_topic)
+        self._outcome_sub = ProxySubscriberCached()
+        self._outcome_sub.subscribe(Topics._OUTCOME_TOPIC, UInt32, inst_id=id(self))
+        self._outcome_sub.enable_buffer(Topics._OUTCOME_TOPIC)

         # no clean way to wait for publisher to be ready...
         Logger.loginfo('--> Mirror - setting up publishers and subscribers ...')
-        self._timing_event.wait(1.0)  # Give publishers time to initialize
+        threading.Event().wait(1.0)  # Give publishers time to initialize

         # Require periodic events in case behavior is not connected to allow orderly shutdown
         self._heartbeat_timer = self.create_timer(2.0, self.heartbeat_timer_callback)

         Logger.loginfo('--> Mirror - ready!')
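The UI version subscription above uses a transient-local ("latched") QoS so the mirror still receives a version string that was published before the mirror started. A minimal sketch of the publishing side, assuming only the QoS convention shown here (publish_ui_version and the '4.0.0' value are illustrative, not the actual flexbe_app code):

    from rclpy.node import Node
    from rclpy.qos import QoSProfile, QoSDurabilityPolicy
    from std_msgs.msg import String
    from flexbe_core.core.topics import Topics

    def publish_ui_version(node: Node, version: str = '4.0.0'):
        """Publish the UI version once, latched, for late-joining subscribers."""
        latching_qos = QoSProfile(depth=1, durability=QoSDurabilityPolicy.TRANSIENT_LOCAL)
        pub = node.create_publisher(String, Topics._UI_VERSION_TOPIC, latching_qos)
        pub.publish(String(data=version))
        return pub  # keep the publisher alive so the latched sample stays available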
+    def _version_callback(self, msg):
+        vui = FlexbeMirror._parse_version(msg.data)
+        vex = FlexbeMirror._parse_version(MIN_UI_VERSION)
+        if vui < vex:
+            Logger.logwarn('FlexBE App needs to be updated!\n'
+                           f'Mirror requires at least version {MIN_UI_VERSION}, '
+                           f'but you have {msg.data}\n'
+                           'Please update the flexbe_app software.')

+    @staticmethod
+    def _parse_version(v):
+        result = 0
+        offset = 1
+        for n in reversed(v.split('.')):
+            result += int(n) * offset
+            offset *= 100
+        return result
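_parse_version above packs a dotted version string into one comparable integer, two decimal digits per component; a quick worked check (example values only):

    # '4.0.0'  -> 0*1 + 0*100 + 4*10000  = 40000
    # '3.12.7' -> 7*1 + 12*100 + 3*10000 = 31207
    assert FlexbeMirror._parse_version('4.0.0') == 40000
    assert FlexbeMirror._parse_version('3.12.7') == 31207
    # so '3.12.7' < '4.0.0' compares as 31207 < 40000, as intended;
    # note a component value of 100 or more would spill into the next slot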
+    def get_elapsed_str(self, start_time):
+        """Return a truncated time string for debugging."""
+        elapsed = self._system_clock.now() - start_time
+        sec, nsec = start_time.seconds_nanoseconds()
+        return f"started at {sec & 0xFFFF}.{nsec//1000:06d} s (elapsed={elapsed.nanoseconds/1e9} s)"

     def heartbeat_timer_callback(self):
         """
         Allow monitoring of Mirror liveness.

+        Publishes negative time in seconds if the mirror is not active, and the loop count when running.
+
         Guarantee some event triggers wake up so that we can catch Ctrl-C in case where no active messages are available.
         """
-        self._heartbeat_pub.publish(Int32(data=self.get_clock().now().seconds_nanoseconds()[0]))
+        heartbeat = Int32(data=-(self.get_clock().now().seconds_nanoseconds()[0] & 0x0000FFFF))
+        if self._sm is not None and self._running:
+            heartbeat.data = self._sm._total_loop_count
+
+        self._heartbeat_pub.publish(heartbeat)

     def shutdown_mirror(self):
         """Shut mirror down."""
         try:
-            print(" Shutting down behavior mirror ...", flush=True)
+            print(f" Shutting down behavior mirror '{self._active_id}' ...", flush=True)
             with self._sync_lock:
-                self._stopping = True
                 if self._sm is not None and self._running:
-                    print(' Mirror is shutting down with behavior still active!', flush=True)
-                    PreemptableState.preempt = True
-
-                    stopping_cnt = 0
-                    while self._running and stopping_cnt < 200:
-                        if stopping_cnt % 49 == 0:
-                            print(' Waiting for mirror to stop ...')
-                        stopping_cnt += 1
-                        self._timing_event.wait(0.005)  # Use system time for polling check, never sim_time
+                    print(f" Mirror '{self._active_id}' is shutting down with behavior still active!", flush=True)
+                    self._wait_stop_running(self._system_clock.now())

                     if self._running:
-                        print(' Failed to stop mirror while it is already running!', flush=True)
+                        print(f" Failed to stop mirror '{self._active_id}' while it is still running!", flush=True)
                         return False
                     else:
                         self._stopping = False
@@ -144,7 +182,14 @@
                     print(' Stop heartbeat timer ...', flush=True)
                     self.destroy_timer(self._heartbeat_timer)
-                    self._timing_event.wait(0.05)
+                    threading.Event().wait(0.05)
+
+                    print(' Remove subscribers ...', flush=True)
+                    self._outcome_sub.unsubscribe_topic(Topics._OUTCOME_TOPIC, inst_id=id(self))
+
+                    print(' Remove publishers ...', flush=True)
+                    self._beh_update_pub.remove_publisher(Topics._BEHAVIOR_UPDATE_TOPIC)
+                    print(' Mirror is shutdown!', flush=True)
+                    return True
             return False  # No active behavior
@@ -155,22 +200,25 @@ def shutdown_mirror(self):
             print(traceback.format_exc().replace("%", "%%"), flush=True)

     def _mirror_structure_callback(self, msg):
+        start_time = self._system_clock.now()
         Logger.loginfo(f'--> Mirror - received updated structure with checksum id = {msg.behavior_id}')
-        thread = threading.Thread(target=self._activate_mirror, args=[msg])
+        Logger.localinfo(f'    at {start_time.nanoseconds} ns')
+        thread = threading.Thread(target=self._activate_mirror, args=[msg, start_time],
+                                  name=f"activate_mirror_{msg.behavior_id}_{start_time.nanoseconds}")
         thread.daemon = True
         thread.start()

-    def _activate_mirror(self, struct_msg):
+    def _activate_mirror(self, struct_msg, start_time):
+        set_thread_name("act" + f"{start_time.nanoseconds}"[-12:])  # only 15 chars allowed

-        self.get_logger().info(f'    waiting for sync to activate checksum id = {struct_msg.behavior_id}')
         with self._sync_lock:
-            # Logger.loginfo(f'Got sync - try to activate checksum id = {struct_msg.behavior_id}')
-            self._wait_stopping()
+            Logger.loginfo(f'Activate mirror for behavior id = {struct_msg.behavior_id} ...')
+            self._wait_stopping(start_time)

             if self._running:
                 Logger.localwarn(f'Received a new mirror structure for checksum id={struct_msg.behavior_id} '
                                  f'while mirror is already running with active id={self._active_id}; '
-                                 'adding to buffer for later')
+                                 'adding to buffer for later!')
                 self._struct_buffer.append(struct_msg)
                 return
             elif self._active_id not in (BehaviorSync.INVALID, struct_msg.behavior_id):
@@ -179,14 +227,15 @@ def _activate_mirror(self, struct_msg):
                 return

             # At this point, either active_id is invalid or same behavior checksum id
-            Logger.loginfo(f'Process the updated mirror structure for checksum id = {struct_msg.behavior_id}')
+            Logger.localinfo(f'Process the updated mirror structure for checksum id = {struct_msg.behavior_id} ...')
             self._active_id = struct_msg.behavior_id  # in case invalid
             self._struct_buffer = []
             self._mirror_state_machine(struct_msg)
             if self._sm:
-                Logger.localinfo(f'Mirror built for checksum id = {self._active_id}')
+                Logger.localinfo(f'Mirror built for behavior id = {self._active_id}.')
+                self._sm.set_name("root")
             else:
-                Logger.localwarn(f'Error processing mirror structure for behavior checksum id = {struct_msg.behavior_id}')
+                Logger.localwarn(f'Error processing mirror structure for behavior checksum id = {self._active_id} ...')
                 Logger.logwarn('Requesting a new mirror structure from onboard ...')
                 self._request_struct_pub.publish(Int32(data=struct_msg.behavior_id))
                 self._active_id = struct_msg.behavior_id
@@ -195,48 +244,63 @@ def _start_mirror(self, msg):

             self._running = True  # Ready to start execution, so flag it as so before releasing sync

         # Release sync lock and execute the mirror
-        Logger.localinfo(f'--> Mirror - begin execution of '
-                         f'active mirror for checksum id = {struct_msg.behavior_id}')
+        Logger.localinfo(f"Mirror - begin execution of active mirror behavior id = '{self._sm.id}' "
+                         f"{self.get_elapsed_str(start_time)}")
         try:
-            self._execute_mirror()
+            self._execute_mirror(start_time)
         except Exception as exc:  # pylint: disable=W0703
-            Logger.logerr(f'Exception in mirror_callback: {type(exc)} ...\n {exc}')
+            Logger.logerr(f'Exception in activate mirror: {type(exc)} started at {start_time.nanoseconds} ns ...\n {exc}')
             Logger.localerr(f"{traceback.format_exc().replace('%', '%%')}")
-            self._running = False
+            self._running = False  # normally set false in execute_mirror (but not if exception)
+
+        Logger.localwarn(f"Done executing mirror {self._active_id} from activation "
+                         f"{self.get_elapsed_str(start_time)}")
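Both structure and status messages are handed off the same way above: the ROS callback grabs a timestamp, spawns a uniquely named daemon thread, and returns so the executor is never blocked by mirror (re)construction. A stripped-down sketch of the pattern (handle_structure is a stand-in for the real worker, not the mirror's API):

    import threading
    from rclpy.clock import Clock

    def handle_structure(msg, start_time):
        """Placeholder worker; the real one builds and runs the mirror."""
        print(f"processing behavior {msg.behavior_id}, received at {start_time.nanoseconds} ns")

    def on_structure_msg(msg, system_clock=Clock()):
        # Timestamp on arrival, then hand off; daemon threads die with the node,
        # and the unique name makes each worker visible while debugging.
        start_time = system_clock.now()
        thread = threading.Thread(target=handle_structure, args=[msg, start_time],
                                  name=f"activate_mirror_{msg.behavior_id}_{start_time.nanoseconds}")
        thread.daemon = True
        thread.start()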
f"{self.get_elapsed_str(start_time)}") def _status_callback(self, msg): + start_time = self._system_clock.now() if msg.code == BEStatus.STARTED: - self.get_logger().info(f'--> Mirror - received BEstate={msg.code} - ' - f'start mirror with checksum id = {msg.behavior_id}') - thread = threading.Thread(target=self._start_mirror, args=[msg]) + Logger.localinfo(f'Mirror - received BEstate={msg.code} - start mirror') + # f'start mirror with behavior id = {msg.behavior_id} started at {start_time.nanoseconds} ns') + thread = threading.Thread(target=self._start_mirror, args=[msg, start_time], + name=f"start_mirror_{msg.behavior_id}_{start_time.nanoseconds}") thread.daemon = True thread.start() - elif self._sm: - self.get_logger().info(f'--> Mirror - received BEstate={msg.code} with active SM - stop current mirror') - thread = threading.Thread(target=self._stop_mirror, args=[msg]) + elif self._sm and not self._stopping and msg.code == BEStatus.FINISHED: + Logger.localinfo(f'Mirror - received BEstate={msg.code} - stop current mirror') + thread = threading.Thread(target=self._stop_mirror, args=[msg, start_time], + name=f"stop_mirror_{msg.behavior_id}_{start_time.nanoseconds}") thread.daemon = True thread.start() - def _start_mirror(self, msg): - self.get_logger().info(f'--> Mirror - request to start mirror with checksum id = {msg.behavior_id}') + def _start_mirror(self, msg, start_time): + set_thread_name("str" + f"{start_time.nanoseconds}"[-12:]) # only 15 chars allowed with self._sync_lock: - # self.get_logger().info(f'--> Mirror - starting mirror for {msg.behavior_id} with sync lock ...') - self._wait_stopping() + self._wait_stopping(start_time) if self._running: - Logger.localwarn(f"Tried to start mirror for id={msg.behavior_id} while " - f"mirror for id={self._active_id} is already running, will ignore.") + if self._active_id != msg.behavior_id: + Logger.loginfo(f"Tried to start mirror for id={msg.behavior_id} while" + f" mirror for id={self._active_id} is already running - will ignore start request!") + Logger.localwarn(f" (active thread started {self._active_thread_start}) " + f" {self.get_elapsed_str(start_time)}") + else: + Logger.localinfo(f" Received start request for already active {self._active_id} " + f"(active thread started {self._active_thread_start}) " + f" {self.get_elapsed_str(start_time)}") return + Logger.localinfo(f" Start request mirror for {msg.behavior_id} in thread {self.get_elapsed_str(start_time)}") + if len(msg.args) > 0: self._starting_path = "/" + msg.args[0][1:].replace("/", "_mirror/") + "_mirror" self._active_id = msg.behavior_id - + assert self._sm is None, ("No SM should be active with start command here " + f"(id = {self._active_id}, {self._sm.id}) {self.get_elapsed_str(start_time)}") if len(self._struct_buffer) > 0: - Logger.localinfo(f"Building mirror structure for checksum id={msg.behavior_id} " - f"current len(struct_buffer)={len(self._struct_buffer)} ...") + # Logger.localinfo(f"Building mirror structure for checksum id={msg.behavior_id} " + # f"current len(struct_buffer)={len(self._struct_buffer)} ...") while self._sm is None and len(self._struct_buffer) > 0: struct = self._struct_buffer[0] self._struct_buffer = self._struct_buffer[1:] @@ -244,195 +308,360 @@ def _start_mirror(self, msg): self._mirror_state_machine(struct) Logger.localinfo(f"Mirror built for checksum '{self._active_id}'") else: - Logger.logwarn('Discarded mismatching buffered structure for checksum %d' - % (struct.behavior_id)) - # else: - # Logger.localinfo(f"No existing structure buffer 
for checksum id={msg.behavior_id} " - # f"request updated structure from onboard!") + Logger.logwarn(f"Discarded mismatching buffered structure for checksum '{struct.behavior_id}'") if self._sm is None: - Logger.localwarn(f'Missing correct mirror structure for starting behavior checksum id ={msg.behavior_id}') + Logger.localwarn(f"Missing correct mirror structure for starting behavior checksum id ='{msg.behavior_id}'") + Logger.localwarn(f" Canceling start request for behavior checksum id ={msg.behavior_id} " + f"{self.get_elapsed_str(start_time)}") Logger.logwarn('Requesting mirror structure from onboard ...') self._request_struct_pub.publish(Int32(data=msg.behavior_id)) self._active_id = msg.behavior_id return + if self._sm._current_state is None: + try: + # Set default initial state for state machine + self._sm._current_state = self._state_map[struct.containers[0].state_id] + Logger.localwarn(f'Set initial state as {self._sm._current_state.path} for checksum id ={msg.behavior_id}') + except Exception: # pylint: disable=W0703 + Logger.localwarn(f'Failed to set the initial state for checksum id ={msg.behavior_id}') + self._running = True # Ready to execute, so flag as running before releasing sync lock try: - Logger.localinfo(f"Begin mirror execution for checksum '{self._active_id}' ...") - self._execute_mirror() + Logger.localinfo(f"Begin mirror execution for checksum '{self._active_id}' " + f"{self.get_elapsed_str(start_time)}") + self._execute_mirror(start_time) except Exception as exc: # pylint: disable=W0703 Logger.logerr(f'Exception in start_mirror: {type(exc)} ...\n {exc}') Logger.localerr(f"{traceback.format_exc().replace('%', '%%')}") + self._running = False # normally set false in execute_mirror (but not if exception) + + Logger.localwarn(f"Mirror execution for '{self._active_id}' is finished " + f"{self.get_elapsed_str(start_time)}") - def _stop_mirror(self, msg): - self.get_logger().info('--> Mirror - request to stop mirror for ' - f'checksum id={msg.behavior_id if isinstance(msg, BEStatus) else None} ' - f'- waiting for sync lock ...') + def _stop_mirror(self, msg, start_time): + # self.get_logger().info('--> Mirror - request to stop mirror for ' + # f'checksum id={msg.behavior_id if isinstance(msg, BEStatus) else None} ' + # f'- waiting for sync lock ...') + + set_thread_name("stp" + f"{start_time.nanoseconds}"[-12:]) # only 15 chars allowed with self._sync_lock: - # self.get_logger().info(f'--> Mirror - stopping mirror for checksum id={msg.behavior_id} with sync lock ...') + Logger.localinfo(f"Mirror '{self._active_id}' - stopping mirror " + f"for checksum id={msg.behavior_id if msg is not None else 'None'} " + f" {self.get_elapsed_str(start_time)}") self._stopping = True if self._sm is not None and self._running: - if msg is not None and msg.code == BEStatus.FINISHED: + if msg is None: + Logger.logwarn('Onboard behavior stop request (from sync)!') + elif msg.code == BEStatus.FINISHED: Logger.loginfo('Onboard behavior finished successfully.') - self._beh_update_pub.publish(self._update_topic, String()) - elif msg is not None and msg.code == BEStatus.SWITCHING: + self._beh_update_pub.publish(Topics._BEHAVIOR_UPDATE_TOPIC, String()) + elif msg.code == BEStatus.SWITCHING: self._starting_path = None Logger.loginfo('Onboard performing behavior switch.') - elif msg is not None and msg.code == BEStatus.READY: + elif msg.code == BEStatus.READY: Logger.loginfo('Onboard engine just started, stopping currently running mirror.') - self._beh_update_pub.publish(self._update_topic, 
String()) - elif msg is not None: + self._beh_update_pub.publish(Topics._BEHAVIOR_UPDATE_TOPIC, String()) + else: Logger.logwarn('Onboard behavior failed!') - self._beh_update_pub.publish(self._update_topic, String()) + self._beh_update_pub.publish(Topics._BEHAVIOR_UPDATE_TOPIC, String()) - self._wait_stop_running() + self._wait_stop_running(start_time) self._sm.destroy() else: - Logger.localinfo('No onboard behavior is active.') + Logger.localinfo('Stop request - but no onboard behavior is currently active.') self._active_id = BehaviorSync.INVALID self._sm = None self._current_struct = None - self._sub.remove_last_msg(self._outcome_topic, clear_buffer=True) + self._outcome_sub.remove_last_msg(Topics._OUTCOME_TOPIC, clear_buffer=True) if msg is not None and msg.code != BEStatus.SWITCHING: - Logger.localinfo('\033[92m--- Behavior Mirror ready! ---\033[0m') + Logger.loginfo('\033[92m--- Behavior Mirror ready! ---\033[0m') - # self.get_logger().info('--> Mirror - stopped mirror for ' - # f'checksum id={msg.behavior_id} - ready to release sync lock ...') + # Logger.localinfo("Mirror - stopped mirror for " + # f"behavior id={msg.behavior_id if msg is not None else BehaviorSync.INVALID} " + # f"- ready to release sync lock in stopping thread {self.get_elapsed_str(start_time)} s ...") self._stopping = False def _sync_callback(self, msg): + start_time = self._system_clock.now() if msg.behavior_id == self._active_id: - self.get_logger().info(f'--> Mirror - sync request for checksum id={msg.behavior_id} - restart mirror') - thread = threading.Thread(target=self._restart_mirror, args=[msg]) + Logger.logwarn(f"--> Mirror - sync request for behavior id={msg.behavior_id} - restart mirror") + thread = threading.Thread(target=self._restart_mirror, args=[msg, start_time]) thread.daemon = True thread.start() else: - Logger.error('Cannot synchronize! Different behavior is running onboard, ' + Logger.error('--> Mirror - Cannot synchronize!\n Different behavior is running onboard, ' 'please stop execution while we reset the mirror!') - self.get_logger().info(f"Cannot synchronize! onboard checksum id={msg.behavior_id} active={self._active_id}") - thread = threading.Thread(target=self._stop_mirror, args=[None]) + Logger.localwarn(f"Cannot synchronize! 
onboard checksum id={msg.behavior_id} active={self._active_id} "
+                            f"{self.get_elapsed_str(start_time)}")
+            thread = threading.Thread(target=self._stop_mirror, args=[None, start_time])
             thread.daemon = True
             thread.start()

-    def _heartbeat_callback(self, msg):
-        """Use heartbeat to monitor for persistent sync issues."""
-        if self._active_id == BehaviorSync.INVALID:
-            return  # do not check sync status if no behavior is active
-
-        if msg.behavior_id == self._active_id:
-            if self._sm is not None:
-                mirror_status = self._sm.get_latest_status()
-                mirror_status.behavior_id = self._active_id
-
-                if msg.current_state_checksum != mirror_status.current_state_checksum:
-                    if self._sync_heartbeat_mismatch_counter > 0:
-                        # Two consecutive out of sync heartbeats
-                        if msg.current_state_checksum in self._state_checksums:
-                            onboard_state_path = self._state_checksums[msg.current_state_checksum]
-                        else:
+    def _onboard_heartbeat_callback(self, msg):
+        """Use heartbeat to monitor for persistent sync issues."""
+        try:
+            if self._active_id == BehaviorSync.INVALID:
+                return  # do not check sync status if no behavior is active
+
+            if msg.behavior_id == self._active_id:
+                if self._sm is not None:
+                    mirror_status = self._sm.get_latest_status()
+                    if mirror_status.behavior_id != self._active_id:
+                        Logger.localwarn(f"mirror_status.behavior_id ({mirror_status.behavior_id}) != "
+                                         f" self._active_id ({self._active_id})")
+
+                    mirror_status.behavior_id = self._active_id
+
+                    if msg.current_state_checksums != mirror_status.current_state_checksums:
+                        if self._sync_heartbeat_mismatch_counter > 0:
+                            # Two consecutive out of sync heartbeats
                             onboard_state_path = "Unknown"
+                            if len(msg.current_state_checksums) > 0:
+                                # Use deepest state as the best path estimate
+                                ob_state_id, ob_out = StateMap.unhash(msg.current_state_checksums[-1])
+                                ob_state = self._state_map[ob_state_id]
+                                onboard_state_path = ob_state.path
+
+                            if self._sync_heartbeat_mismatch_counter % 5 == 1:
+                                Logger.error(f'OCS is possibly out of sync - onboard state is {onboard_state_path}\n'
+                                             f'     Check UI and consider manual re-sync!\n'
+                                             '     (mismatch may be temporarily understandable for rapidly changing outcomes)'
+                                             f' {self._sync_heartbeat_mismatch_counter}')
+                                Logger.localinfo(f'IDs {msg.behavior_id} {self._active_id} : \n'
+                                                 f'     Onboard IDs: {msg.current_state_checksums}\n     '
+                                                 f'Mirror IDs {mirror_status.current_state_checksums}')
+
+                                for state_hash in msg.current_state_checksums:
+                                    try:
+                                        ob_state_id, ob_out = StateMap.unhash(state_hash)
+                                        ob_state = self._state_map[ob_state_id]
+                                        Logger.localinfo(f"   onboard {ob_state_id} : '{ob_state.name}' "
+                                                         f"out={ob_out} - {ob_state.path}")
+                                    except Exception as exc:  # pylint: disable=W0703
+                                        Logger.localinfo(f"   error for onboard state hash {state_hash} - {type(exc)} - {exc}")
+                                for state_hash in mirror_status.current_state_checksums:
+                                    try:
+                                        mr_state_id, mr_out = StateMap.unhash(state_hash)
+                                        mr_state = self._state_map[mr_state_id]
+                                        Logger.localinfo(f"   mirror {mr_state_id} : '{mr_state.name.replace('_mirror', '')}' "
+                                                         f" out={mr_out} - {mr_state.path.replace('_mirror', '')}")
+                                    except Exception as exc:  # pylint: disable=W0703
+                                        Logger.localinfo(f"   error for mirror state hash {state_hash} - {type(exc)} - {exc}")
+                                Logger.localinfo(30 * "=")
-                            if self._sync_heartbeat_mismatch_counter % 5 == 1:
-                                Logger.error(f'OCS is possibly out of sync - onboard state is {onboard_state_path}\n'
-                                             f'     Check UI and consider manual re-sync!\n'
-                                             '     (mismatch may be temporarily understandable for rapidly changing outcomes)'
-                                             f' 
{self._sync_heartbeat_mismatch_counter}') - Logger.localinfo(f'{msg.behavior_id} {self._active_id} : {msg.current_state_checksum}' - f' {mirror_status.current_state_checksum}') + else: + # Start counting mismatches + self._sync_heartbeat_mismatch_counter = 1 else: - # Start counting mismatches - self._sync_heartbeat_mismatch_counter = 1 + # Reset mismatch counter + self._sync_heartbeat_mismatch_counter = 0 + elif self._active_id != 0: + Logger.warning(f'Received matching behavior id {msg.behavior_id} with no mirror state machine active!') else: - # Reset mismatch counter - self._sync_heartbeat_mismatch_counter = 0 - elif self._active_id != 0: - Logger.warning(f'Received matching behavior id {msg.behavior_id} with no mirror state machine active!') + Logger.warning(f'Received invalid behavior id {msg.behavior_id} with active id = {self._active_id} active!') + + elif msg.INVALID not in (msg.behavior_id, self._active_id): + if self._sync_heartbeat_mismatch_counter % 10 == 1: + Logger.error('Out of sync! Different behavior is running onboard, please stop execution! ' + f'{self._sync_heartbeat_mismatch_counter}') + self._sync_heartbeat_mismatch_counter += 1 + elif not self._stopping: + self._sync_heartbeat_mismatch_counter += 1 + if self._sync_heartbeat_mismatch_counter % 10 == 1: + Logger.warning(f"Mismatched behavior ids ('{msg.behavior_id}', '{self._active_id}')- " + f"please restart behavior! {self._sync_heartbeat_mismatch_counter}") else: - Logger.localinfo(f'Received invalid behavior id {msg.behavior_id} with active id = {self._active_id} active!') + Logger.localinfo(f"Heartbeat: mirror is stopping - waiting for {self._active_id} to stop ...") - elif msg.INVALID not in (msg.behavior_id, self._active_id): - if self._sync_heartbeat_mismatch_counter % 10 == 1: - Logger.error('Out of sync! Different behavior is running onboard, please stop execution! ' - f'{self._sync_heartbeat_mismatch_counter}') - self._sync_heartbeat_mismatch_counter += 1 - else: - self._sync_heartbeat_mismatch_counter += 1 - if self._sync_heartbeat_mismatch_counter % 10 == 1: - Logger.warning('Mismatched behavior ids - please restart behavior! 
'
-                                     f'{self._sync_heartbeat_mismatch_counter}')

-    def _wait_stop_running(self):
-        PreemptableState.preempt = True
-        running_cnt = 1
-        while self._running:
-            if running_cnt % 50 == 0:
-                Logger.logwarn('Waiting for another mirror to stop ...')
-            running_cnt += 1
-            self._timing_event.wait(0.02)  # Use system time for polling check, never sim_time
-        Logger.loginfo('Mirror stopped running !')

-    def _wait_stopping(self):
-        stopping_cnt = 1
-        while self._stopping:
-            if stopping_cnt % 50 == 0:
-                Logger.logwarn(f'Waiting for another mirror with {self._active_id} to stop ...')
-            stopping_cnt += 1
-            self._timing_event.wait(0.02)  # use wall clock not sim time

-    def _restart_mirror(self, msg):
-        Logger.localinfo('Wait for sync lock to restart mirror for synchronization of behavior {msg.behavior_id}...')
+        except Exception as exc:
+            Logger.localinfo(f"Exception in heartbeat callback {type(exc)} - {exc}")
+            Logger.localinfo(f"{traceback.format_exc().replace('%', '%%')}")

+    def _wait_stop_running(self, start_time):
+        if self._running:
+            PreemptableState.preempt = True
+            running_cnt = 0
+            timing_event = threading.Event()
+            while self._running:
+                if running_cnt % 2000 == 0:
+                    try:
+                        Logger.localinfo(f"Waiting for another mirror (start thread {self._active_thread_start}) to stop "
+                                         f"with id = '{self._active_id}' "
+                                         f"(this {self.get_elapsed_str(start_time)}) (running cnt={running_cnt}) "
+                                         f"preempt={PreemptableState.preempt}")
+                    except Exception:  # pylint: disable=W0703
+                        # Likely during shutdown
+                        print(f"Waiting for another mirror (start thread {self._active_thread_start}) to stop "
+                              f"with id = '{self._active_id}' "
+                              f"(this {self.get_elapsed_str(start_time)}) (running cnt={running_cnt})", flush=True)
+                running_cnt += 1
+                if running_cnt > 100000:
+                    Logger.logerr(f"Timeout waiting for another mirror ({self._active_thread_start}) to stop running "
+                                  f" with {self._active_id} (this {self.get_elapsed_str(start_time)})")
+                    return

+                timing_event.wait(0.00002)  # Use system time for polling check, never sim_time
+            Logger.localinfo(f"Mirror for active id {self._active_id} stopped running (start thread {self._active_thread_start}) "
+                             f" ({running_cnt}) (this {self.get_elapsed_str(start_time)})")
+            Logger.loginfo("Mirror stopped running!")
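The wait helpers here (_wait_stop_running above and _wait_stopping below) share one pattern: poll a flag on wall-clock time with periodic progress logs and a hard iteration cap instead of blocking forever. A stripped-down sketch of that pattern (wait_until_clear is illustrative, not the mirror's API):

    import threading

    def wait_until_clear(is_busy, poll_s=0.00002, max_polls=100000):
        """Poll is_busy() on wall time (never sim_time); give up after max_polls."""
        timing_event = threading.Event()
        polls = 0
        while is_busy():
            polls += 1
            if polls > max_polls:
                return False  # timed out - caller logs the failure and bails out
            timing_event.wait(poll_s)  # short sleep without a shared event object
        return True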
", flush=True) + stopping_cnt += 1 + if stopping_cnt > 100000: + Logger.logerr(f"Timeout waiting for another mirror to finish stopping with '{self._active_id}'" + f" (this {self.get_elapsed_str(start_time)})") + return + + timing_event.wait(0.00002) # use wall clock not sim time + Logger.localinfo(f"Mirror completed stopping for active id {self._active_id} " + f" (this {self.get_elapsed_str(start_time)}) ({stopping_cnt})!") + Logger.loginfo("Mirror stopped.") + + def _reinitialize_state_machine(self, state_machine): + """Reinitialize existing SM when restarting mirror during sync (faster than rebuilding current structure).""" + state_machine._entering = False + state_machine._current_state = None + for state in state_machine._states: + state._entering = True + if isinstance(state, MirrorStateMachine): + self._reinitialize_state_machine(state) + + def _restart_mirror(self, msg, restart_time): + set_thread_name("rsm" + f"{restart_time.nanoseconds}"[-12:]) # only 15 chars allowed with self._sync_lock: - Logger.loginfo('Restarting mirror for synchronization of behavior checksum id ={msg.behavior_id}...') - self._sub.remove_last_msg(self._outcome_topic, clear_buffer=True) if self._sm is not None and self._running: - self._wait_stop_running() - self._sm = None + self._wait_stop_running(restart_time) + + Logger.localinfo(f"Restarting mirror for synchronization of behavior checksum id ={msg.behavior_id} " + f"in thread {self.get_elapsed_str(restart_time)} ...") + + # Clear existing outcome messages + self._outcome_sub.remove_last_msg(Topics._OUTCOME_TOPIC, clear_buffer=True) + MirrorState._last_state_id = None + MirrorState._last_state_outcome = None - if msg.current_state_checksum in self._state_checksums: - current_state_path = self._state_checksums[msg.current_state_checksum] - self._starting_path = "/" + current_state_path[1:].replace("/", "_mirror/") + "_mirror" - Logger.loginfo(f"Current state: {current_state_path}") try: - self._mirror_state_machine(self._current_struct) - if self._sm: - Logger.loginfo(f'Mirror built for behavior checksum id = {msg.behavior_id}.') - else: - Logger.localwarn(f'Missing correct mirror structure for restarting behavior checksum id ={msg.behavior_id}') - Logger.logwarn('Requesting mirror structure from onboard ...') - self._request_struct_pub.publish(Int32(data=msg.behavior_id)) + self._starting_path = None + if self._sm is not None and self._sm.id == msg.behavior_id: self._active_id = msg.behavior_id - return - - except (AttributeError, RuntimeError): + start = time.time() + PreemptableState.preempt = False # Reset preempt flag before restarting + self._reinitialize_state_machine(self._sm) + end = time.time() + Logger.localinfo(f"Done reinitializing the existing state machine with matching " + f"behavior id='{msg.behavior_id}' in thread " + f"{self.get_elapsed_str(restart_time)} (reinitialized in {end-start} seconds) ") + else: + # Reconstruct the state machine from structure + self._mirror_state_machine(self._current_struct) + if self._sm is None: + Logger.localwarn(f"Missing correct mirror structure for restarting " + f"behavior checksum id ={msg.behavior_id}\n" + f" Canceling restart request for behavior checksum id ={msg.behavior_id} " + f"in thread {self.get_elapsed_str(restart_time)} ") + Logger.logwarn('Requesting mirror structure from onboard ...') + self._pub.publish(Topics._REQUEST_STRUCTURE_TOPIC, Int32(data=msg.behavior_id)) + self._active_id = msg.behavior_id + return + + Logger.localinfo(f' Reset active states using current state checksums: 
{msg.current_state_checksums}')
+                for active_state in msg.current_state_checksums:
+                    # For now, just set the active state to the lowest order state with no outcome
+                    state_id, outcome = StateMap.unhash(active_state)
+                    state = self._state_map[state_id]
+                    if state is not None:
+                        if self._starting_path is None:
+                            self._starting_path = state.path
+
+                        if outcome != 0 and MirrorState._last_state_id is None:
+                            # Some outcome to process
+                            MirrorState._last_state_id = state_id
+                            MirrorState._last_state_outcome = outcome
+
+                        state._entering = False  # state considered already active with sync
+                        parent = state.parent
+                        while parent is not None:
+                            parent._entering = False  # state considered already active
+                            if isinstance(parent, MirrorConcurrencyContainer):
+                                if parent._current_state is None:
+                                    parent._current_state = []
+                                if state not in parent._current_state:
+                                    parent._current_state.append(state)
+                            elif isinstance(parent, MirrorStateMachine):
+                                parent._current_state = state
+                            else:
+                                Logger.logerr(f"  Sync: Unexpected parent reference {parent.name} ({type(parent)}) "
+                                              f"from {state.name} in {self._sm.name}")
+                            state = parent
+                            parent = parent.parent
+                    else:
+                        Logger.logerr(f"  Unknown state from {state_id} in {self._sm.name} from restart "
+                                      f"in thread {self.get_elapsed_str(restart_time)}!")
+                curst = self._sm._current_state
+                self._sm._last_deep_states_list = self._sm.get_deep_states()
+                Logger.localwarn(f"  Restart SM with current top-level state = {curst.name if curst is not None else 'None'} "
+                                 f"starting path={self._starting_path}")
+                Logger.localinfo(f"  active states = {self._sm.get_latest_status()}")
+                self._running = True  # set running while we have sync lock
+            except (AttributeError, RuntimeError) as exc:
                 Logger.loginfo(f'Stopping synchronization because behavior {msg.behavior_id} has stopped.')
+                Logger.localinfo(f"{type(exc)} - {exc}")
+                return

         try:
-            Logger.localinfo('Execute mirror after sync lock of restart mirror'
-                             f' for synchronization of behavior {msg.behavior_id}...')
-            self._execute_mirror()
+            Logger.localinfo("Execute mirror after sync lock of restart mirror"
+                             f" for synchronization of behavior '{msg.behavior_id}' from restart "
+                             f"in thread {self.get_elapsed_str(restart_time)} ...")
+            self._execute_mirror(restart_time)
         except Exception as exc:  # pylint: disable=W0703
-            Logger.logerr(f'Exception in restart_mirror: {type(exc)} ...\n  {exc}')
+            Logger.logerr(f'Exception in restart_mirror in thread {self.get_elapsed_str(restart_time)}:\n  {type(exc)} - {exc}')
             Logger.localerr(f"{traceback.format_exc().replace('%', '%%')}")
+        self._running = False

-    def _execute_mirror(self):
-        self._running = True
+        Logger.localwarn(f"Finished execution of restart request for behavior checksum id ={msg.behavior_id} "
+                         f"in thread {self.get_elapsed_str(restart_time)}.")
+
+    def _execute_mirror(self, start_time):
+        if self._active_thread_start is not None:
+            Logger.localwarn(f"Trying to start execution for {start_time.nanoseconds} but "
+                             f"older {self._active_thread_start} is still active!")
+            Logger.logerr("Mirror issue - shutdown all threads - need to reattach!")
+            PreemptableState.preempt = True
+            return

-        Logger.loginfo("Executing mirror...")
+        self._running = True
+        self._active_thread_start = start_time.nanoseconds  # Track starting time
+        Logger.loginfo("Executing mirror ...")
+        Logger.localinfo(f"  in thread {self.get_elapsed_str(start_time)} s ")

         if self._starting_path is not None:
             LockableStateMachine.path_for_switch = self._starting_path
             Logger.loginfo("Starting mirror in state " + 
self._starting_path) self._starting_path = None - result = 'preempted' + result = PreemptableStateMachine._preempted_name try: - result = self._sm.spin() - Logger.loginfo(f"Mirror for active id = {self._active_id} finished with result '{result}'") + result = self._sm.spin(start_time) + Logger.localinfo(f"Mirror finished spin with result '{result}' after {self.get_elapsed_str(start_time)} s") self._sm.destroy() except Exception as exc: try: @@ -443,26 +672,50 @@ def _execute_mirror(self): # so just try a simple print print('\n(_execute_mirror Traceback): Caught exception on preempt:\n%s' % str(exc), flush=True) print(traceback.format_exc().replace("%", "%%"), flush=True) - result = 'preempted' + result = PreemptableStateMachine._preempted_name + self._active_thread_start = None self._running = False def _mirror_state_machine(self, msg): + start = time.time() # Track construction time try: self._current_struct = msg - self._state_checksums = {} + self._state_map = StateMap() root = None for con_msg in msg.containers: if con_msg.path.find('/') == -1: root = con_msg.path break + + # self.get_logger().info(f"Constructing top-level mirror for {root} ...") self._add_node(msg, root) - # calculate checksums of all states - for con_msg in msg.containers: - if con_msg.path.find('/') != -1: - self._state_checksums[zlib.adler32(con_msg.path.encode()) & 0x7fffffff] = con_msg.path + + if self._sm: + self._sm.id = msg.behavior_id + # verify checksums of all states + # self.get_logger().info(f"Validate checksums for all states in '{root}' ...") + for con_msg in msg.containers: + if con_msg.path.find('/') != -1: + hash_val = self._state_map.get_path_hash(con_msg.path) + if hash_val: + state = self._state_map[hash_val] + for path_seg in con_msg.path.split("/"): + assert path_seg in state.path, (f"Mismatched state id={state.state_id} for {state.path} vs. " + f"({hash_val}) {con_msg.path}") + end = time.time() + Logger.localinfo(f"Constructed mirror for behavior id ='{self._sm.id}' in {end - start} seconds !") + return # success here + else: + Logger.logerr(f"Failed to construct mirror SM for {root}!") + except Exception as exc: self.get_logger().warn(f"_mirror_statemachine Exception: {type(exc)} - {exc}") + self.get_logger().warn(f"{traceback.format_exc().replace('%', '%%')}") + self._sm = None + + end = time.time() + Logger.localinfo(f"Failed to construct mirror of state machine '{msg.behavior_id}' in {end - start} seconds !") def _add_node(self, msg, path): container = None @@ -479,11 +732,20 @@ def _add_node(self, msg, path): path_frags = path.split('/') container_name = path_frags[len(path_frags) - 1] + if len(container.children) > 0: sm_outcomes = [] for outcome in container.outcomes: sm_outcomes.append(outcome + '_mirror') - sm = MirrorStateMachine(outcomes=sm_outcomes) + if container.type == OperatableStateMachine.ContainerType.ConcurrencyContainer.value: + sm = MirrorConcurrencyContainer(container_name, path, outcomes=sm_outcomes) + else: + sm = MirrorStateMachine(container_name, path, outcomes=sm_outcomes) + + self._state_map.add_state(path, sm) + assert sm.state_id == container.state_id, ("Failed to validate container state_id " + f"= {sm.state_id} vs. 
{container.state_id}") + with sm: for child in container.children: self._add_node(msg, path + '/' + child) @@ -493,11 +755,21 @@ def _add_node(self, msg, path): container_transitions[sm_outcomes[i]] = transitions[container.outcomes[i]] MirrorStateMachine.add(container_name + '_mirror', sm, transitions=container_transitions) else: + # Add instance attributes to top-level state machine + sm._state_map = self._state_map + sm._total_loop_count = 0 + self._sm = sm + else: - MirrorStateMachine.add(container_name + '_mirror', - MirrorState(container_name, path, container.outcomes, container.autonomy), - transitions=transitions) + # Basic state + # self.get_logger().info(f" Adding mirror state {container_name} at {path} ...") + assert container.type == 0, f"{container_name} - Non-containers should have type 0 not {container.type}!" + mrst = MirrorState(container_name, path, container.outcomes, container.autonomy) + self._state_map.add_state(path, mrst) + assert mrst.state_id == container.state_id, ("Failed to validate container state_id " + f"= {mrst.state_id} vs. {container.state_id}") + MirrorStateMachine.add(container_name + '_mirror', mrst, transitions=transitions) def _preempt_callback(self, msg): # pylint: disable=unused-argument diff --git a/flexbe_mirror/flexbe_mirror/mirror_concurrency_container.py b/flexbe_mirror/flexbe_mirror/mirror_concurrency_container.py new file mode 100644 index 0000000..95a6d6a --- /dev/null +++ b/flexbe_mirror/flexbe_mirror/mirror_concurrency_container.py @@ -0,0 +1,138 @@ +# Copyright 2023 Christopher Newport University +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the Christopher Newport University nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ + +"""Simplified state machine for use with FlexBE UI State machine mirror.""" + +from flexbe_core import Logger +from flexbe_core.core import StateMachine + +from flexbe_mirror.mirror_state import MirrorState +from flexbe_mirror.mirror_state_machine import MirrorStateMachine + + +class MirrorConcurrencyContainer(MirrorStateMachine): + """Manage updates of ConcurrencyContainer in the FlexBE mirror in response to changes.""" + + def __init__(self, target_name, target_path, *args, **kwargs): + MirrorStateMachine.__init__(self, target_name, target_path, *args, **kwargs) + self._returned_outcomes = {} + + def on_enter_mirror(self, userdata): + self.assert_consistent_transitions() + self._current_state = self._states[:] + self._userdata = None # Mirror does not use user data + self._last_outcome = None + self._entering = False + for state in self._states: + # Force on_enter at state level + state._entering = True # force state to handle enter on first execute + state._last_execution = None + + MirrorState.publish_update(self._target_path) + + def on_exit_mirror(self, userdata, desired_outcome=-1, states=None): + """Exit state and prepare for next entry (outcome -1 means preempt).""" + self._entering = True + for state in self._states if states is None else states: + if state in self._returned_outcomes: + continue # skip states that already exited themselves + state._entering = True + state.on_exit_mirror(userdata, -1) # preempted + + self._current_state = None + self._returned_outcomes = {} + self._last_outcome = self.outcomes[desired_outcome] + return self._last_outcome + + def execute_mirror(self, userdata): + """ + Define custom mirror execute method. + + Does not need to handle user state like onboard state machine + """ + if self._entering: + self.on_enter_mirror(userdata) + + if MirrorState._last_state_id == self.state_id: + # Handle outcome of this internal SM + if self._last_outcome is not None: + Logger.localwarn(f"Mirror SM concurrency execute for '{self.name}' ({self._state_id}) : " + f"Already processed outcome={self._last_outcome} for " + f"outcome index={MirrorState._last_state_outcome} - reprocessing anyway") + + MirrorState._last_state_id = None # Flag that the message was handled + if MirrorState._last_state_outcome is not None: + desired_outcome = MirrorState._last_state_outcome + MirrorState._last_state_outcome = None # Flag that the message was handled + return self.on_exit_mirror(userdata, desired_outcome, + states=[s for s in self._states if (s.name not in self._returned_outcomes + or self._returned_outcomes[s.name] is None)]) + + return self._execute_current_state_mirror(userdata) + + def _execute_current_state_mirror(self, userdata): + self._current_state = [] # Concurrent updates active states list each cycle + # Handle interior containers + for state in self._states: + if state.name in self._returned_outcomes and self._returned_outcomes[state.name] is not None: + continue # already done with executing + + out = state.execute_mirror(userdata) + if out is not None: + self._returned_outcomes[state.name] = out + MirrorStateMachine._execute_flag = True # Spin it again on change + else: + self._current_state.append(state) + # Handle container return in execute + return None + + def get_deep_states(self): + """ + Recursively look for the currently executing states. + + Traverse all state machines down to the terminal child state that is not a container. + + EXCEPT for ConcurrencyContainers. Those are both active state and container. 
+
+        @return: The list of active states (not state machine)
+        """
+        deep_states = [self]  # Concurrency acts as both state and container for this purpose
+        if isinstance(self._current_state, list):
+            for state in self._current_state:
+                if isinstance(state, StateMachine):
+                    deep_states.extend(state.get_deep_states())
+                else:
+                    deep_states.append(state)
+        elif self._current_state is not None:
+            Logger.localerr(f"MirrorConcurrencyContainer.get_deep_states '{self.name}' ({self._current_state._state_id})\n"
+                            f"  - current state is NOT a list! Error type={type(self._current_state)}")
+            Logger.localerr(f"  '{self._current_state.name}' ({self._current_state._state_id})")
+            raise TypeError(f"MirrorConcurrencyContainer.get_deep_states {self.name} - "
+                            f"current state is NOT a list! Error type={type(self._current_state)}")
+        return deep_states
diff --git a/flexbe_mirror/flexbe_mirror/mirror_state.py b/flexbe_mirror/flexbe_mirror/mirror_state.py
index 958fd15..a8c2a7f 100644
--- a/flexbe_mirror/flexbe_mirror/mirror_state.py
+++ b/flexbe_mirror/flexbe_mirror/mirror_state.py
@@ -32,6 +32,9 @@
 from std_msgs.msg import String

 from flexbe_core import EventState
+from flexbe_core import Logger
+from flexbe_core.core.preemptable_state import PreemptableState
+from flexbe_core.core.topics import Topics
 from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached


@@ -43,29 +46,62 @@ class MirrorState(EventState):
     and is designed in a way to be created dynamically.
     """

+    # Hold data from last outcome message being processed by mirror
+    _last_state_id = None
+    _last_state_outcome = None
+    _last_target_path = None
+
+    _pub = None
+
     def __init__(self, target_name, target_path, given_outcomes, outcome_autonomy):  # pylint: disable=unused-argument
         super().__init__(outcomes=given_outcomes)
-        self.set_rate(100)
+        self._outcomes.append(PreemptableState._preempted_name)  # Add preempted to outcomes list (for -1 outcome)
         self._target_name = target_name
-        self._target_path = target_path
+        self._target_path = "/" + "/".join(target_path.split("/")[1:])  # Drop top-level name
+        MirrorState._last_target_path = None  # reset any time that we build a new state machine
+
+        if MirrorState._pub is None:
+            # Allow access to standard proxies initialized by flexbe_mirror
+            MirrorState._pub = ProxyPublisher()
+
+    @classmethod
+    def publish_update(cls, target_path):
+        """Publish latest deep state for UI control."""
+        if target_path != MirrorState._last_target_path:
+            MirrorState._last_target_path = target_path
+            MirrorState._pub.publish(Topics._BEHAVIOR_UPDATE_TOPIC, String(data=target_path))

-        self._outcome_topic = 'flexbe/mirror/outcome'
+    def execute_mirror(self, userdata):
+        if self._entering:
+            self.on_enter_mirror(userdata)

-        # Allow access to standard proxies
-        self._pub = ProxyPublisher()
-        self._sub = ProxySubscriberCached()
+        if MirrorState._last_state_id == self.state_id:
+            # Only process if relevant to this state
+            if self._last_outcome is not None:
+                Logger.localwarn(f"Already processed outcome={self._last_outcome} for "
+                                 f" state '{self.name}' ({self.state_id}) given new "
+                                 f"outcome index={MirrorState._last_state_outcome} - reprocessing anyway")

-    def execute(self, userdata):
-        if self._sub.has_buffered(self._outcome_topic):
-            msg = self._sub.get_from_buffer(self._outcome_topic)
-            if msg.data < len(self.outcomes):
-                return self.outcomes[msg.data]
+            MirrorState._last_state_id = None  # Flag that the message was handled
+            if MirrorState._last_state_outcome is not None:
+                desired_outcome = MirrorState._last_state_outcome
+ 
MirrorState._last_state_outcome = None # Flag that the message was handled + return self.on_exit_mirror(userdata, desired_outcome) return None - def on_enter(self, userdata): - self._pub.publish('flexbe/behavior_update', - String(data="/" + "/".join(self._target_path.split("/")[1:]))) + def on_enter_mirror(self, userdata): + self._entering = False + self._last_outcome = None + MirrorState.publish_update(self._target_path) - def on_stop(self): - """Call when mirror SM stops.""" + def on_exit_mirror(self, userdata, desired_outcome): + try: + self._last_outcome = self.outcomes[desired_outcome] + return self._last_outcome + except Exception as exc: # pylint: disable=W0703 + Logger.localerr(f"Error: MirrorState execute for {self.name}: " + f"outcome index {desired_outcome} is not relevant ({len(self.outcomes)}) ") + import traceback + Logger.localerr(f"{type(exc)} - {exc}\n{traceback.format_exc().replace('%', '%%')}") + return None diff --git a/flexbe_mirror/flexbe_mirror/mirror_state_machine.py b/flexbe_mirror/flexbe_mirror/mirror_state_machine.py index f0e244f..b92a2db 100644 --- a/flexbe_mirror/flexbe_mirror/mirror_state_machine.py +++ b/flexbe_mirror/flexbe_mirror/mirror_state_machine.py @@ -30,12 +30,19 @@ """Simplified state machine for use with FlexBE UI State machine mirror.""" from threading import Event -import zlib import rclpy +from std_msgs.msg import UInt32 + from flexbe_core import Logger -from flexbe_core.core import PreemptableStateMachine +from flexbe_core.core import PreemptableState, PreemptableStateMachine +from flexbe_core.core import StateMachine +from flexbe_core.core import StateMap +from flexbe_core.core.exceptions import StateError +from flexbe_core.core.topics import Topics +from flexbe_core.proxy import ProxySubscriberCached + from flexbe_msgs.msg import BehaviorSync from flexbe_mirror.mirror_state import MirrorState @@ -43,36 +50,119 @@ class MirrorStateMachine(PreemptableStateMachine): """Manage updates of the FlexBE mirror in response to changes.""" - def __init__(self, *args, **kwargs): + _execute_flag = True # on change, we should execute cycle + + def __init__(self, target_name, target_path, *args, **kwargs): super().__init__(*args, **kwargs) - self._timing_event = Event() + self.id = None + self._entering = True + self._target_name = target_name + self._target_path = "/" + "/".join(target_path.split("/")[1:]) # Drop top-level name - def spin(self, userdata=None): + def spin(self, start_time, userdata=None): """Spin the execute in loop for Mirror.""" - outcome = None - while rclpy.ok(): - outcome = self.execute(userdata) - - # Store the information for safely passing to heartbeat thread - deep_state = self.get_deep_state() - if deep_state: - if deep_state.name != self._last_deep_state_name: - with self._status_lock: - self._last_deep_state_path = str(deep_state.path) - self._last_deep_state_name = str(deep_state.name) - else: - if self._last_deep_state_name is not None: - with self._status_lock: - self._last_deep_state_path = None - self._last_deep_state_name = None - - if outcome is not None: - Logger.localinfo(f"MirrorStateMachine {self.name} spin() - done with outcome={outcome}") - break + Logger.localinfo(f"Mirror: begin spinning for '{self.name}' ({self.id}) " + f" in thread with start time = {start_time.nanoseconds} ns") + + timing_event = Event() + + # Only the top-level SM needs the outcome topic subscribed by mirror + outcome_sub = ProxySubscriberCached() + + MirrorState._last_state_id = None + MirrorState._last_state_outcome = None + + 
MirrorStateMachine._execute_flag = True  # Force a first pass regardless of messages
+        loop_count = 0
+        self._total_loop_count = 0  # Attribute only added to top-level SM
+        outcome = PreemptableState._preempted_name
+        if self._current_state is None:
+            self.on_enter_mirror(userdata)
+            Logger.localinfo(f"Mirror: set initial state for top-level '{self.name}' ({self.state_id}) ({self.id}) "
+                             f" with state = '{self._current_state.name}' ({self._current_state._state_id})")
+
+        while rclpy.ok() and not PreemptableState.preempt:
+            self._total_loop_count += 1
+            loop_count += 1  # For periodic updates
+            try:
+                # Grab the latest outcome
+                if outcome_sub.has_buffered(Topics._OUTCOME_TOPIC):
+                    # Mirror states only change in response to an outcome message.
+                    # We will process every message to ensure consistency
+                    msg = outcome_sub.get_from_buffer(Topics._OUTCOME_TOPIC)
+                    state_id, outcome = StateMap.unhash(msg.data)
+                    # Logger.localinfo(f"  outcome {state_id} {outcome} in thread started at {start_time.nanoseconds}")

-            # Process fast independent of simulation time in order to keep up with onboard
-            self._timing_event.wait(0.001)
+                    # Store data for handling by execute function in appropriate state
+                    MirrorState._last_state_id = state_id
+                    MirrorState._last_state_outcome = outcome
+                    if MirrorState._last_state_id == self.state_id:
+                        # Handle this top-level outcome
+                        if self._last_outcome is not None:
+                            Logger.localwarn(f"Mirror SM top-level spin for {self.name} : "
+                                             f"Already processed outcome={self._last_outcome} for "
+                                             f" state '{self.name}' ({self.state_id}) given new "
+                                             f"outcome index={MirrorState._last_state_outcome} - "
+                                             f"reprocessing anyway in thread started at {start_time.nanoseconds}")
+                        MirrorState._last_state_id = None  # Flag that the message was handled
+                        if MirrorState._last_state_outcome is not None:
+                            outcome = self.on_exit_mirror(userdata, MirrorState._last_state_outcome)
+                            MirrorState.publish_update(self._target_path)  # Notify back at top-level before exit
+                            MirrorState._last_state_outcome = None  # Flag that the message was handled
+                            Logger.localinfo(f"  top-level outcome {outcome} for {state_id} "
+                                             f"in thread started at {start_time.nanoseconds}")
+                        break  # Outcome at the top-level
+
+                    # Some change to process
+                    MirrorStateMachine._execute_flag = True
+
+                # No top-level outcome, so execute the active states looking for outcomes
+                if MirrorStateMachine._execute_flag:
+                    MirrorStateMachine._execute_flag = False
+                    # Process the latest outcome by active states
+                    outcome = self._execute_current_state_mirror(userdata)
+
+                    if MirrorState._last_state_id is not None or MirrorState._last_state_outcome is not None:
+                        # This should not happen unless system is out of sync
+                        Logger.logwarn(f"MirrorStateMachine '{self.name}' ({self._state_id}) spin() - "
+                                       f"no state handled outcome from {MirrorState._last_state_id} "
+                                       f"outcome index={MirrorState._last_state_outcome}")
+
+                    # Store the information for safely passing to heartbeat thread
+                    deep_states = self.get_deep_states()
+                    if deep_states != self._last_deep_states_list:
+                        MirrorStateMachine._execute_flag = True  # Execute once more after any change,
+                        with self._status_lock:
+                            self._last_deep_states_list = deep_states
+
+                    # In case of internal return in concurrency container send another update to UI
+                    # for the deepest active state
+                    if len(deep_states) > 0:
+                        MirrorState.publish_update(deep_states[-1]._target_path)
+
+                    if outcome is not None:
+                        Logger.localinfo(f"MirrorStateMachine '{self.name}' ({self._state_id}) spin() - outcome = {outcome}"
+                                         f" 
- wait for confirming top-level outcome message!") + + else: + # Process fast independent of simulation time in order to keep up with onboard + if loop_count > 50000: + loop_count = 0 # periodic spam for updates + Logger.localinfo(f" SM spinner -'{self.name}' ({self.id}) - " + f"after {self._total_loop_count} spins in thread started at {start_time.nanoseconds}") + timing_event.wait(0.0002) # minor wait for next message if we didn't process anything previous loop + + except Exception as exc: # pylint: disable=W0703 + Logger.logerr(f" Exception in mirror spinner -'{self.state_id}' ({self.id})") + Logger.localerr(f"{type(exc)} - {exc}") + import traceback + Logger.localinfo(f"{traceback.format_exc().replace('%', '%%')}") + break + + Logger.localinfo(f"Mirror: done spinning for '{self.name}' ({self.id}) with outcome = '{outcome}' " + f" after {self._total_loop_count} spins" + f" in thread started at {start_time.nanoseconds}") return outcome def destroy(self): @@ -80,28 +170,114 @@ def destroy(self): self._notify_stop() def _notify_stop(self): - self.on_stop() for state in self._states: if isinstance(state, MirrorState): state.on_stop() if isinstance(state, MirrorStateMachine): state._notify_stop() - def get_latest_status(self): + def _execute_current_state_mirror(self, userdata): + """Define custom mirror execute method.""" + if self._current_state is None: + # Current state might be None while waiting on final outcome message to exit SM + return None + + # Process the current state + outcome = self._current_state.execute_mirror(userdata) + if outcome is not None: + MirrorStateMachine._execute_flag = True # Spin it again + try: + target = self._transitions[self._current_state.name][outcome] + self._current_state = self._labels.get(target) # Get the new state + if self._current_state is None: + return target + else: + self._current_state._entering = True + return None + except KeyError as exc: + err_msg = f"Returned outcome '{outcome}' is not registered as a transition from '{self._current_state}'" + Logger.localerr(f"Mirror SM execute for '{self.name}' ({self._state_id}): {err_msg}") + Logger.localinfo(f" {self.name} ({self._state_id}) - labels={self._labels}") + Logger.localinfo(f" {self.name} ({self._state_id}) - transitions={self._transitions}") + Logger.localinfo(f" {self.name} ({self._state_id}) - outcomes={self._outcomes}") + raise StateError(err_msg) from exc + + # we handle internal SM transitions using mirror outcome messages + return None + + def execute_mirror(self, userdata): + """Execute this SM as an internal state.""" + if self._entering: + self.on_enter_mirror(userdata) + + if MirrorState._last_state_id == self.state_id: + # Handle outcome of this internal SM + if self._last_outcome is not None: + Logger.localwarn(f"Mirror SM execute for '{self.name}' ({self._state_id}) : " + f"Already processed outcome={self._last_outcome} for " + f"outcome index={MirrorState._last_state_outcome} - reprocessing anyway") + + MirrorState._last_state_id = None # Flag that the message was handled + if MirrorState._last_state_outcome is not None: + desired_outcome = MirrorState._last_state_outcome + MirrorState._last_state_outcome = None + return self.on_exit_mirror(userdata, desired_outcome) + + return self._execute_current_state_mirror(userdata) + + def get_deep_states(self): """ - Return the latest execution information as a BehaviorSync message. + Recursively look for the currently executing states. - This version is typically called by the OCS mirror, so we do - some extra cleanup. 
+        Traverse all state machines down to the terminal child state that is not a container.
+        (Except concurrency containers, which override this method)
+
+        @return: The list of active states (not state machine)
         """
-        with self._status_lock:
-            path = self._last_deep_state_path
+        if isinstance(self._current_state, StateMachine):
+            return self._current_state.get_deep_states()
+        return [self._current_state] if self._current_state is not None else []  # Return as a list

+    def get_latest_status(self):
+        """Return the latest execution information as a BehaviorSync message."""
         msg = BehaviorSync()
-        msg.behavior_id = -1
-        if path is not None:
-            path_clean = path.replace("_mirror", "")  # Ignore mirror decoration for comparison with onboard
-            msg.current_state_checksum = zlib.adler32(path_clean.encode()) & 0x7fffffff
+        with self._status_lock:
+            active_states = self._last_deep_states_list
+        msg.behavior_id = self.id if self.id is not None else BehaviorSync.INVALID
+
+        if active_states is not None:
+            for active in active_states:
+                if active is not None:
+                    outcome_index = 0
+                    if active._last_outcome is not None:
+                        try:
+                            outcome_index = active._outcomes.index(active._last_outcome)
+                        except Exception:  # pylint: disable=W0703
+                            Logger.localerr(f"Invalid outcome='{active._last_outcome}' for '{active}' - ignore outcome!")
+
+                    msg.current_state_checksums.append(StateMap.hash(active, outcome_index))
         else:
-            msg.current_state_checksum = -1
+            Logger.localinfo(f"  Mirror get_latest_status: No active states for {msg.behavior_id}!")
         return msg
+
+    def on_enter_mirror(self, userdata):
+        self._entering = False
+        self._last_outcome = None
+        self.assert_consistent_transitions()
+        self._current_state = self.initial_state
+        self._userdata = None  # not used in mirror
+        MirrorState.publish_update(self._target_path)
+
+    def on_exit_mirror(self, userdata, desired_outcome=-1):
+        try:
+            if self._current_state is not None:
+                self._current_state._entering = True
+                self._current_state.on_exit(userdata, -1)  # Preempted
+            self._last_outcome = self.outcomes[desired_outcome]
+            self._current_state = None
+            self._entering = True
+            return self._last_outcome
+        except Exception:  # pylint: disable=W0703
+            Logger.localerr(f"Error: MirrorStateMachine execute for {self.name}: "
+                            f"outcome index {desired_outcome} is not relevant ({len(self.outcomes)}) ")
+            return None
diff --git a/flexbe_msgs/CHANGELOG.rst b/flexbe_msgs/CHANGELOG.rst
index 79bd37c..8efa18a 100644
--- a/flexbe_msgs/CHANGELOG.rst
+++ b/flexbe_msgs/CHANGELOG.rst
@@ -1,6 +1,10 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Changelog for package flexbe_msgs
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Forthcoming
+-----------
+* Support for new state id and concurrency handling
+
 2.3.3 (2023-08-09)
 ------------------

diff --git a/flexbe_msgs/msg/BehaviorSync.msg b/flexbe_msgs/msg/BehaviorSync.msg
index 3f53c7f..f78b7b6 100644
--- a/flexbe_msgs/msg/BehaviorSync.msg
+++ b/flexbe_msgs/msg/BehaviorSync.msg
@@ -2,4 +2,4 @@
 int32 INVALID = 0  # denotes unset id or checksum

 int32 behavior_id   # id (checksum) of particular behavior encoding
-int32 current_state_checksum
\ No newline at end of file
+int32[] current_state_checksums
\ No newline at end of file
diff --git a/flexbe_msgs/msg/Container.msg b/flexbe_msgs/msg/Container.msg
index dac4d3a..83bcb34 100644
--- a/flexbe_msgs/msg/Container.msg
+++ b/flexbe_msgs/msg/Container.msg
@@ -1,5 +1,7 @@
-string path
+int32 state_id  # Unique 23-bit identifier for state
+string path     # String path from top-level state machine
 string[] children
 string[] outcomes
 string[] 
transitions +int8 type # 0=State, StateMachine, PriorityContainer,ConcurrencyContainer int8[] autonomy diff --git a/flexbe_onboard/CHANGELOG.rst b/flexbe_onboard/CHANGELOG.rst index 74d9dbb..5fe05db 100644 --- a/flexbe_onboard/CHANGELOG.rst +++ b/flexbe_onboard/CHANGELOG.rst @@ -1,6 +1,10 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Changelog for package flexbe_onboard ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Forthcoming +----------- +* Support for new state id and concurrency handling + 2.3.3 (2023-08-09) ------------------ * destroy sub/pub/client in executor thread diff --git a/flexbe_onboard/flexbe_onboard/flexbe_onboard.py b/flexbe_onboard/flexbe_onboard/flexbe_onboard.py index 8eebd09..f17e14d 100644 --- a/flexbe_onboard/flexbe_onboard/flexbe_onboard.py +++ b/flexbe_onboard/flexbe_onboard/flexbe_onboard.py @@ -12,7 +12,8 @@ # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # -# * Neither the name of the Philipp Schillinger, Team ViGIR, Christopher Newport University nor the names of its +# * Neither the name of the Philipp Schillinger, Team ViGIR, +# Christopher Newport University nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # @@ -42,17 +43,27 @@ import time import zlib +try: + from prctl import set_name as set_thread_name +except Exception: + def set_thread_name(name): + print("Python thread names are not visible in ps/top unless you install prctl") + import rclpy from rclpy.exceptions import ParameterNotDeclaredException from rclpy.node import Node +from rclpy.qos import QoSProfile, QoSDurabilityPolicy -from flexbe_core import BehaviorLibrary, Logger -from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached +from flexbe_core import BehaviorLibrary, Logger, MIN_UI_VERSION from flexbe_core.core.state_machine import StateMachine +from flexbe_core.core.topics import Topics +from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached from flexbe_msgs.msg import BehaviorSelection, BehaviorSync, BEStatus, CommandFeedback, UserdataInfo from flexbe_msgs.srv import GetUserdata +from std_msgs.msg import String + class FlexbeOnboard(Node): """Control the execution of robot behaviors.""" @@ -75,14 +86,23 @@ def __init__(self): self._behavior_lib = BehaviorLibrary(self) # prepare communication - self.status_topic = 'flexbe/status' - self.feedback_topic = 'flexbe/command_feedback' - self.heartbeat_topic = 'flexbe/heartbeat' - self._feedback_pub = self.create_publisher(CommandFeedback, self.feedback_topic, 10) - self._heartbeat_pub = self.create_publisher(BehaviorSync, self.heartbeat_topic, 10) - self._status_pub = self.create_publisher(BEStatus, self.status_topic, 10) + # Proxy as also accessed by states + self._proxy_pub = ProxyPublisher({Topics._CMD_FEEDBACK_TOPIC: CommandFeedback}) + + # only at onboard level + self._heartbeat_pub = self.create_publisher(BehaviorSync, Topics._ONBOARD_HEARTBEAT_TOPIC, 10) + self._status_pub = self.create_publisher(BEStatus, Topics._ONBOARD_STATUS_TOPIC, 10) + + latching_qos = QoSProfile(depth=1, durability=QoSDurabilityPolicy.TRANSIENT_LOCAL) + self._version_sub = self.create_subscription(String, Topics._UI_VERSION_TOPIC, + self._version_callback, qos_profile=latching_qos) # listen for new behavior to start + self._start_beh_sub = self.create_subscription(BehaviorSelection, + Topics._START_BEHAVIOR_TOPIC, + self._behavior_callback, + 10) + try: 
self._enable_clear_imports = self.get_parameter('enable_clear_imports').get_parameter_value() except ParameterNotDeclaredException: @@ -96,10 +116,6 @@ def __init__(self): self._switch_lock = threading.Lock() self._behavior_id = -1 self._current_state_checksum = -1 - self._start_beh_sub = self.create_subscription(BehaviorSelection, - 'flexbe/start_behavior', - self._behavior_callback, - 10) self._userdata_service = self.create_service(GetUserdata, 'get_user_data', self._userdata_callback) @@ -114,6 +130,24 @@ def __init__(self): Logger.localinfo('\033[92m--- Behavior Engine ready for first behavior! ---\033[0m') self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), code=BEStatus.READY)) + def _version_callback(self, msg): + vui = FlexbeOnboard._parse_version(msg.data) + vex = FlexbeOnboard._parse_version(MIN_UI_VERSION) + if vui < vex: + Logger.logwarn('FlexBE App needs to be updated!\n' + f'Onboard Behavior Engine requires at least version {MIN_UI_VERSION}, ' + f' but you have {msg.data}\n' + 'Please update the flexbe_app software.') + + @staticmethod + def _parse_version(v): + result = 0 + offset = 1 + for n in reversed(v.split('.')): + result += int(n) * offset + offset *= 100 + return result + def _behavior_callback(self, beh_sel_msg): self._trigger_ready = False # We have received the behavior selection request self._ready_counter = 0 @@ -141,6 +175,13 @@ def behavior_shutdown(self): import traceback print(traceback.format_exc().replace("%", "%%")) + def onboard_shutdown(self): + print("Shutting down the onboard behavior executive ...", flush=True) + self.destroy_timer(self._heartbeat) + self._proxy_pub.remove_publisher(Topics._CMD_FEEDBACK_TOPIC) + for _ in range(50): + self.executor.spin_once(timeout_sec=0.001) + def verify_no_active_behaviors(self, timeout=0.5): """Verify no active behaviors.""" run_locked = self._run_lock.acquire(timeout=timeout) @@ -158,12 +199,13 @@ def verify_no_active_behaviors(self, timeout=0.5): def _behavior_execution(self, beh_sel_msg): # sending a behavior while one is already running is considered as switching + set_thread_name("beh" + f"{beh_sel_msg.behavior_id}") if not rclpy.ok(): self._cleanup_tempdir() if self._running: Logger.loginfo('--> Initiating behavior switch...') - self._feedback_pub.publish(CommandFeedback(command="switch", args=['received'])) + self._proxy_pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="switch", args=['received'])) # construct the behavior that should be executed Logger.localinfo(f"Prepare behavior id={beh_sel_msg.behavior_key} ({beh_sel_msg.behavior_id}) ...") @@ -171,16 +213,16 @@ def _behavior_execution(self, beh_sel_msg): if be is None: Logger.logerr('Dropped behavior start request because preparation failed.') if self._running: - self._feedback_pub.publish(CommandFeedback(command="switch", args=['failed'])) + self._proxy_pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="switch", args=['failed'])) else: # self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), code=BEStatus.READY)) Logger.localinfo('\033[92m--- Behavior Engine ready to try again! 
---\033[0m') - self._ready_counter = 6 # Trigger heartbeat to republish READY within 4 seconds + self._ready_counter = 8 # Trigger heartbeat to republish READY within 2 seconds return # perform the behavior switch if required - Logger.localinfo("Behavior Engine - get switch lock to start behavior id " - f"key={beh_sel_msg.behavior_key} ({beh_sel_msg.behavior_id})...") + # Logger.localinfo("Behavior Engine - get switch lock to start behavior id " + # f"key={beh_sel_msg.behavior_key} ({beh_sel_msg.behavior_id})...") with self._switch_lock: Logger.localinfo("Behavior Engine - got switch lock to start behavior new id " f"key={beh_sel_msg.behavior_key} ({beh_sel_msg.behavior_id})...") @@ -189,14 +231,14 @@ def _behavior_execution(self, beh_sel_msg): self._switching = True Logger.localinfo("Behavior Engine - prepare to switch current running behavior" f" {self.be.name}: id={self.be.beh_id}...") - self._feedback_pub.publish(CommandFeedback(command="switch", args=['start'])) + self._proxy_pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="switch", args=['start'])) # ensure that switching is possible if not self._is_switchable(be): Logger.logerr("Dropped behavior start request for " f"key={beh_sel_msg.behavior_key} (id={beh_sel_msg.behavior_id}) " " because switching is not possible.") - self._feedback_pub.publish(CommandFeedback(command="switch", args=['not_switchable'])) + self._proxy_pub.publish(Topics._CMD_FEEDBACK_TOPIC, CommandFeedback(command="switch", args=['not_switchable'])) return self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), @@ -204,24 +246,31 @@ def _behavior_execution(self, beh_sel_msg): code=BEStatus.SWITCHING)) # wait if running behavior is currently starting or stopping rate = threading.Event() - active_state = None + active_states = None while rclpy.ok() and self._running: - active_state = self.be.get_current_state() - if active_state is not None or not self._running: + active_states = self.be.get_current_states() + if not self._running or (active_states is not None and len(active_states) > 0): break - rate.wait(0.01) # As we are polling for signal, don't use ROS rate due to sim time + rate.wait(0.001) # As we are polling for signal, don't use ROS rate due to sim time del rate # extract the active state if any - if active_state is not None: + if active_states is not None: Logger.localinfo(f"Behavior Engine - {self.be.name}: {self.be.beh_id} " - f"switching behaviors from active state {active_state.name} ...") + f"switching behaviors from active state {[acst.name for acst in active_states]} ...") try: - be.prepare_for_switch(active_state) - self._feedback_pub.publish(CommandFeedback(command="switch", args=['prepared'])) + if len(active_states) > 1: + Logger.localinfo(f"\n\nBehavior Engine - {self.be.name}: {self.be.beh_id} " + f"- switching only top level state!\n") + Logger.logwarn("Switch multiple active states?") + + be.prepare_for_switch(active_states[0]) + self._proxy_pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="switch", args=['prepared'])) except Exception as exc: Logger.logerr('Failed to prepare behavior switch:\n%s' % str(exc)) - self._feedback_pub.publish(CommandFeedback(command="switch", args=['failed'])) + self._proxy_pub.publish(Topics._CMD_FEEDBACK_TOPIC, + CommandFeedback(command="switch", args=['failed'])) # Let us know that old behavior is still running self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), behavior_id=self.be.beh_id, @@ -229,7 +278,7 @@ def _behavior_execution(self, 
beh_sel_msg): return # stop the rest Logger.localinfo(f"Behavior Engine - {self.be.name}: {self.be.beh_id} - " - f"preempt active state {active_state.name} ...") + f"preempt active state {active_states[0].name} ...") self.be.preempt() else: Logger.localinfo(f"Behavior Engine - {self.be.name}: {self.be.beh_id} " @@ -246,10 +295,10 @@ def _behavior_execution(self, beh_sel_msg): result = None try: - Logger.localinfo(f'Behavior Engine - behavior {self.be.name}: {self.be.beh_id} ready, begin startup ...') Logger.loginfo('Onboard Behavior Engine starting [%s : %s]' % (be.name, beh_sel_msg.behavior_id)) self.be.confirm() - Logger.localinfo(f'Behavior Engine - behavior {self.be.name}: {self.be.beh_id} confirmation.') + Logger.localinfo(f' behavior {self.be.name}: {self.be.beh_id} confirmation.') + args = [self.be.requested_state_path] if self.be.requested_state_path is not None else [] Logger.localinfo(f'Behavior Engine - behavior {self.be.name}: {self.be.beh_id} BEStatus STARTED.') self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), @@ -292,7 +341,7 @@ def _behavior_execution(self, beh_sel_msg): self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), code=BEStatus.READY)) Logger.localinfo('\033[92m--- Behavior Engine finished - ready for more! ---\033[0m') - Logger.localinfo(f"Behavior execution finished for id={self.be.beh_id}, exit thread!") + # Logger.localinfo(f"Behavior execution finished for id={self.be.beh_id}, exit thread!") self._running = False self._switching = False self.be = None @@ -445,6 +494,8 @@ def _prepare_behavior(self, beh_sel_msg): # build state machine try: + self.get_logger().info(f"Building state machine {beh_sel_msg.behavior_id} with " + f"autonomy level={beh_sel_msg.autonomy_level}.") be.set_up(beh_id=beh_sel_msg.behavior_id, autonomy_level=beh_sel_msg.autonomy_level, debug=False) be.prepare_for_execution(self._convert_input_data(beh_sel_msg.input_keys, beh_sel_msg.input_values)) self.get_logger().info('State machine built.') diff --git a/flexbe_onboard/flexbe_onboard/start_behavior.py b/flexbe_onboard/flexbe_onboard/start_behavior.py index 5c2db75..29db871 100644 --- a/flexbe_onboard/flexbe_onboard/start_behavior.py +++ b/flexbe_onboard/flexbe_onboard/start_behavior.py @@ -10,7 +10,8 @@ # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # -# * Neither the name of the Philipp Schillinger, Team ViGIR, Christopher Newport University nor the names of its +# * Neither the name of the Philipp Schillinger, Team ViGIR, +# Christopher Newport University nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # @@ -73,11 +74,16 @@ def main(args=None): print(f" All onboard behaviors are stopped at {datetime.now()}!", flush=True) # Last call for clean up of any stray communications - for _ in range(100): + for _ in range(50): executor.spin_once(timeout_sec=0.001) # allow behavior to cleanup after itself + print(f"{datetime.now()} - onboard shutdown requested ...", flush=True) + onboard.onboard_shutdown() + for _ in range(30): + executor.spin_once(timeout_sec=0.001) # allow onboard system to cleanup after itself + except Exception as exc: - print(f"{datetime.now()} - Exception in onboard behavior shutdown! {type(exc)}\n {exc}", flush=True) + print(f"{datetime.now()} - Exception in onboard shutdown! 
{type(exc)}\n {exc}", flush=True) import traceback print(f"{traceback.format_exc().replace('%', '%%')}", flush=True) @@ -86,14 +92,14 @@ def main(args=None): shutdown_proxies() # Last call for clean up of any stray communications - for _ in range(100): + for _ in range(50): executor.spin_once(timeout_sec=0.001) # allow behavior to cleanup after itself print(f"Proxy shutdown completed at {datetime.now()} ...", flush=True) onboard.destroy_node() # Last call for clean up of any stray communications - for _ in range(100): + for _ in range(50): executor.spin_once(timeout_sec=0.001) # allow behavior to cleanup after itself print(f"Node destruction completed at {datetime.now()} ...", flush=True) diff --git a/flexbe_onboard/tests/test_onboard.py b/flexbe_onboard/tests/test_onboard.py index cea2187..737d034 100755 --- a/flexbe_onboard/tests/test_onboard.py +++ b/flexbe_onboard/tests/test_onboard.py @@ -41,6 +41,7 @@ from rclpy.executors import MultiThreadedExecutor from flexbe_onboard import FlexbeOnboard +from flexbe_core.core.topics import Topics from flexbe_core.proxy import ProxySubscriberCached from flexbe_msgs.msg import BehaviorSelection, BEStatus, BehaviorLog, BehaviorModification @@ -60,8 +61,8 @@ def setUp(self): ProxySubscriberCached.initialize(self.node) - self.sub = ProxySubscriberCached({'flexbe/status': BEStatus, - 'flexbe/log': BehaviorLog}, + self.sub = ProxySubscriberCached({Topics._ONBOARD_STATUS_TOPIC: BEStatus, + Topics._BEHAVIOR_LOGGING_TOPIC: BehaviorLog}, inst_id=id(self)) # make sure that behaviors can be imported data_folder = os.path.dirname(os.path.realpath(__file__)) @@ -90,22 +91,22 @@ def assertStatus(self, expected, timeout, message=""): # Spin both nodes as needed self.executor.spin_once(timeout_sec=0.01) time.sleep(0.1) - if self.sub.has_msg('flexbe/status'): + if self.sub.has_msg(Topics._ONBOARD_STATUS_TOPIC): break else: raise AssertionError('Did not receive a status as required.') - msg = self.sub.get_last_msg('flexbe/status') - self.sub.remove_last_msg('flexbe/status') + msg = self.sub.get_last_msg(Topics._ONBOARD_STATUS_TOPIC) + self.sub.remove_last_msg(Topics._ONBOARD_STATUS_TOPIC) self.node.get_logger().info(f"assertStatus: msg= {str(msg)} expected={expected} - {message}?") self.assertEqual(msg.code, expected, msg=message) return msg def clear_extra_heartbeat_ready_messages(self): - while self.sub.has_msg('flexbe/status'): - msg = self.sub.get_last_msg('flexbe/status') + while self.sub.has_msg(Topics._ONBOARD_STATUS_TOPIC): + msg = self.sub.get_last_msg(Topics._ONBOARD_STATUS_TOPIC) if msg.code == BEStatus.READY: self.node.get_logger().info(f"clearing READY msg={str(msg)}") - self.sub.remove_last_msg('flexbe/status') # Clear any heartbeat start up messages + self.sub.remove_last_msg(Topics._ONBOARD_STATUS_TOPIC) # Clear any heartbeat start up messages else: break self.executor.spin_once(timeout_sec=0.01) @@ -113,7 +114,7 @@ def clear_extra_heartbeat_ready_messages(self): def test_onboard_behaviors(self): self.executor.spin_once(timeout_sec=1) - behavior_pub = self.node.create_publisher(BehaviorSelection, 'flexbe/start_behavior', 1) + behavior_pub = self.node.create_publisher(BehaviorSelection, Topics._START_BEHAVIOR_TOPIC, 1) # wait for publisher2 end_time = time.time() + 3.0 while time.time() < end_time: @@ -140,7 +141,7 @@ def test_onboard_behaviors(self): # send valid simple behavior request with open(self.lib.get_sourcecode_filepath(be_key)) as f: request.behavior_id = zlib.adler32(f.read().encode()) & 0x7fffffff - self.sub.enable_buffer('flexbe/log') + 
self.sub.enable_buffer(Topics._BEHAVIOR_LOGGING_TOPIC) self.clear_extra_heartbeat_ready_messages() @@ -151,8 +152,8 @@ def test_onboard_behaviors(self): self.assertStatus(BEStatus.STARTED, 1, "Started simple log behavior") self.assertStatus(BEStatus.FINISHED, 3, "Finished simple log behavior") behavior_logs = [] - while self.sub.has_buffered('flexbe/log'): - behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text) + while self.sub.has_buffered(Topics._BEHAVIOR_LOGGING_TOPIC): + behavior_logs.append(self.sub.get_from_buffer(Topics._BEHAVIOR_LOGGING_TOPIC).text) self.executor.spin_once(timeout_sec=0.1) self.assertIn('Test data', behavior_logs) @@ -197,8 +198,8 @@ def test_onboard_behaviors(self): while time.time() < end_time: self.executor.spin_once(timeout_sec=0.05) - while self.sub.has_buffered('flexbe/log'): - behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text) + while self.sub.has_buffered(Topics._BEHAVIOR_LOGGING_TOPIC): + behavior_logs.append(self.sub.get_from_buffer(Topics._BEHAVIOR_LOGGING_TOPIC).text) self.assertIn('value_2', behavior_logs) self.clear_extra_heartbeat_ready_messages() @@ -221,8 +222,8 @@ def test_onboard_behaviors(self): while time.time() < end_time: self.executor.spin_once(timeout_sec=0.1) - while self.sub.has_buffered('flexbe/log'): - behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text) + while self.sub.has_buffered(Topics._BEHAVIOR_LOGGING_TOPIC): + behavior_logs.append(self.sub.get_from_buffer(Topics._BEHAVIOR_LOGGING_TOPIC).text) self.executor.spin_once(timeout_sec=0.1) self.assertIn('value_1', behavior_logs) self.node.get_logger().info("Done onboard testing!") diff --git a/flexbe_states/CHANGELOG.rst b/flexbe_states/CHANGELOG.rst index 3f48824..779494a 100644 --- a/flexbe_states/CHANGELOG.rst +++ b/flexbe_states/CHANGELOG.rst @@ -1,6 +1,11 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Changelog for package flexbe_states ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Forthcoming +----------- +* add options to remove service callers and action clients +* flake8, pep257 and codestyle checks + 2.3.3 (2023-08-09) ------------------ * cleanup diff --git a/flexbe_states/flexbe_states/input_state.py b/flexbe_states/flexbe_states/input_state.py index 6634ad0..a7a47f6 100644 --- a/flexbe_states/flexbe_states/input_state.py +++ b/flexbe_states/flexbe_states/input_state.py @@ -61,21 +61,29 @@ class InputState(EventState): """ - def __init__(self, request, message, timeout=1.0): + def __init__(self, request, message, timeout=1.0, action_topic='flexbe/behavior_input'): """Construct instance.""" super(InputState, self).__init__(outcomes=['received', 'aborted', 'no_connection', 'data_error'], output_keys=['data']) - self._action_topic = 'flexbe/behavior_input' ProxyActionClient.initialize(InputState._node) - self._client = ProxyActionClient({self._action_topic: BehaviorInput}, wait_duration=0.0) + self._action_topic = action_topic + self._client = None self._request = request self._message = message self._timeout = timeout - self._connected = True + self._connected = False self._received = False - def execute(self, userdata): + def on_start(self): + self._client = ProxyActionClient({self._action_topic: BehaviorInput}, wait_duration=0.0) + self._connected = True + + def on_stop(self): + ProxyActionClient.remove_client(self._action_topic) + self._client = None + self._connected = False + def execute(self, userdata): if not self._connected: return 'no_connection' if self._received: @@ -122,6 +130,6 @@ def on_enter(self, userdata): # Attempt to send the goal. 
try: self._client.send_goal(self._action_topic, action_goal, wait_duration=self._timeout) - except Exception as e: - Logger.logwarn('Was unable to send data request:\n%s' % str(e)) + except Exception as exc: + Logger.logwarn('Was unable to send data request:\n%s' % str(exc)) self._connected = False diff --git a/flexbe_states/flexbe_states/subscriber_state.py b/flexbe_states/flexbe_states/subscriber_state.py index 796e9b4..c63af99 100644 --- a/flexbe_states/flexbe_states/subscriber_state.py +++ b/flexbe_states/flexbe_states/subscriber_state.py @@ -57,11 +57,16 @@ def __init__(self, topic, msg_type="", blocking=True, clear=False): self._blocking = blocking self._clear = clear self._connected = False - + self._sub = None if not self._connect(): Logger.logwarn('Topic %s for state %s not yet available.\n' 'Will try again when entering the state...' % (self._topic, self.name)) + def on_stop(self): + if self._connected: + ProxySubscriberCached.unsubscribe_topic(self._topic) + self._connected = False + def execute(self, userdata): if not self._connected: userdata.message = None diff --git a/flexbe_testing/flexbe_testing/py_tester.py b/flexbe_testing/flexbe_testing/py_tester.py index 11d1c45..5fd3170 100644 --- a/flexbe_testing/flexbe_testing/py_tester.py +++ b/flexbe_testing/flexbe_testing/py_tester.py @@ -75,10 +75,10 @@ def setUpClass(cls): PyTester._file_path = join(package_dir, PyTester._tests_folder) def setUp(self): - assert PyTester._package is not None, "Must define setUpClass() for particular test" assert PyTester._file_path is not None, "Must define setUpClass() for particular test" + print("setUp test and initialize ROS ...", flush=True) self.context = rclpy.context.Context() rclpy.init(context=self.context) @@ -89,6 +89,7 @@ def setUp(self): self.node.declare_parameter("~print_debug_positive", True) self.node.declare_parameter("~print_debug_negative", True) self.node.declare_parameter("~compact_format", False) + self.node.get_logger().info(f" setUp - rclpy.ok={rclpy.ok(context=self.context)} context={self.context.ok()}") initialize_proxies(self.node) @@ -111,7 +112,7 @@ def tearDown(self): self.executor.shutdown() - # Kill it with fire to make sure not stray published topics are available + # Kill it with fire to make sure no stray published topics are available rclpy.shutdown(context=self.context) def run_test(self, test_name, timeout_sec=None, max_cnt=50): @@ -120,6 +121,8 @@ def run_test(self, test_name, timeout_sec=None, max_cnt=50): try: self.node.get_logger().info(f" Loading {test_name} from -->{filename}") + self.node.get_logger().info(f" run_test import rclpy.ok={rclpy.ok(context=self.context)} " + f"context={self.node.context.ok()}") with open(filename, 'r') as f: config = getattr(yaml, 'unsafe_load', yaml.load)(f) except IOError as io_error: @@ -132,6 +135,8 @@ def run_test(self, test_name, timeout_sec=None, max_cnt=50): self.node.get_logger().error(f"Running test for {test_name}\n config: {config} ...") tester = Tester(self.node, self.executor) try: + self.node.get_logger().info(f" run_pytest rclpy.ok={rclpy.ok(context=self.context)} " + f"context={self.node.context.ok()}") success = tester.run_pytest(test_name, config, timeout_sec=timeout_sec, max_cnt=max_cnt) self.node.get_logger().error(f" test for {test_name} success={success}") except Exception as exc: # pylint: disable=W0703 diff --git a/flexbe_testing/flexbe_testing/test_interface.py b/flexbe_testing/flexbe_testing/test_interface.py index fbc86b8..92cf0dc 100644 --- a/flexbe_testing/flexbe_testing/test_interface.py 
+++ b/flexbe_testing/flexbe_testing/test_interface.py @@ -54,7 +54,7 @@ def __init__(self, node, path, classname): self._class = next(c for name, c in clsmembers if name == classname) self._instance = None self._node = node - self._node.get_logger().info(f"rclpy.ok={rclpy.ok()} context={self._node.context.ok()}") + self._node.get_logger().info(f"rclpy.ok={rclpy.ok(context=self._node.context)} context={self._node.context.ok()}") try: self._class.initialize_ros(node) Logger.print_positive("Given class is a state") @@ -121,6 +121,6 @@ def _execute_behavior(self, userdata, context, spin_cb): sm = self._instance._state_machine while outcome is None and context.ok(): outcome = sm.execute(userdata) - sm.sleep() + sm.wait(seconds=sm.sleep_duration, context=self._node.context) spin_cb() return outcome diff --git a/flexbe_testing/tests/flexbe_colcon_test.py b/flexbe_testing/tests/flexbe_colcon_test.py index fdbbac0..783a010 100644 --- a/flexbe_testing/tests/flexbe_colcon_test.py +++ b/flexbe_testing/tests/flexbe_colcon_test.py @@ -28,7 +28,9 @@ """Colcon testing for flexbe_testing.""" + from os.path import join +import unittest from flexbe_testing.py_tester import PyTester @@ -70,3 +72,7 @@ def test_sub_unavailable(self): def test_behavior(self): """Invoke unittest defined .test file.""" return self.run_test("behavior") + + +if __name__ == '__main__': + unittest.main() diff --git a/flexbe_widget/CHANGELOG.rst b/flexbe_widget/CHANGELOG.rst index cf8890e..a51fe77 100644 --- a/flexbe_widget/CHANGELOG.rst +++ b/flexbe_widget/CHANGELOG.rst @@ -1,6 +1,12 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Changelog for package flexbe_widget ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Forthcoming +----------- +* flake 8 cleanup +* fix CONSTANT style across flexbe_behavior_engine +* use SingleThreadedExecutor + 2.3.3 (2023-08-09) ------------------ diff --git a/flexbe_widget/bin/be_compress b/flexbe_widget/bin/be_compress index 616850d..7b73ecc 100755 --- a/flexbe_widget/bin/be_compress +++ b/flexbe_widget/bin/be_compress @@ -7,6 +7,7 @@ fi ''' # flake8: noqa import rclpy +from flexbe_core.core.topics import Topics from flexbe_msgs.msg import BehaviorSelection from rosidl_runtime_py import get_interface_path @@ -69,7 +70,7 @@ def _callback(msg): def main(args=None): rclpy.init(args=args) node = rclpy.create_node('flexbe_widget') - sub = node.create_subscription(BehaviorSelection, "/flexbe/start_behavior", _callback) + sub = node.create_subscription(BehaviorSelection, Topics._START_BEHAVIOR_TOPIC, _callback) # Wait for ctrl-c to stop the application rclpy.spin() diff --git a/flexbe_widget/flexbe_widget/behavior_action_server.py b/flexbe_widget/flexbe_widget/behavior_action_server.py index e7203ad..c110b1a 100644 --- a/flexbe_widget/flexbe_widget/behavior_action_server.py +++ b/flexbe_widget/flexbe_widget/behavior_action_server.py @@ -42,6 +42,7 @@ from flexbe_msgs.msg import BehaviorSelection, BehaviorModification, BEStatus from flexbe_msgs.action import BehaviorExecution from flexbe_core import BehaviorLibrary +from flexbe_core.core.topics import Topics from std_msgs.msg import String, Empty @@ -58,12 +59,13 @@ def __init__(self, node): self._current_state = None self._active_behavior_id = None - self._pub = self._node.create_publisher(BehaviorSelection, 'flexbe/start_behavior', 100) - self._preempt_pub = self._node.create_publisher(Empty, 'flexbe/command/preempt', 100) - self._status_pub = self._node.create_subscription(BEStatus, 'flexbe/status', self._status_cb, 100) - self._state_pub = self._node.create_subscription(String, 
diff --git a/flexbe_widget/flexbe_widget/behavior_action_server.py b/flexbe_widget/flexbe_widget/behavior_action_server.py
index e7203ad..c110b1a 100644
--- a/flexbe_widget/flexbe_widget/behavior_action_server.py
+++ b/flexbe_widget/flexbe_widget/behavior_action_server.py
@@ -42,6 +42,7 @@
 from flexbe_msgs.msg import BehaviorSelection, BehaviorModification, BEStatus
 from flexbe_msgs.action import BehaviorExecution
 from flexbe_core import BehaviorLibrary
+from flexbe_core.core.topics import Topics
 from std_msgs.msg import String, Empty
 
 
@@ -58,12 +59,13 @@ def __init__(self, node):
         self._current_state = None
         self._active_behavior_id = None
 
-        self._pub = self._node.create_publisher(BehaviorSelection, 'flexbe/start_behavior', 100)
-        self._preempt_pub = self._node.create_publisher(Empty, 'flexbe/command/preempt', 100)
-        self._status_pub = self._node.create_subscription(BEStatus, 'flexbe/status', self._status_cb, 100)
-        self._state_pub = self._node.create_subscription(String, 'flexbe/behavior_update', self._state_cb, 100)
+        self._pub = self._node.create_publisher(BehaviorSelection, Topics._START_BEHAVIOR_TOPIC, 100)
+        self._preempt_pub = self._node.create_publisher(Empty, Topics._CMD_PREEMPT_TOPIC, 100)
+        self._status_pub = self._node.create_subscription(BEStatus, Topics._ONBOARD_STATUS_TOPIC, self._status_cb, 100)
+        self._state_pub = self._node.create_subscription(String, Topics._BEHAVIOR_UPDATE_TOPIC, self._state_cb, 100)
 
-        self._as = ActionServer(self._node, BehaviorExecution, 'flexbe/execute_behavior',
+        self._as = ActionServer(self._node, BehaviorExecution,
+                                Topics._EXECUTE_BEHAVIOR_ACTION,
                                 goal_callback=self._goal_cb,
                                 cancel_callback=self._preempt_cb,
                                 execute_callback=self._execute_cb)
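
Replacing the hard-coded strings above with `Topics` constants removes a whole class of mismatch bugs; the be_compress fix in this same change set is an example, where the literal "/flexbe/start_behavior" (leading slash) disagreed with the "flexbe/start_behavior" used elsewhere. A sketch of the pattern, assuming the constants keep the literal values seen on the removed lines (this stub is illustrative, not the actual flexbe_core.core.topics.Topics):

    # Illustrative stub: one source of truth for FlexBE topic names.
    # The real Topics class defines many more constants than shown here.
    class Topics:
        """Centralized FlexBE topic names."""

        _START_BEHAVIOR_TOPIC = 'flexbe/start_behavior'    # values assumed to
        _CMD_PREEMPT_TOPIC = 'flexbe/command/preempt'      # match the literals
        _ONBOARD_STATUS_TOPIC = 'flexbe/status'            # on the removed
        _BEHAVIOR_UPDATE_TOPIC = 'flexbe/behavior_update'  # lines above

    # Every publisher and subscription then references the same constant,
    # so a topic rename is a one-line change:
    #     node.create_publisher(BehaviorSelection, Topics._START_BEHAVIOR_TOPIC, 100)
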
diff --git a/flexbe_widget/flexbe_widget/behavior_launcher.py b/flexbe_widget/flexbe_widget/behavior_launcher.py
index 8cc5ab3..f5b3d3a 100644
--- a/flexbe_widget/flexbe_widget/behavior_launcher.py
+++ b/flexbe_widget/flexbe_widget/behavior_launcher.py
@@ -48,28 +48,28 @@
 
 from flexbe_msgs.msg import BehaviorModification, BehaviorRequest, BehaviorSelection, BEStatus, ContainerStructure
 
-from flexbe_core import BehaviorLibrary, Logger
+from flexbe_core import BehaviorLibrary, Logger, MIN_UI_VERSION
+from flexbe_core.core import StateMap
+from flexbe_core.core.topics import Topics
 
 
 class BehaviorLauncher(Node):
     """Node to launch FlexBE behaviors."""
 
-    MIN_VERSION = '2.2.0'
-
     def __init__(self):
         # Initiate the Node class's constructor and give it a name
         super().__init__('flexbe_widget')
 
         self._ready_event = threading.Event()
 
-        self._sub = self.create_subscription(BehaviorRequest, "flexbe/request_behavior", self._request_callback, 100)
-        self._version_sub = self.create_subscription(String, "flexbe/ui_version", self._version_callback, 100)
-        self._status_sub = self.create_subscription(BEStatus, "flexbe/status", self._status_callback, 100)
+        self._sub = self.create_subscription(BehaviorRequest, Topics._REQUEST_BEHAVIOR_TOPIC, self._request_callback, 100)
+        self._version_sub = self.create_subscription(String, Topics._UI_VERSION_TOPIC, self._version_callback, 1)
+        self._status_sub = self.create_subscription(BEStatus, Topics._ONBOARD_STATUS_TOPIC, self._status_callback, 100)
 
-        self._pub = self.create_publisher(BehaviorSelection, "flexbe/start_behavior", 100)
-        self._status_pub = self.create_publisher(BEStatus, "flexbe/status", 100)
-        self._mirror_pub = self.create_publisher(ContainerStructure, "flexbe/mirror/structure", 100)
-        self._heartbeat_pub = self.create_publisher(Int32, 'flexbe/launcher/heartbeat', 2)
+        self._pub = self.create_publisher(BehaviorSelection, Topics._START_BEHAVIOR_TOPIC, 100)
+        self._status_pub = self.create_publisher(BEStatus, Topics._ONBOARD_STATUS_TOPIC, 100)
+        self._mirror_pub = self.create_publisher(ContainerStructure, Topics._MIRROR_STRUCTURE_TOPIC, 100)
+        self._heartbeat_pub = self.create_publisher(Int32, Topics._LAUNCHER_HEARTBEAT_TOPIC, 2)
 
         self._behavior_lib = BehaviorLibrary(self)
 
@@ -94,6 +94,18 @@ def _status_callback(self, msg):
         self.get_logger().info(f"BE status code={msg.code} received ")
 
     def _request_callback(self, msg):
+        """Process request in separate thread to avoid blocking callbacks."""
+        if not self._ready_event.is_set():
+            Logger.logerr("Behavior engine is not ready - cannot process start request!")
+        else:
+            # self._ready_event.clear()  # require a new ready signal after publishing
+            # thread = threading.Thread(target=self._process_request, args=[msg])
+            # thread.daemon = True
+            # thread.start()
+            # Not waiting in process request, so safe to not block callback
+            self._process_request(msg)
+
+    def _process_request(self, msg):
         self.get_logger().info(f"Got message from request behavior for {msg.behavior_name}")
         be_key, behavior = self._behavior_lib.find_behavior(msg.behavior_name)
         if be_key is None:
@@ -132,19 +144,20 @@
         be_selection.arg_keys = msg.arg_keys
         be_selection.arg_values = msg.arg_values
 
-        # wait until Behavior Engine status is BEStatus.READY
-        self.get_logger().info("Wait for Behavior Engine ...")
-        self._ready_event.wait()
-        self.get_logger().info(" BE is ready!")
-
+        container_map = StateMap()
         be_structure = ContainerStructure()
         be_structure.containers = msg.structure
+        for container in be_structure.containers:
+            # self.get_logger().info(f"BELauncher: request_callback: adding container {container.path} ...")
+            container_map.add_state(container.path, container)
 
+        # self.get_logger().info(f"BELauncher: request_callback: {msg.structure}")
         try:
             be_filepath_new = self._behavior_lib.get_sourcecode_filepath(be_key)
         except Exception:  # pylint: disable=W0703
             self.get_logger().error("Could not find behavior package '%s'" % (behavior["package"]))
             self.get_logger().info("Have you built and updated your setup after creating the behavior?")
+            self._status_pub.publish(BEStatus(stamp=self.get_clock().now().to_msg(), code=BEStatus.ERROR))
             return
 
         with open(be_filepath_new, "r") as f:
@@ -156,6 +169,7 @@
             be_selection.behavior_id = zlib.adler32(be_content_new.encode()) & 0x7fffffff
             if msg.autonomy_level != 255:
                 be_structure.behavior_id = be_selection.behavior_id
+                # self.get_logger().info(f"BELauncher: request_callback publish structure : {be_structure}")
                 self._mirror_pub.publish(be_structure)
                 self._ready_event.clear()  # require a new ready signal after publishing
                 self._pub.publish(be_selection)
@@ -178,21 +192,22 @@
             be_structure.behavior_id = be_selection.behavior_id
             self._mirror_pub.publish(be_structure)
 
-        self._ready_event.clear()  # require a new ready signal after publishing
+        self._ready_event.clear()  # Force a new ready message before processing
         self._pub.publish(be_selection)
         self.get_logger().info(f"New behavior key={be_selection.behavior_key} published "
                                f"with checksum id = {be_selection.behavior_id}- start!")
 
     def _version_callback(self, msg):
-        vui = self._parse_version(msg.data)
-        vex = self._parse_version(BehaviorLauncher.MIN_VERSION)
+        vui = BehaviorLauncher._parse_version(msg.data)
+        vex = BehaviorLauncher._parse_version(MIN_UI_VERSION)
         if vui < vex:
-            self.get_logger().warn('FlexBE App needs to be updated!\n'
-                                   f'Require at least version {BehaviorLauncher.MIN_VERSION}, '
-                                   f' but you have {msg.data}\n'
-                                   'Please update the flexbe_app software.')
+            Logger.logwarn('FlexBE App needs to be updated!\n'
+                           f'Behavior launcher requires at least version {MIN_UI_VERSION}, '
+                           f' but you have {msg.data}\n'
+                           'Please update the flexbe_app software.')
 
-    def _parse_version(self, v):
+    @staticmethod
+    def _parse_version(v):
         result = 0
         offset = 1
         for n in reversed(v.split('.')):
@@ -243,7 +258,7 @@
                         signal_handler_options=rclpy.signals.SignalHandlerOptions.NO)  # We will handle shutdown
 
     launcher = BehaviorLauncher()
-    executor = rclpy.executors.MultiThreadedExecutor(num_threads=2)
+    executor = rclpy.executors.SingleThreadedExecutor()
     executor.add_node(launcher)
 
     if behavior != "":
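
The `_parse_version` hunk is cut off after the `for` line, but the comparison `vui < vex` in `_version_callback` shows the intent: pack a dotted version string into a single comparable integer. A plausible completion, assuming each component is weighted by successive powers of 100 (the loop body below the `for` is my reconstruction, not confirmed by this diff):

    # Hedged sketch of the truncated helper: weight each dotted component
    # by powers of 100 so that version strings compare as plain integers.
    def _parse_version(v):
        result = 0
        offset = 1
        for n in reversed(v.split('.')):
            result += int(n) * offset
            offset *= 100
        return result

    # Worked examples:
    #   _parse_version('2.2.0')  == 20200   (the removed MIN_VERSION)
    #   _parse_version('10.0.1') == 100001  (two digits reserved per component)
    # Any UI reporting a version below MIN_UI_VERSION trips the logwarn above.
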