diff --git a/.coveragerc b/.coveragerc index c7ad07bca..443874bee 100644 --- a/.coveragerc +++ b/.coveragerc @@ -17,4 +17,4 @@ exclude_lines = raise NotImplementedError @abstract if __name__ == .__main__.: - log = logging.getLogger(__name__) + logging.getLogger(__name__) diff --git a/BciPyReport.pdf b/BciPyReport.pdf deleted file mode 100644 index c90cd24de..000000000 Binary files a/BciPyReport.pdf and /dev/null differ diff --git a/CHANGELOG.md b/CHANGELOG.md index 56e81e89b..49e2db25f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,14 @@ # 2.0.0-rc.4 -Our last release candidate before the official 2.0 release! +Our final release candidate before the official 2.0 release! ## Contributions - Multimodal Acquisition and Querying - Support for multiple devices in online querying #286 - Support for trigger handling relative to a given device #293 +- Session Orchestrator + - New task protocol for orchestrating tasks in a session. This refactors several Task and Cli functionality #339 - Model - Offline analysis to support multimodal fusion. Initial release of GazeModel, GazeReshaper, and Gaze Visualization #294 - Stimuli @@ -18,14 +20,25 @@ Our last release candidate before the official 2.0 release! - Offset Support - Add support for determining offsets between timing verification Tasks (Ex. RSVPTimingVerificationCalibration) and RawData with a photodiode trigger column. This is useful for setting up new systems and preventing errors before an experiment begins. 
#TODO - Parameters - - Add a Range type parameter #285 + - Add a Range type parameter #285 Add editable fields #340 Update parameters.json to separate relevant parameters by task - Housekeeping - Add mypy typing to the codebase #301 - Change default log level to INFO to prevent too many messages in the experiment logs #288 - Upgrade requirements for m1/2 chips #299/#300 - Fix GitHub actions build issues with macOS - Fix occasionally failing test in `test_stimuli` #326 - +- GUI Refactor + - Create new `BCIUI` class for simpler more straightforward UI creation. + - Create dedicated external stylesheet for global styling + - Rewrite Experiment Registry to use new GUI code + - Create intertask action UI +- Task Return Object + - Create `TaskData` dataclass to be returned from tasks + - updates task `execute` methods to return an instance of `TaskData` + - Allows for optional storage of a save path and task dictionary in `TaskData` +- Experiment Refactor + - Refactors the Experiment Field Collection GUI to be an action + - Allows task protocol to be defined in the orchestrator # 2.0.0-rc.3 diff --git a/README.md b/README.md index d17f471cc..da8abf015 100644 --- a/README.md +++ b/README.md @@ -62,22 +62,39 @@ Alternately, if [Make](http://www.mingw.org/) is installed, you may run the foll make dev-install ``` -#### Usage Locally - -Two ways to get started using BciPy for data collection: - 1. Run `python bcipy/gui/BCInterface.py` in your command prompt or terminal from from base BciPy directory. This will execute the main BCI GUI. You may also use the command `make bci-gui`. - 2. Invoke the experiment directly using command line utility `bcipy`. +#### Client Usage + Invoke an experiment protocol or task directly using command line utility `bcipy`. - You can pass it attributes with flags, if desired. - Ex. 
`bcipy --user "bci_user" --task "RSVP Calibration"` + Running with a User ID and Task: `bcipy --user "bci_user" --task "RSVP Calibration"` + Running with a User ID and Tasks with a registered Protocol: `bcipy --user "bci_user" --experiment "default"` + Running with fake data: `bcipy --fake` + Running without visualizations: `bcipy --noviz` + Running with alerts after each Task execution: `bcipy --alert` + Running with custom parameters: `bcipy --parameters "path/to/valid/parameters.json"` + - Use the help flag to see other available input options: `bcipy --help` -##### Example usage as a package +##### Example Usage as a Package ```python from bcipy.helpers import system_utils system_utils.get_system_info() ``` +#### Example Usage through the GUI + +Run the following command in your terminal to start the BciPy GUI: +```sh +python bcipy/gui/BCInterface.py +``` + +Alternately, if Make is installed, you may run the following command to start the GUI from the BciPy root directory: + +```sh +make bci-gui +``` + + #### Simulator Usage The simulator can be run using the command line utility `bcipy-sim`. @@ -101,6 +118,8 @@ Run `bcipy-sim --help` for documentation or see the README in the simulator modu ***Session***: Data collected for a task. Comprised of metadata about the task and a list of Series. +***Protocol***: A collection of tasks and actions to be executed in a session. This is defined within experiments and can be registered using the BciPy GUI. + ***Task***: An experimental design with stimuli, trials, inquiries and series for use in BCI. For instance, "RSVP Calibration" is a task. ***Mode***: Common design elements between task types. For instance, Calibration and Free Spelling are modes. @@ -131,6 +150,8 @@ This a list of the major modules and their functionality. Each module will conta ## Paradigms ------------ +See `bcipy/task/README.md` for more information on all supported paradigms and modes. 
The following are the supported and validated paradigms: + > RSVPKeyboard @@ -166,7 +187,7 @@ For example, you may run the main BciPy demo by: `python demo/bci_main_demo.py` -This demo will load in parameters and execute a demo task defined in the file. There are demo files for all modules listed above except helpers and utils. Run them as a python script! +This demo will load in parameters and execute a demo task defined in the file. There are demo files contained in most modules, excepting gui, signal and parameters. Run them as a python script! ## Offset Determination and Correction @@ -293,14 +314,14 @@ If you want to be added to the development team slack or have additional questio We follow and will enforce the contributor's covenant to foster a safe and inclusive environment for this open source software, please reference this link for more information: https://www.contributor-covenant.org/ Other guidelines: -- All features require tests and a demo. +- All modules require tests and a demo. - All tests must pass to merge, even if they are seemingly unrelated to your work. - Use Spaces, not Tabs. - Use informative names for functions and classes. - Document the input and output of your functions / classes in the code. eg in-line commenting and typing. - Do not push IDE or other local configuration files. - All new modules or major functionality should be documented outside of the code with a README.md. - See README.md in repo or go to this site for inspiration: https://github.com/matiassingers/awesome-readme. Always use a Markdown interpreter before pushing. There are many free online or your IDE may come with one. + See README.md in repo or go to this site for inspiration: https://github.com/matiassingers/awesome-readme. Always use a Markdown interpreter before pushing. 
See this resource for examples: http://docs.python-guide.org/en/latest/writing/style/ diff --git a/bcipy/acquisition/datastream/generator.py b/bcipy/acquisition/datastream/generator.py index f96cf85eb..8c6798bb5 100644 --- a/bcipy/acquisition/datastream/generator.py +++ b/bcipy/acquisition/datastream/generator.py @@ -1,14 +1,11 @@ """Functions for generating mock data to be used for testing/development.""" -import logging from typing import Optional, Generator, Callable from past.builtins import range from bcipy.config import DEFAULT_ENCODING from bcipy.signal.generator.generator import gen_random_data -log = logging.getLogger(__name__) - def advance_to_row(filehandle, rownum): """Utility function to advance a file cursor to the given row.""" diff --git a/bcipy/acquisition/datastream/lsl_server.py b/bcipy/acquisition/datastream/lsl_server.py index dc064a708..0f8f82aa3 100644 --- a/bcipy/acquisition/datastream/lsl_server.py +++ b/bcipy/acquisition/datastream/lsl_server.py @@ -13,10 +13,9 @@ from bcipy.acquisition.datastream.producer import Producer from bcipy.acquisition.devices import DeviceSpec from bcipy.acquisition.util import StoppableThread -from bcipy.config import DEFAULT_ENCODING, MARKER_STREAM_NAME - -log = logging.getLogger(__name__) +from bcipy.config import DEFAULT_ENCODING, MARKER_STREAM_NAME, SESSION_LOG_FILENAME +log = logging.getLogger(SESSION_LOG_FILENAME) # pylint: disable=too-many-arguments diff --git a/bcipy/acquisition/datastream/mock/eye_tracker_server.py b/bcipy/acquisition/datastream/mock/eye_tracker_server.py index e0a5e4d1d..9cc3c3e73 100644 --- a/bcipy/acquisition/datastream/mock/eye_tracker_server.py +++ b/bcipy/acquisition/datastream/mock/eye_tracker_server.py @@ -8,8 +8,9 @@ from bcipy.acquisition.datastream.lsl_server import LslDataServer from bcipy.acquisition.devices import DeviceSpec +from bcipy.config import SESSION_LOG_FILENAME -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) def 
eye_tracker_device() -> DeviceSpec: diff --git a/bcipy/acquisition/datastream/mock/switch.py b/bcipy/acquisition/datastream/mock/switch.py index 38145f5b2..824195171 100644 --- a/bcipy/acquisition/datastream/mock/switch.py +++ b/bcipy/acquisition/datastream/mock/switch.py @@ -6,8 +6,9 @@ from bcipy.acquisition.devices import DeviceSpec, IRREGULAR_RATE from bcipy.gui.main import BCIGui, app +from bcipy.config import SESSION_LOG_FILENAME -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) def switch_device() -> DeviceSpec: @@ -41,7 +42,7 @@ def quit(self): self.outlet = None -class SwitchGui(BCIGui): +class SwitchGui(BCIGui): # pragma: no cover """GUI to emulate a switch.""" def __init__(self, switch: Switch, *args, **kwargs): @@ -70,7 +71,7 @@ def build_text(self) -> None: font_size=16) -def main(switch: Switch): +def main(switch: Switch): # pragma: no cover """Creates a PyQt5 GUI with a single button in the middle. Performs the switch action when clicked.""" gui = app(sys.argv) diff --git a/bcipy/acquisition/datastream/producer.py b/bcipy/acquisition/datastream/producer.py index 58633395c..d62ba60d3 100644 --- a/bcipy/acquisition/datastream/producer.py +++ b/bcipy/acquisition/datastream/producer.py @@ -8,8 +8,9 @@ import time from bcipy.acquisition.datastream.generator import random_data_generator +from bcipy.config import SESSION_LOG_FILENAME -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) class Producer(threading.Thread): diff --git a/bcipy/acquisition/demo/demo_lsl_acq_client.py b/bcipy/acquisition/demo/demo_lsl_acq_client.py index b1c0eacbf..3679e94c4 100644 --- a/bcipy/acquisition/demo/demo_lsl_acq_client.py +++ b/bcipy/acquisition/demo/demo_lsl_acq_client.py @@ -1,7 +1,6 @@ """Demo for the LslAcquisitionClient""" import time - from bcipy.acquisition import LslAcquisitionClient diff --git a/bcipy/acquisition/demo/demo_lsl_server.py b/bcipy/acquisition/demo/demo_lsl_server.py index 
d654ebf21..7ef7bf9c4 100644 --- a/bcipy/acquisition/demo/demo_lsl_server.py +++ b/bcipy/acquisition/demo/demo_lsl_server.py @@ -29,7 +29,7 @@ def main(): while True: time.sleep(1) except KeyboardInterrupt: - print("Keyboard Interrupt") + log.info("Keyboard Interrupt") server.stop() diff --git a/bcipy/acquisition/devices.py b/bcipy/acquisition/devices.py index 115b9c00c..74b152e58 100644 --- a/bcipy/acquisition/devices.py +++ b/bcipy/acquisition/devices.py @@ -6,7 +6,8 @@ from pathlib import Path from typing import Dict, List, NamedTuple, Optional, Union -from bcipy.config import DEFAULT_ENCODING, DEVICE_SPEC_PATH +from bcipy.config import DEFAULT_ENCODING, DEVICE_SPEC_PATH, SESSION_LOG_FILENAME + IRREGULAR_RATE: int = 0 DEFAULT_CONFIG = DEVICE_SPEC_PATH @@ -18,7 +19,7 @@ DEFAULT_DEVICE_TYPE = 'EEG' DEFAULT_STATIC_OFFSET = 0.1 -log = logging.getLogger(__name__) +logger = logging.getLogger(SESSION_LOG_FILENAME) class ChannelSpec(NamedTuple): @@ -182,7 +183,7 @@ def _validate_excluded_channels(self): """Warn if excluded channels are not in the list of channels""" for channel in self.excluded_from_analysis: if channel not in self.channels: - log.warning( + logger.warning( f"Excluded channel {channel} not found in spec for {self.name}" ) @@ -247,6 +248,7 @@ def preconfigured_device(name: str, strict: bool = True) -> DeviceSpec: "\n" "You may register new devices using the device module `register` function or in bulk" " using `load`.") + logger.error(msg) raise ValueError(msg) return device diff --git a/bcipy/acquisition/marker_writer.py b/bcipy/acquisition/marker_writer.py index b7c36d1ab..6d4172654 100644 --- a/bcipy/acquisition/marker_writer.py +++ b/bcipy/acquisition/marker_writer.py @@ -3,8 +3,9 @@ from typing import Any import pylsl +from bcipy.config import SESSION_LOG_FILENAME -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) class MarkerWriter(): diff --git a/bcipy/acquisition/multimodal.py b/bcipy/acquisition/multimodal.py index 
cf13a5613..f39d0c266 100644 --- a/bcipy/acquisition/multimodal.py +++ b/bcipy/acquisition/multimodal.py @@ -9,8 +9,9 @@ from bcipy.acquisition.protocols.lsl.lsl_client import LslAcquisitionClient from bcipy.acquisition.record import Record from bcipy.helpers.system_utils import AutoNumberEnum +from bcipy.config import SESSION_LOG_FILENAME -log = logging.getLogger(__name__) +logger = logging.getLogger(SESSION_LOG_FILENAME) class ContentType(AutoNumberEnum): @@ -111,11 +112,12 @@ def get_client( def start_acquisition(self): """Start acquiring data for all clients""" for client in self.clients: - log.info(f"Connecting to {client.device_spec.name}...") + logger.info(f"Connecting to {client.device_spec.name}...") client.start_acquisition() def stop_acquisition(self): """Stop acquiring data for all clients""" + logger.info("Stopping acquisition...") for client in self.clients: client.stop_acquisition() @@ -150,12 +152,13 @@ def get_data_by_device( adjusted_start = start + client.device_spec.static_offset if client.device_spec.sample_rate > 0: count = round(seconds * client.device_spec.sample_rate) - log.info(f'Need {count} records for processing {name} data') + logger.info(f'Need {count} records for processing {name} data') output[content_type] = client.get_data(start=adjusted_start, limit=count) data_count = len(output[content_type]) if strict and data_count < count: msg = f'Needed {count} {name} records but received {data_count}' + logger.error(msg) raise InsufficientDataException(msg) else: # Markers have an IRREGULAR_RATE. 
@@ -174,4 +177,6 @@ def __getattr__(self, name: str) -> Any: client = self.default_client if client: return client.__getattribute__(name) + + logger.error(f"Missing attribute: {name}") raise AttributeError(f"Missing attribute: {name}") diff --git a/bcipy/acquisition/protocols/lsl/lsl_client.py b/bcipy/acquisition/protocols/lsl/lsl_client.py index 86d0dbe1a..754a4d119 100644 --- a/bcipy/acquisition/protocols/lsl/lsl_client.py +++ b/bcipy/acquisition/protocols/lsl/lsl_client.py @@ -13,14 +13,14 @@ from bcipy.acquisition.protocols.lsl.lsl_connector import check_device from bcipy.acquisition.protocols.lsl.lsl_recorder import LslRecordingThread from bcipy.acquisition.record import Record -from bcipy.config import MAX_PAUSE_SECONDS +from bcipy.config import MAX_PAUSE_SECONDS, SESSION_LOG_FILENAME from bcipy.gui.viewer.ring_buffer import RingBuffer from bcipy.helpers.clock import Clock -log = logging.getLogger(__name__) - LSL_TIMEOUT = 5.0 # seconds +logger = logging.getLogger(SESSION_LOG_FILENAME) + def time_range(stamps: List[float], precision: int = 3, @@ -112,8 +112,8 @@ def start_acquisition(self) -> bool: stream_info, max_buflen=MAX_PAUSE_SECONDS, max_chunklen=1) - log.info("Acquiring data from data stream:") - log.info(self.inlet.info().as_xml()) + logger.info("Acquiring data from data stream:") + logger.info(self.inlet.info().as_xml()) if self.device_spec: check_device(self.device_spec, self.inlet.info()) @@ -128,10 +128,10 @@ def start_acquisition(self) -> bool: device_spec=self.device_spec, queue=msg_queue) self.recorder.start() - log.info("Waiting for first sample from lsl_recorder") + logger.info("Waiting for first sample from lsl_recorder") self._first_sample_time = msg_queue.get(block=True, timeout=LSL_TIMEOUT) - log.info(f"First sample time: {self.first_sample_time}") + logger.info(f"First sample time: {self.first_sample_time}") self.inlet.open_stream(timeout=LSL_TIMEOUT) if self.max_buffer_len and self.max_buffer_len > 0: @@ -142,16 +142,16 @@ def 
start_acquisition(self) -> bool: def stop_acquisition(self) -> None: """Disconnect from the data source.""" - log.info(f"Stopping Acquisition from {self.device_spec.name} ...") + logger.info(f"Stopping Acquisition from {self.device_spec.name} ...") if self.recorder: - log.info(f"Closing {self.device_spec.name} data recorder") + logger.info(f"Closing {self.device_spec.name} data recorder") self.recorder.stop() self.recorder.join() if self.inlet: - log.info("Closing LSL connection") + logger.info("Closing LSL connection") self.inlet.close_stream() self.inlet = None - log.info("Inlet closed") + logger.info("Inlet closed") self.buffer = None @@ -202,16 +202,16 @@ def get_data(self, ------- List of Records """ - log.info(request_desc(start, end, limit)) + logger.info(request_desc(start, end, limit)) data = self.get_latest_data() if not data: - log.info('No records available') + logger.info('No records available') return [] data_start = data[0].timestamp data_end = data[-1].timestamp - log.info(f'Available data: {self._data_stats(data)}') + logger.info(f'Available data: {self._data_stats(data)}') if start is None: start = data_start @@ -224,7 +224,7 @@ def get_data(self, data_slice = [ record for record in data if start <= record.timestamp <= end ][0:limit] - log.info(f"Filtered records: {self._data_stats(data_slice)}") + logger.info(f"Filtered records: {self._data_stats(data_slice)}") return data_slice @@ -247,12 +247,12 @@ def _pull_chunk(self) -> int: """Pull a chunk of samples from LSL and record in the buffer. Returns the count of samples pulled. """ - log.debug(f"\tPulling chunk (max_samples: {self.max_samples})") + logger.debug(f"\tPulling chunk (max_samples: {self.max_samples})") # A timeout of 0.0 gets currently available samples without blocking. 
samples, timestamps = self.inlet.pull_chunk( timeout=0.0, max_samples=self.max_samples) count = len(samples) - log.debug(f"\t-> received {count} samples: {time_range(timestamps)}") + logger.debug(f"\t-> received {count} samples: {time_range(timestamps)}") for sample, stamp in zip(samples, timestamps): self.buffer.append(Record(sample, stamp)) return count @@ -361,7 +361,7 @@ def offset(self, first_stim_time: float) -> float: return 0.0 assert self.first_sample_time, "Acquisition was not started." offset_from_stim = first_stim_time - self.first_sample_time - log.info(f"Acquisition offset: {offset_from_stim}") + logger.info(f"Acquisition offset: {offset_from_stim}") return offset_from_stim def cleanup(self): @@ -371,7 +371,7 @@ def cleanup(self): def discover_device_spec(content_type: str) -> DeviceSpec: """Finds the first LSL stream with the given content type and creates a device spec from the stream's metadata.""" - log.info(f"Waiting for {content_type} data to be streamed over LSL.") + logger.info(f"Waiting for {content_type} data to be streamed over LSL.") streams = resolve_byprop('type', content_type, timeout=LSL_TIMEOUT) if not streams: raise Exception( diff --git a/bcipy/acquisition/protocols/lsl/lsl_connector.py b/bcipy/acquisition/protocols/lsl/lsl_connector.py index 629e90f6f..31363db38 100644 --- a/bcipy/acquisition/protocols/lsl/lsl_connector.py +++ b/bcipy/acquisition/protocols/lsl/lsl_connector.py @@ -7,8 +7,9 @@ import pylsl from bcipy.acquisition.devices import DeviceSpec +from bcipy.config import SESSION_LOG_FILENAME -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) LSL_TIMESTAMP = 'LSL_timestamp' LSL_TIMEOUT_SECONDS = 5.0 diff --git a/bcipy/acquisition/protocols/lsl/lsl_recorder.py b/bcipy/acquisition/protocols/lsl/lsl_recorder.py index 2846d0ee1..75ddee170 100644 --- a/bcipy/acquisition/protocols/lsl/lsl_recorder.py +++ b/bcipy/acquisition/protocols/lsl/lsl_recorder.py @@ -13,9 +13,10 @@ from 
bcipy.acquisition.protocols.lsl.lsl_connector import (channel_names, check_device) from bcipy.acquisition.util import StoppableProcess +from bcipy.config import SESSION_LOG_FILENAME from bcipy.helpers.raw_data import RawDataWriter -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) class LslRecorder: @@ -144,9 +145,9 @@ def _init_data_writer(self, stream_info: StreamInfo) -> None: def _cleanup(self) -> None: """Performs cleanup tasks.""" - assert self.writer, "Writer not initialized" - self.writer.__exit__() - self.writer = None + if self.writer: + self.writer.__exit__() + self.writer = None def _write_chunk(self, data: List, timestamps: List) -> None: """Persists the data resulting from pulling a chunk from the inlet. diff --git a/bcipy/acquisition/tests/protocols/lsl/test_lsl_recorder.py b/bcipy/acquisition/tests/protocols/lsl/test_lsl_recorder.py index 2db91b78a..e0649e3d2 100644 --- a/bcipy/acquisition/tests/protocols/lsl/test_lsl_recorder.py +++ b/bcipy/acquisition/tests/protocols/lsl/test_lsl_recorder.py @@ -5,6 +5,7 @@ from pathlib import Path import pytest +import logging from bcipy.acquisition.datastream.lsl_server import LslDataServer from bcipy.acquisition.datastream.mock.eye_tracker_server import \ @@ -13,8 +14,10 @@ from bcipy.acquisition.protocols.lsl.lsl_recorder import LslRecorder from bcipy.helpers.raw_data import TIMESTAMP_COLUMN, load +log = logging.getLogger(__name__) + DEVICE_NAME = 'DSI-24' -DEVICE = preconfigured_device(DEVICE_NAME) +DEVICE = preconfigured_device(DEVICE_NAME, log) @pytest.mark.slow diff --git a/bcipy/config.py b/bcipy/config.py index f49c77cc9..0bbc4b562 100644 --- a/bcipy/config.py +++ b/bcipy/config.py @@ -13,15 +13,22 @@ # experiment configuration DEFAULT_EXPERIMENT_ID = 'default' +DEFAULT_FRAME_RATE = 60 +CUSTOM_TASK_EXPERIMENT_ID = "CustomTaskExecution" EXPERIMENT_FILENAME = 'experiments.json' FIELD_FILENAME = 'fields.json' EXPERIMENT_DATA_FILENAME = 'experiment_data.json' 
+MULTIPHRASE_FILENAME = 'phrases.json' +PROTOCOL_FILENAME = 'protocol.json' BCIPY_ROOT = Path(__file__).resolve().parent ROOT = BCIPY_ROOT.parent DEFAULT_EXPERIMENT_PATH = f'{BCIPY_ROOT}/parameters/experiment' DEFAULT_FIELD_PATH = f'{BCIPY_ROOT}/parameters/field' +DEFAULT_USER_ID = 'test_user' +TASK_SEPERATOR = '->' DEFAULT_PARAMETER_FILENAME = 'parameters.json' +DEFAULT_DEVICES_PATH = f"{BCIPY_ROOT}/parameters" DEFAULT_PARAMETERS_PATH = f'{BCIPY_ROOT}/parameters/{DEFAULT_PARAMETER_FILENAME}' DEFAULT_DEVICE_SPEC_FILENAME = 'devices.json' DEVICE_SPEC_PATH = f'{BCIPY_ROOT}/parameters/{DEFAULT_DEVICE_SPEC_FILENAME}' @@ -49,7 +56,8 @@ TRIGGER_FILENAME = 'triggers.txt' SESSION_DATA_FILENAME = 'session.json' SESSION_SUMMARY_FILENAME = 'session.xlsx' -LOG_FILENAME = 'bcipy_system_log.txt' +SESSION_LOG_FILENAME = 'session_log.txt' +PROTOCOL_LOG_FILENAME = 'protocol_log.txt' STIMULI_POSITIONS_FILENAME = 'stimuli_positions.json' # misc configuration diff --git a/bcipy/demo/bci_main_demo.py b/bcipy/demo/bci_main_demo.py index b2ac274f3..4e3c0adb3 100644 --- a/bcipy/demo/bci_main_demo.py +++ b/bcipy/demo/bci_main_demo.py @@ -1,26 +1,32 @@ -# This is a demo of the main bci system. It will run the task defined here -# using the parameters file passed to it. - - -def main(): - from bcipy.main import bcipy_main - from bcipy.task import TaskType - from bcipy.helpers.parameters import DEFAULT_PARAMETERS_PATH - - # Load a parameters file - parameters = DEFAULT_PARAMETERS_PATH - - # Task. Ex. `RSVP Calibration` - task = TaskType.by_value('RSVP Calibration') - - # Experiment. Use the default registered experiment! 
- experiment = 'default' - - # Define a user - user = 'bci_main_demo_user' - - bcipy_main(parameters, user, task, experiment) - - -if __name__ == "__main__": - main() +from bcipy.main import bci_main +from bcipy.config import DEFAULT_PARAMETERS_PATH + +parameter_location = DEFAULT_PARAMETERS_PATH # Path to a valid BciPy parameters file +user = 'test_demo_user' # User ID +experiment_id = 'default' # This will run two tasks: RSVP Calibration and Matrix Calibration +alert = False # Set to True to alert user when tasks are complete +visualize = False # Set to True to visualize data at the end of a task +fake_data = True # Set to True to use fake acquisition data during the session +# A single task or experiment ID must be provided to run. If a task is provided, the experiment ID will be ignored. +task = None + + +def bcipy_main(): + """BCI Main Demo. + + This function demonstrates how to use the BciPy main function outside of the client interface to execute tasks + or experiments. + """ + bci_main( + parameter_location=parameter_location, + user=user, + experiment_id=experiment_id, + alert=alert, + visualize=visualize, + fake=fake_data, + task=task + ) + + +if __name__ == '__main__': + bcipy_main() diff --git a/bcipy/display/demo/components/demo_layouts.py b/bcipy/display/demo/components/demo_layouts.py index adb2fd071..08521c73f 100644 --- a/bcipy/display/demo/components/demo_layouts.py +++ b/bcipy/display/demo/components/demo_layouts.py @@ -264,11 +264,12 @@ def demo_matrix_positions(win: visual.Window): # norm_layout = centered(parent=win, width_pct=1., height_pct=0.5) task_bar = at_top(parent=win, height=0.25) win_layout = Layout(win) + symbols = alphabet() norm_layout = centered(parent=win, width_pct=0.7, height_pct=0.75) - positions = symbol_positions(norm_layout, rows=5, columns=6) + positions = symbol_positions(norm_layout, symbol_set=symbols, rows=5, columns=6) - for sym, pos in zip(alphabet(), positions): + for sym, pos in zip(symbols, positions): stim = 
visual.TextStim(win, text=sym, pos=pos, diff --git a/bcipy/display/demo/matrix/demo_calibration_matrix.py b/bcipy/display/demo/matrix/demo_calibration_matrix.py index bb1a1cb02..3e737644d 100644 --- a/bcipy/display/demo/matrix/demo_calibration_matrix.py +++ b/bcipy/display/demo/matrix/demo_calibration_matrix.py @@ -19,7 +19,8 @@ stim_properties = StimuliProperties(stim_font='Arial', stim_pos=(-0.6, 0.4), stim_height=0.17, - is_txt_stim=True) + is_txt_stim=True, + layout='ALP') # Initialize Stimulus window_parameters = { diff --git a/bcipy/display/demo/matrix/demo_copyphrase_matrix.py b/bcipy/display/demo/matrix/demo_copyphrase_matrix.py index 33b27ce2c..f2b7e73c0 100644 --- a/bcipy/display/demo/matrix/demo_copyphrase_matrix.py +++ b/bcipy/display/demo/matrix/demo_copyphrase_matrix.py @@ -26,7 +26,8 @@ stim_properties = StimuliProperties(stim_font=font, stim_pos=(-0.6, 0.4), stim_height=0.17, - is_txt_stim=True) + is_txt_stim=True, + layout='ALP') # Initialize Stimulus window_parameters = { diff --git a/bcipy/display/demo/matrix/demo_matrix_layout.py b/bcipy/display/demo/matrix/demo_matrix_layout.py index 72249743b..62ba0a0af 100644 --- a/bcipy/display/demo/matrix/demo_matrix_layout.py +++ b/bcipy/display/demo/matrix/demo_matrix_layout.py @@ -35,7 +35,8 @@ stim_properties = StimuliProperties(stim_font=font, stim_pos=[], stim_height=0.5, - is_txt_stim=True) + is_txt_stim=True, + layout='QWERTY') info = InformationProperties( info_color=['white'], @@ -54,7 +55,6 @@ columns=7, width_pct=0.7, height_pct=matrix_height_pct) -# sort_order=qwerty_order(is_txt_stim=True)) matrix_display.draw(grid_opacity=matrix_display.full_grid_opacity, grid_color=matrix_display.grid_color, diff --git a/bcipy/display/main.py b/bcipy/display/main.py index 52b227a29..8aa9f9b94 100644 --- a/bcipy/display/main.py +++ b/bcipy/display/main.py @@ -1,7 +1,6 @@ # mypy: disable-error-code="assignment,empty-body" from abc import ABC, abstractmethod from enum import Enum -from logging import Logger 
from typing import Any, List, NamedTuple, Optional, Tuple, Type, Union from psychopy import visual @@ -22,7 +21,6 @@ class Display(ABC): window: visual.Window = None timing_clock: Clock = None experiment_clock: Clock = None - logger: Logger = None stimuli_inquiry: List[str] = None stimuli_colors: List[str] = None stimuli_timing: List[float] = None @@ -132,7 +130,7 @@ def init_display_window(parameters): class StimuliProperties: """"Stimuli Properties. - An encapsulation of properties relevant to core stimuli presentation in an RSVP or Matrix paradigm. + An encapsulation of properties relevant to core stimuli presentation in a paradigm. """ def __init__( @@ -144,7 +142,8 @@ def __init__( stim_colors: Optional[List[str]] = None, stim_timing: Optional[List[float]] = None, is_txt_stim: bool = True, - prompt_time: Optional[float] = None): + prompt_time: Optional[float] = None, + layout: Optional[str] = None): """Initialize Stimuli Parameters. stim_font(List[str]): Ordered list of colors to apply to information stimuli @@ -156,6 +155,8 @@ def __init__( stim_timing(List[float]): Ordered list of timing to apply to an inquiry using the stimuli is_txt_stim(bool): Whether or not this is a text based stimuli (False implies image based) prompt_time(float): Time to display target prompt for at the beginning of inquiry + layout(str): Layout of stimuli on the screen (ex. 'ALPHABET' or 'QWERTY'). + This is only used for matrix displays. """ self.stim_font = stim_font self.stim_pos = stim_pos @@ -167,6 +168,7 @@ def __init__( self.stim_length = len(self.stim_inquiry) self.sti = None self.prompt_time = prompt_time + self.layout = layout def build_init_stimuli(self, window: visual.Window) -> Union[visual.TextStim, visual.ImageStim]: """"Build Initial Stimuli. 
diff --git a/bcipy/display/paradigm/matrix/README.md b/bcipy/display/paradigm/matrix/README.md index 22d731125..0fbdf4a89 100644 --- a/bcipy/display/paradigm/matrix/README.md +++ b/bcipy/display/paradigm/matrix/README.md @@ -55,21 +55,6 @@ matrix_display = MatrixDisplay(win, columns=7) ``` -## Sorting stimuli - -A sort order function for the symbols can specified. The sort function includes the ability to provide blank spaces within the grid. - -``` -from bcipy.helpers.symbols import qwerty_order -matrix_display = MatrixDisplay(win, - experiment_clock, - stim_properties, - task_bar=task_bar, - info=info, - rows=3, - columns=10, - sort_order=qwerty_order(is_txt_stim=True)) -``` ## Layout diff --git a/bcipy/display/paradigm/matrix/display.py b/bcipy/display/paradigm/matrix/display.py index 6cbfa0198..b22f081b4 100644 --- a/bcipy/display/paradigm/matrix/display.py +++ b/bcipy/display/paradigm/matrix/display.py @@ -1,20 +1,22 @@ """Display for presenting stimuli in a grid.""" import logging -from typing import Callable, Dict, List, NamedTuple, Optional, Tuple +from typing import Dict, List, NamedTuple, Optional, Tuple from psychopy import core, visual import bcipy.display.components.layout as layout -from bcipy.config import MATRIX_IMAGE_FILENAME +from bcipy.config import MATRIX_IMAGE_FILENAME, SESSION_LOG_FILENAME from bcipy.display import (BCIPY_LOGO_PATH, Display, InformationProperties, StimuliProperties) from bcipy.display.components.task_bar import TaskBar from bcipy.display.main import PreviewParams, init_preview_button_handler from bcipy.display.paradigm.matrix.layout import symbol_positions from bcipy.helpers.stimuli import resize_image -from bcipy.helpers.symbols import alphabet +from bcipy.helpers.symbols import alphabet, qwerty_order, frequency_order from bcipy.helpers.triggers import _calibration_trigger +logger = logging.getLogger(SESSION_LOG_FILENAME) + class SymbolDuration(NamedTuple): """Represents a symbol and its associated duration to display""" @@ 
-33,7 +35,7 @@ class MatrixDisplay(Display): time_fixation: 2 stim_pos_x: -0.6 stim_pos_y: 0.4 - stim_height: 0.1 + stim_height: 0.17 """ def __init__(self, @@ -47,9 +49,8 @@ def __init__(self, width_pct: float = 0.75, height_pct: float = 0.8, trigger_type: str = 'text', - symbol_set: Optional[List[str]] = None, + symbol_set: Optional[List[str]] = alphabet(), should_prompt_target: bool = True, - sort_order: Optional[Callable] = None, preview_config: Optional[PreviewParams] = None): """Initialize Matrix display parameters and objects. @@ -79,8 +80,6 @@ def __init__(self, """ self.window = window - self.logger = logging.getLogger(__name__) - self.stimuli_inquiry = [] self.stimuli_timing = [] self.stimuli_colors = [] @@ -88,15 +87,16 @@ def __init__(self, assert stimuli.is_txt_stim, "Matrix display is a text only display" - self.symbol_set = symbol_set or alphabet() - self.sort_order = sort_order or self.symbol_set.index + self.symbol_set = symbol_set + self.sort_order = self.build_sort_order(stimuli) # Set position and parameters for grid of alphabet - self.grid_stimuli_height = 0.17 # stimuli.stim_height + self.grid_stimuli_height = stimuli.stim_height display_container = layout.centered(parent=window, width_pct=width_pct, height_pct=height_pct) - self.positions = symbol_positions(display_container, rows, columns) + self.positions = symbol_positions( + display_container, rows, columns, symbol_set) self.grid_color = 'white' self.start_opacity = 0.15 @@ -122,10 +122,23 @@ def __init__(self, preview_config, experiment_clock) if self.preview_enabled else None self.preview_accepted = True - self.logger.info( + logger.info( f"Symbol positions ({display_container.units} units):\n{self.stim_positions}" ) - self.logger.info(f"Matrix center position: {display_container.center}") + logger.info(f"Matrix center position: {display_container.center}") + + def build_sort_order(self, stimuli: StimuliProperties) -> List[str]: + """Build the symbol set for the display.""" + if 
stimuli.layout == 'ALP': + return self.symbol_set.index + elif stimuli.layout == 'QWERTY': + logger.info('Using QWERTY layout') + return qwerty_order() + elif stimuli.layout == 'FREQ': + logger.info('Using frequency layout') + return frequency_order() + else: + raise ValueError(f'Unknown layout: {stimuli.layout}') @property def stim_positions(self) -> Dict[str, Tuple[float, float]]: @@ -222,11 +235,13 @@ def do_inquiry(self) -> List[Tuple[str, float]]: def build_grid(self) -> Dict[str, visual.TextStim]: """Build the text stimuli to populate the grid.""" + grid = {} for sym in self.symbol_set: pos_index = self.sort_order(sym) pos = self.positions[pos_index] grid[sym] = visual.TextStim(win=self.window, + font=self.stimuli_font, text=sym, color=self.grid_color, opacity=self.start_opacity, @@ -334,7 +349,7 @@ def draw(self, core.wait(duration) def animate_scp(self, fixation: SymbolDuration, - stimuli: List[SymbolDuration]): + stimuli: List[SymbolDuration]) -> None: """Animate the given stimuli using single character presentation. Flashes each stimuli in stimuli_inquiry for their respective flash @@ -405,7 +420,7 @@ def draw_components(self) -> None: for info in self.info_text: info.draw() - def update_task_bar(self, text: str = ''): + def update_task_bar(self, text: str = '') -> None: """Update Task. Update any task related display items not related to the inquiry. Ex. stimuli count 1/200. 
diff --git a/bcipy/display/paradigm/matrix/layout.py b/bcipy/display/paradigm/matrix/layout.py index 7ecccd814..994642bf4 100644 --- a/bcipy/display/paradigm/matrix/layout.py +++ b/bcipy/display/paradigm/matrix/layout.py @@ -1,16 +1,16 @@ """Functions for calculating matrix layouts""" -import logging from typing import List, Optional, Tuple from bcipy.display.components.layout import (Layout, above, below, left_of, right_of, scaled_height, scaled_width) -logger = logging.getLogger(__name__) - -def symbol_positions(container: Layout, rows: int, - columns: int, max_spacing: Optional[float] = None) -> List[Tuple[float, float]]: +def symbol_positions(container: Layout, + rows: int, + columns: int, + symbol_set: List[str], + max_spacing: Optional[float] = None) -> List[Tuple[float, float]]: """Compute the positions for arranging a number of symbols in a grid layout. @@ -20,6 +20,7 @@ def symbol_positions(container: Layout, rows: int, visual.Window parent, which is used to determine the aspect ratio. rows - number of rows in the grid columns - number of columns in the grid + symbol_set - list of symbols to place in the grid max_spacing - optional max spacing (in layout units) in the height direction; width will be normalized to this value if provided Returns @@ -28,6 +29,8 @@ def symbol_positions(container: Layout, rows: int, """ assert container.parent, "Container must have a parent" assert rows >= 1 and columns >= 1, "There must be at least one row and one column" + assert rows * columns >= len(symbol_set), \ + f"Not enough positions for symbols {len(symbol_set)}. Increase rows or columns." 
# compute the spacing (in container units) from the container width and height win_size = container.parent.size diff --git a/bcipy/display/paradigm/vep/codes.py b/bcipy/display/paradigm/vep/codes.py index 86e367460..833288ae4 100644 --- a/bcipy/display/paradigm/vep/codes.py +++ b/bcipy/display/paradigm/vep/codes.py @@ -4,7 +4,7 @@ import numpy as np -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException log = logging.getLogger(__name__) diff --git a/bcipy/display/paradigm/vep/display.py b/bcipy/display/paradigm/vep/display.py index 0df6032ac..f3fb0afeb 100644 --- a/bcipy/display/paradigm/vep/display.py +++ b/bcipy/display/paradigm/vep/display.py @@ -89,7 +89,8 @@ def __init__(self, display_container = layout.centered(parent=self.window, width_pct=0.7) self.starting_positions = symbol_positions(display_container, rows=3, - columns=10) + columns=10, + symbol_set=self.symbol_set) self.logger.info( f"Symbol starting positions ({str(display_container.units)} units): {self.starting_positions}" ) @@ -112,6 +113,8 @@ def __init__(self, self.task_bar = task_bar self.info_text = info.build_info_text(window) + self.box_stim_height = 0.24 + # build the VEP stimuli self.flicker_rates = flicker_rates self.logger.info(f"VEP flicker rates (hz): {flicker_rates}") @@ -122,7 +125,7 @@ def __init__(self, ] vep_colors = [('white', 'black'), ('red', 'green'), ('blue', 'yellow'), ('orange', 'green')] - vep_stim_size = scaled_size(0.24, self.window_size) + vep_stim_size = scaled_size(self.box_stim_height, self.window_size) self.vep = self.build_vep_stimuli(positions=box_config.positions, codes=codes, colors=cycle(vep_colors), @@ -381,9 +384,6 @@ def draw_static(self) -> None: if self.task_bar: self.task_bar.draw() - for info in self.info_text: - info.draw() - def update_task_bar(self, text: str = ''): """Update any task related display items not related to the inquiry. Ex. stimuli count 1/200. 
@@ -412,6 +412,7 @@ def _build_inquiry_stimuli(self) -> Dict[str, visual.TextStim]: for sym in self.symbol_set: pos_index = self.sort_order(sym) grid[sym] = visual.TextStim(win=self.window, + font=self.stimuli_font, text=sym, color=self.starting_color, pos=self.starting_positions[pos_index], diff --git a/bcipy/display/tests/paradigm/matrix/test_matrix_display.py b/bcipy/display/tests/paradigm/matrix/test_matrix_display.py index 36bccca82..dd71cc251 100644 --- a/bcipy/display/tests/paradigm/matrix/test_matrix_display.py +++ b/bcipy/display/tests/paradigm/matrix/test_matrix_display.py @@ -12,13 +12,15 @@ # Define some reusable elements to test Matrix Display with LEN_STIM = 10 -TEST_STIM = StimuliProperties(stim_font='Arial', - stim_pos=(-0.6, 0.4), - stim_height=0.1, - stim_inquiry=['A'], - stim_colors=[], - stim_timing=[0.1], - is_txt_stim=True) +TEST_STIM = StimuliProperties( + stim_font='Arial', + stim_pos=(-0.6, 0.4), + stim_height=0.1, + stim_inquiry=['A'], + stim_colors=[], + stim_timing=[0.1], + is_txt_stim=True, + layout='QWERTY') TEST_INFO = InformationProperties( info_color=['White'], @@ -43,7 +45,7 @@ def setUp(self): spec=psychopy.visual.TextStim) when(self.text_stim_mock).setOpacity(...).thenReturn() when(self.text_stim_mock).setColor(...).thenReturn() - when(self.text_stim_mock).draw(...).thenReturn() + when(self.text_stim_mock).draw().thenReturn() # grid item when(psychopy.visual).TextStim( diff --git a/bcipy/display/tests/paradigm/matrix/test_matrix_layout.py b/bcipy/display/tests/paradigm/matrix/test_matrix_layout.py index a6c7c8b4f..bd562dbd2 100644 --- a/bcipy/display/tests/paradigm/matrix/test_matrix_layout.py +++ b/bcipy/display/tests/paradigm/matrix/test_matrix_layout.py @@ -19,12 +19,14 @@ def setUp(self): top=1.0, right=1.0, bottom=-1.0) + self.symbols = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'] def test_regular_grid(self): """Test basic properties of a regular grid""" row_count = 4 col_count = 5 positions = 
symbol_positions(self.layout, + symbol_set=self.symbols, rows=row_count, columns=col_count) self.assertEqual(len(positions), 20) @@ -37,7 +39,11 @@ def test_regular_grid(self): def test_single_row(self): """Test position calculations for a single row""" - positions = symbol_positions(self.layout, rows=1, columns=10) + positions = symbol_positions( + self.layout, + symbol_set=self.symbols, + rows=1, + columns=10) self.assertEqual(len(positions), 10) y_coord = positions[0][1] @@ -59,7 +65,11 @@ def test_single_row(self): def test_single_column(self): """Test position calculations for a single column""" - positions = symbol_positions(self.layout, rows=10, columns=1) + positions = symbol_positions( + self.layout, + symbol_set=self.symbols, + rows=10, + columns=1) self.assertEqual(len(positions), 10) x_coord = positions[0][0] @@ -80,7 +90,11 @@ def test_single_column(self): def test_spacing(self): """Test grid spacing""" - positions = symbol_positions(self.layout, rows=2, columns=2) + positions = symbol_positions( + self.layout, + symbol_set=['A', 'B', 'C', 'D'], + rows=2, + columns=2) self.assertEqual(len(positions), 4) top_left = positions[0] @@ -107,6 +121,7 @@ def test_max_spacing(self): """Test max_spacing parameter""" max_spacing = 0.1 positions = symbol_positions(self.layout, + symbol_set=['A', 'B', 'C', 'D'], rows=2, columns=2, max_spacing=max_spacing) @@ -120,6 +135,15 @@ def test_max_spacing(self): self.assertEqual(row_spacing, max_spacing) self.assertEqual(column_spacing, max_spacing) + def test_symbol_position_throws_error_with_too_many_symbols(self): + """Test that an error is thrown if there are too many symbols for the grid""" + with self.assertRaises(AssertionError): + symbol_positions(self.layout, + symbol_set=self.symbols, + rows=2, + columns=2, + max_spacing=0.1) + if __name__ == '__main__': unittest.main() diff --git a/bcipy/display/tests/vep/test_codes.py b/bcipy/display/tests/vep/test_codes.py index cd0584dbb..5b1d82b4b 100644 --- 
a/bcipy/display/tests/vep/test_codes.py +++ b/bcipy/display/tests/vep/test_codes.py @@ -1,7 +1,7 @@ import unittest from bcipy.display.paradigm.vep.codes import round_refresh_rate, ssvep_to_code -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException class SSVEPStimuli(unittest.TestCase): diff --git a/bcipy/helpers/exceptions.py b/bcipy/exceptions.py similarity index 100% rename from bcipy/helpers/exceptions.py rename to bcipy/exceptions.py diff --git a/bcipy/feedback/feedback.py b/bcipy/feedback/feedback.py index 9bb264f39..8496371b8 100644 --- a/bcipy/feedback/feedback.py +++ b/bcipy/feedback/feedback.py @@ -1,4 +1,5 @@ import logging +from bcipy.config import SESSION_LOG_FILENAME REGISTERED_FEEDBACK_TYPES = ['sound', 'visual'] @@ -9,7 +10,7 @@ class Feedback: def __init__(self, feedback_type): super(Feedback, self).__init__() self.feedback_type = feedback_type - self.logger = logging.getLogger(__name__) + self.logger = logging.getLogger(SESSION_LOG_FILENAME) def configure(self): raise NotImplementedError() diff --git a/bcipy/gui/BCInterface.py b/bcipy/gui/BCInterface.py index 326e2a1f6..3a4c79134 100644 --- a/bcipy/gui/BCInterface.py +++ b/bcipy/gui/BCInterface.py @@ -1,16 +1,19 @@ import subprocess import sys +import logging from typing import List from bcipy.config import (BCIPY_ROOT, DEFAULT_PARAMETERS_PATH, - STATIC_IMAGES_PATH) + STATIC_IMAGES_PATH, PROTOCOL_LOG_FILENAME) from bcipy.gui.main import (AlertMessageResponse, AlertMessageType, AlertResponse, BCIGui, app, contains_special_characters, contains_whitespaces, invalid_length) from bcipy.helpers.load import (copy_parameters, load_experiments, load_json_parameters, load_users) -from bcipy.task import TaskType +from bcipy.task import TaskRegistry + +logger = logging.getLogger(PROTOCOL_LOG_FILENAME) class BCInterface(BCIGui): @@ -20,7 +23,7 @@ class BCInterface(BCIGui): editing and loading, and offline analysis execution. 
""" - tasks = TaskType.list() + tasks = TaskRegistry().list() default_text = '...' padding = 20 @@ -29,7 +32,7 @@ class BCInterface(BCIGui): max_length = 25 min_length = 1 timeout = 3 - font = 'Consolas' + font = 'Courier New' def __init__(self, *args, **kwargs): super(BCInterface, self).__init__(*args, **kwargs) @@ -333,17 +336,18 @@ def check_input(self) -> bool: try: if not self.check_user_id(): return False - if self.experiment == BCInterface.default_text: + + if self.experiment == BCInterface.default_text and self.task == BCInterface.default_text: self.throw_alert_message( title='BciPy Alert', - message='Please select or create an Experiment', + message='Please select an Experiment or Task for execution', message_type=AlertMessageType.INFO, message_response=AlertMessageResponse.OTE) return False - if self.task == BCInterface.default_text: + if self.experiment != BCInterface.default_text and self.task != BCInterface.default_text: self.throw_alert_message( title='BciPy Alert', - message='Please select a Task', + message='Please select only an Experiment or Task', message_type=AlertMessageType.INFO, message_response=AlertMessageResponse.OTE) return False @@ -407,13 +411,25 @@ def start_experiment(self) -> None: message_type=AlertMessageType.INFO, message_response=AlertMessageResponse.OTE, message_timeout=self.task_start_timeout) - cmd = ( - f'bcipy -e "{self.experiment}" ' - f'-u "{self.user}" -t "{self.task}" -p "{self.parameter_location}"' - ) + if self.task != BCInterface.default_text: + cmd = ( + f'bcipy ' + f'-u "{self.user}" -t "{self.task}" -p "{self.parameter_location}"' + ) + else: + cmd = ( + f'bcipy ' + f'-u "{self.user}" -e "{self.experiment}" -p "{self.parameter_location}"' + ) if self.alert: cmd += ' -a' - subprocess.Popen(cmd, shell=True) + output = subprocess.run(cmd, shell=True) + if output.returncode != 0: + self.throw_alert_message( + title='BciPy Alert', + message=f'Error: {output.stderr.decode()}', + message_type=AlertMessageType.CRIT, + 
message_response=AlertMessageResponse.OTE) if self.autoclose: self.close() @@ -424,7 +440,7 @@ def offline_analysis(self) -> None: Run offline analysis as a script in a new process. """ if not self.action_disabled(): - cmd = f'python {BCIPY_ROOT}/signal/model/offline_analysis.py --alert --p "{self.parameter_location}"' + cmd = f'bcipy-train --alert --p "{self.parameter_location}" -v -s' subprocess.Popen(cmd, shell=True) def action_disabled(self) -> bool: diff --git a/bcipy/gui/alert.py b/bcipy/gui/alert.py index d974349d9..91fec4c9e 100644 --- a/bcipy/gui/alert.py +++ b/bcipy/gui/alert.py @@ -15,12 +15,13 @@ def confirm(message: str) -> bool: ------- users selection : True for selecting Ok, False for Cancel. """ - app = QApplication(sys.argv) + app = QApplication(sys.argv).instance() + if not app: + app = QApplication(sys.argv) dialog = alert_message(message, message_type=AlertMessageType.INFO, message_response=AlertMessageResponse.OCE) button = dialog.exec() - result = bool(button == AlertResponse.OK.value) app.quit() return result diff --git a/bcipy/gui/bcipy_stylesheet.css b/bcipy/gui/bcipy_stylesheet.css new file mode 100644 index 000000000..58b53a88b --- /dev/null +++ b/bcipy/gui/bcipy_stylesheet.css @@ -0,0 +1,87 @@ +/* This stylesheet uses the QSS syntax, but is named as a CSS file to take advantage of IDE CSS tooling */ + +QWidget[class="experiment-registry"] { + background-color: black; +} + +QWidget[class="inter-task"] { + background-color: black; +} + +QLabel { + background-color: black; + color: white; +} + +QLabel[class="task-label"] { + background-color: transparent; + color: black; +} + +QPushButton { + background-color: rgb(16, 173, 39); + color: white; + padding: 10px; + border-radius: 10px; +} + +QPushButton[class="remove-button"] { + background-color: rgb(243, 58, 58); +} + +QPushButton[class="remove-button"]:hover { + background-color: rgb(255, 0, 0); +} + +QPushButton[class="small-button"] { + background-color: darkslategray; + color: white; + 
padding: 5px; + border-radius: 5px; +} + +QPushButton[class="small-button"]:hover { + background-color: darkgreen; +} + + +QPushButton:pressed { + background-color: darkslategrey; +} + +QComboBox { + background-color: white; + color: black; + padding: 4px; + border-radius: 1px; +} + +QComboBox:hover { + background-color: #e6f5ea; + color: black; +} + +QComboBox:on { + background-color: #e6f5ea; + color: black; +} + +QListView { + background-color: white; + color: black; + padding: 5px; +} + +QLineEdit { + background-color: white; + color: black; + padding: 5px; + border: none; + border-radius: 1px; +} + +QScrollArea { + background-color: white; + color: black; + border-radius: 1px; +} \ No newline at end of file diff --git a/bcipy/gui/bciui.py b/bcipy/gui/bciui.py new file mode 100644 index 000000000..b3c301832 --- /dev/null +++ b/bcipy/gui/bciui.py @@ -0,0 +1,238 @@ +from typing import Callable, Type +from PyQt6.QtCore import pyqtSignal +from PyQt6.QtWidgets import ( + QWidget, + QVBoxLayout, + QHBoxLayout, + QPushButton, + QScrollArea, + QLayout, + QSizePolicy, + QMessageBox, + QApplication, +) +from typing import Optional, List +from bcipy.config import BCIPY_ROOT +import sys + + +class BCIUI(QWidget): + contents: QVBoxLayout + center_content_vertically: bool = False + + def __init__(self, title: str = "BCIUI", default_width: int = 500, default_height: int = 600) -> None: + super().__init__() + self.resize(default_width, default_height) + self.setWindowTitle(title) + self.contents = QVBoxLayout() + self.setLayout(self.contents) + + def app(self): + ... + + def apply_stylesheet(self) -> None: + stylesheet_path = f'{BCIPY_ROOT}/gui/bcipy_stylesheet.css' # TODO: move to config + with open(stylesheet_path, "r") as f: + stylesheet = f.read() + self.setStyleSheet(stylesheet) + + def display(self) -> None: + # Push contents to the top of the window + """ + Display the UI window and apply the stylesheet. 
+ """ + self.app() + if not self.center_content_vertically: + self.contents.addStretch() + self.apply_stylesheet() + self.show() + + def show_alert(self, alert_text: str) -> int: + """ + Shows an alert dialog with the specified text. + + PARAMETERS + ---------- + :param: alert_text: string text to display in the alert dialog. + """ + msg = QMessageBox() + msg.setText(alert_text) + msg.setWindowTitle("Alert") + return msg.exec() + + @staticmethod + def centered(widget: QWidget) -> QHBoxLayout: + layout = QHBoxLayout() + layout.addStretch() + layout.addWidget(widget) + layout.addStretch() + return layout + + @staticmethod + def make_list_scroll_area(widget: QWidget) -> QScrollArea: + scroll_area = QScrollArea() + scroll_area.setWidget(widget) + scroll_area.setWidgetResizable(True) + return scroll_area + + @staticmethod + def make_toggle( + on_button: QPushButton, + off_button: QPushButton, + on_action: Optional[Callable] = lambda: None, + off_action: Optional[Callable] = lambda: None, + ) -> None: + """ + Connects two buttons to toggle between eachother and call passed methods + + PARAMETERS + ---------- + :param: on_button: QPushButton to toggle on + :param: off_button: QPushButton to toggle off + :param: on_action: function to call when on_button is clicked + :param: off_action: function to call when off_button is clicked + + """ + off_button.hide() + + def toggle_off(): + on_button.hide() + off_button.show() + off_action() + + def toggle_on(): + on_button.show() + off_button.hide() + on_action() + + on_button.clicked.connect(toggle_off) + off_button.clicked.connect(toggle_on) + + def hide(self) -> None: + """Close the UI window""" + self.hide() + + +class SmallButton(QPushButton): + """A small button with a fixed size""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.setProperty("class", "small-button") + self.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed) + + +class DynamicItem(QWidget): + """A widget that 
can be dynamically added and removed from the ui""" + + on_remove: pyqtSignal = pyqtSignal() + data: dict = {} + + def remove(self) -> None: + """Remove the widget from it's parent DynamicList, removing it from the UI and deleting it""" + self.on_remove.emit() + + +class DynamicList(QWidget): + """A list of QWidgets that can be dynamically updated""" + + widgets: List[QWidget] + + def __init__(self, layout: Optional[QLayout] = None): + super().__init__() + if layout is None: + layout = QVBoxLayout() + self.setLayout(layout) + self.widgets = [] + + def __len__(self): + return len(self.widgets) + + def add_item(self, item: DynamicItem) -> None: + """ + Add a DynamicItem to the list. + + PARAMETERS + ---------- + :param: item: DynamicItem to add to the list. + """ + self.widgets.append(item) + item.on_remove.connect(lambda: self.remove_item(item)) + self.layout().addWidget(item) + + def move_item(self, item: DynamicItem, new_index: int) -> None: + """ + Move a DynamicItem to a new index in the list. + + PARAMETERS + ---------- + :param: item: A reference to the DynamicItem in the list to be moved. + :param: new_index: int new index to move the item to. + """ + if new_index < 0 or new_index >= len(self): + raise IndexError(f"Index out of range for length {len(self)}") + + self.widgets.pop(self.widgets.index(item)) + self.widgets.insert(new_index, item) + self.layout().removeWidget(item) + self.layout().insertWidget(new_index, item) + + def index(self, item: DynamicItem) -> int: + """ + Get the index of a DynamicItem in the list. + + PARAMETERS + ---------- + :param: item: A reference to the DynamicItem in the list to get the index of. + + Returns + ------- + The index of the item in the list. + """ + return self.widgets.index(item) + + def remove_item(self, item: DynamicItem) -> None: + """ + Remove a DynamicItem from the list. 
+ + PARAMETERS + ---------- + :param: item: A reference to the DynamicItem to remove from the list + """ + self.widgets.remove(item) + self.layout().removeWidget(item) + item.deleteLater() + + def clear(self) -> None: + """Remove all items from the list""" + for widget in self.widgets: + self.layout().removeWidget(widget) + widget.deleteLater() + self.widgets = [] + + def list(self): + return [widget.data for widget in self.widgets] + + def list_property(self, prop: str): + """ + Get a list of values for a given property of each DynamicItem's data dictionary. + + PARAMETERS + ---------- + :param: prop: string property name to get the values of. + + Returns + ------- + A list of values for the given property. + """ + return [widget.data[prop] for widget in self.widgets] + + +def run_bciui(ui: Type[BCIUI], *args, **kwargs): + # add app to kwargs + app = QApplication(sys.argv).instance() + if not app: + app = QApplication(sys.argv) + ui_instance = ui(*args, **kwargs) + ui_instance.display() + return app.exec() diff --git a/bcipy/gui/experiments/ExperimentField.py b/bcipy/gui/experiments/ExperimentField.py index 290e6c692..4ec1e51a0 100644 --- a/bcipy/gui/experiments/ExperimentField.py +++ b/bcipy/gui/experiments/ExperimentField.py @@ -1,4 +1,5 @@ """GUI form for collecting experimental field data.""" + # pylint: disable=E0611 import sys @@ -26,8 +27,10 @@ FloatInput, FormInput, IntegerInput, - TextInput + TextInput, ) +from bcipy.config import EXPERIMENT_DATA_FILENAME +from bcipy.helpers.validate import validate_experiment, validate_field_data_written from bcipy.helpers.load import load_experiments, load_fields from bcipy.helpers.save import save_experiment_field_data @@ -38,20 +41,29 @@ class ExperimentFieldCollection(QWidget): Given an experiment with fields to be collected, this UI can be used to collect data in the correct format and require fields which are noted as such in the experiment. 
""" + field_data: List[tuple] = [] field_inputs: List[FormInput] = [] type_inputs = { - 'int': (IntegerInput, 0), - 'float': (FloatInput, 0.0), - 'bool': (BoolInput, False), - 'filepath': (FileInput, ''), - 'directorypath': (DirectoryInput, ''), + "int": (IntegerInput, 0), + "float": (FloatInput, 0.0), + "bool": (BoolInput, False), + "filepath": (FileInput, ""), + "directorypath": (DirectoryInput, ""), } - require_mark = '*' + require_mark = "*" alert_timeout = 10 save_data = {} - def __init__(self, title: str, width: int, height: int, experiment_name: str, save_path: str, file_name: str): + def __init__( + self, + title: str, + width: int, + height: int, + experiment_name: str, + save_path: str, + file_name: str, + ): super().__init__() self.experiment_name = experiment_name @@ -59,7 +71,7 @@ def __init__(self, title: str, width: int, height: int, experiment_name: str, sa self.save_path = save_path self.file_name = file_name self.help_size = 12 - self.help_color = 'darkgray' + self.help_color = "darkgray" self.width = width self.height = height self.title = title @@ -87,9 +99,13 @@ def build_form(self) -> None: Loop over the field data and create UI field inputs for data collection. """ for field_name, field_type, required, help_text in self.field_data: - self.field_inputs.append(self.field_input(field_name, field_type, help_text, required)) + self.field_inputs.append( + self.field_input(field_name, field_type, help_text, required) + ) - def field_input(self, field_name: str, field_type: str, help_tip: str, required: bool) -> FormInput: + def field_input( + self, field_name: str, field_type: str, help_tip: str, required: bool + ) -> FormInput: """Field Input. 
Construct a FormInput for the given field based on its python type and other @@ -105,7 +121,8 @@ def field_input(self, field_name: str, field_type: str, help_tip: str, required: value=init_value, help_tip=help_tip, help_size=self.help_size, - help_color=self.help_color) + help_color=self.help_color, + ) def build_assets(self) -> None: """Build Assets. @@ -124,11 +141,12 @@ def check_input(self) -> bool: name = field.label if self.require_mark in field.label and not _input: self.throw_alert_message( - title='BciPy Alert', - message=f'Required field {name.strip(self.require_mark)} must be filled out!', + title="BciPy Alert", + message=f"Required field {name.strip(self.require_mark)} must be filled out!", message_type=AlertMessageType.CRIT, message_response=AlertMessageResponse.OCE, - message_timeout=self.alert_timeout) + message_timeout=self.alert_timeout, + ) return False return True @@ -138,13 +156,18 @@ def build_field_data(self) -> None: Using the fields defined in the experiment, fetch the other attributes of the field. It will be stored in self.field_data as a list of tuples (name, field type, required, help text). """ - for field in self.experiment['fields']: + for field in self.experiment["fields"]: # the field name and requirement for name, required in field.items(): # help text and type field_data = self.fields[name] self.field_data.append( - (name.title(), field_data['type'], self.map_to_bool(required['required']), field_data['help_text']) + ( + name.title(), + field_data["type"], + self.map_to_bool(required["required"]), + field_data["help_text"], + ) ) def map_to_bool(self, string_boolean: str) -> bool: @@ -152,11 +175,11 @@ def map_to_bool(self, string_boolean: str) -> bool: All data is loaded from json ("true"/"false"). This method will return a python boolean (True/False). 
""" - if string_boolean == 'true': + if string_boolean == "true": return True - elif string_boolean == 'false': + elif string_boolean == "false": return False - raise Exception(f'Unsupported boolean value {string_boolean}') + raise Exception(f"Unsupported boolean value {string_boolean}") def save(self) -> None: if self.check_input(): @@ -172,31 +195,34 @@ def build_save_data(self) -> None: self.save_data[name] = _input except ValueError as e: self.throw_alert_message( - title='Error', - message=f'Error saving data. Invalid value provided. \n {e}', + title="Error", + message=f"Error saving data. Invalid value provided. \n {e}", message_type=AlertMessageType.WARN, message_response=AlertMessageResponse.OCE, - message_timeout=self.alert_timeout + message_timeout=self.alert_timeout, ) def write_save_data(self) -> None: save_experiment_field_data(self.save_data, self.save_path, self.file_name) self.throw_alert_message( - title='Success', + title="Success", message=( - f'Data successfully written to: \n\n{self.save_path}/{self.file_name} \n\n\n' - 'Please wait or close this window to start the task!'), + f"Data successfully written to: \n\n{self.save_path}/{self.file_name} \n\n\n" + "Please wait or close this window to start the task!" 
+ ), message_type=AlertMessageType.INFO, message_response=AlertMessageResponse.OCE, message_timeout=self.alert_timeout, ) - def throw_alert_message(self, - title: str, - message: str, - message_type: AlertMessageType = AlertMessageType.INFO, - message_response: AlertMessageResponse = AlertMessageResponse.OTE, - message_timeout: float = 0) -> MessageBox: + def throw_alert_message( + self, + title: str, + message: str, + message_type: AlertMessageType = AlertMessageType.INFO, + message_response: AlertMessageResponse = AlertMessageResponse.OTE, + message_timeout: float = 0, + ) -> MessageBox: """Throw Alert Message.""" msg = MessageBox() @@ -226,13 +252,23 @@ class MainPanel(QWidget): file_name: name of the file to write with collected field_data """ - def __init__(self, title: str, width: int, height: int, experiment_name: str, save_path: str, file_name: str): + def __init__( + self, + title: str, + width: int, + height: int, + experiment_name: str, + save_path: str, + file_name: str, + ): super().__init__() self.title = title self.width = width self.height = height - self.form = ExperimentFieldCollection(title, width, height, experiment_name, save_path, file_name) + self.form = ExperimentFieldCollection( + title, width, height, experiment_name, save_path, file_name + ) self.initUI() def initUI(self): @@ -241,7 +277,9 @@ def initUI(self): self.form_panel = QScrollArea() self.form_panel.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) - self.form_panel.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff) + self.form_panel.setHorizontalScrollBarPolicy( + Qt.ScrollBarPolicy.ScrollBarAlwaysOff + ) self.form_panel.setWidgetResizable(True) self.form_panel.setFixedWidth(self.width) self.form_panel.setWidget(self.form) @@ -255,7 +293,7 @@ def initUI(self): control_box = QHBoxLayout() control_box.addStretch() - save_button = QPushButton('Save') + save_button = QPushButton("Save") save_button.setFixedWidth(80) 
save_button.clicked.connect(self.save) control_box.addWidget(save_button) @@ -267,56 +305,67 @@ def save(self): self.close() -def start_app() -> None: - """Start Experiment Field Collection.""" - import argparse - from bcipy.config import DEFAULT_EXPERIMENT_ID, EXPERIMENT_DATA_FILENAME - from bcipy.helpers.validate import validate_experiment, validate_field_data_written - - parser = argparse.ArgumentParser() - - # experiment_name - parser.add_argument('-p', '--path', default='.', - help='Path to save collected field data to in json format') - parser.add_argument('-e', '--experiment', default=DEFAULT_EXPERIMENT_ID, - help='Select a valid experiment to run the task for this user') - parser.add_argument('-f', '--filename', default=EXPERIMENT_DATA_FILENAME, - help='Provide a json filename to write the field data to. Ex, experiment_data.json') - parser.add_argument('-v', '--validate', default=False, - help='Whether or not to validate the experiment before proceeding to data collection.') - - args = parser.parse_args() - - experiment_name = args.experiment - validate = args.validate - +def start_experiment_field_collection_gui( + experiment_name: str, + save_path: str, + file_name: str = EXPERIMENT_DATA_FILENAME, + validate: bool = True, +) -> None: if validate: validate_experiment(experiment_name) - print('Experiment valid!') - - save_path = args.path - file_name = args.filename bcipy_gui = app(sys.argv) - - ex = MainPanel( - title='Experiment Field Collection', + _ = MainPanel( + title="Experiment Field Collection", height=250, width=600, experiment_name=experiment_name, save_path=save_path, - file_name=file_name + file_name=file_name, ) + bcipy_gui.exec() + if validate and not validate_field_data_written(save_path, file_name): + raise Exception(f"Field data not written to {save_path}/{file_name}") - if validate: - if validate_field_data_written(save_path, file_name): - print('Field data successfully written!') - else: - raise Exception(f'Field data not written to 
{save_path}/{file_name}') +def start_app() -> None: + """Start Experiment Field Collection.""" + import argparse + from bcipy.config import DEFAULT_EXPERIMENT_ID, EXPERIMENT_DATA_FILENAME + + parser = argparse.ArgumentParser() + + # experiment_name + parser.add_argument( + "-p", + "--path", + default=".", + help="Path to save collected field data to in json format", + ) + parser.add_argument( + "-e", + "--experiment", + default=DEFAULT_EXPERIMENT_ID, + help="Select a valid experiment to run the task for this user", + ) + parser.add_argument( + "-f", + "--filename", + default=EXPERIMENT_DATA_FILENAME, + help="Provide a json filename to write the field data to. Ex, experiment_data.json", + ) + parser.add_argument( + "-v", + "--validate", + default=False, + help="Whether or not to validate the experiment before proceeding to data collection.", + ) + + args = parser.parse_args() + start_experiment_field_collection_gui(args.experiment, args.path, args.filename, args.validate) sys.exit() -if __name__ == '__main__': +if __name__ == "__main__": start_app() diff --git a/bcipy/gui/experiments/ExperimentRegistry.py b/bcipy/gui/experiments/ExperimentRegistry.py index 365675ad5..b1bd74de5 100644 --- a/bcipy/gui/experiments/ExperimentRegistry.py +++ b/bcipy/gui/experiments/ExperimentRegistry.py @@ -1,468 +1,331 @@ -import sys -import subprocess - -from bcipy.gui.main import BCIGui, app, AlertMessageType, AlertMessageResponse, ScrollableFrame, LineItems - -from bcipy.config import BCIPY_ROOT, DEFAULT_EXPERIMENT_PATH, EXPERIMENT_FILENAME -from bcipy.helpers.load import load_experiments, load_fields +from typing import List, Optional +from PyQt6.QtWidgets import ( + QComboBox, + QVBoxLayout, + QHBoxLayout, + QLabel, + QLineEdit, + QPushButton, + QScrollArea, +) +from bcipy.gui.bciui import BCIUI, DynamicItem, DynamicList, SmallButton, run_bciui +from bcipy.helpers.load import load_fields, load_experiments from bcipy.helpers.save import save_experiment_data +from bcipy.config import 
( + DEFAULT_ENCODING, + DEFAULT_EXPERIMENT_PATH, + DEFAULT_FIELD_PATH, + EXPERIMENT_FILENAME, + FIELD_FILENAME, + BCIPY_ROOT, +) +from bcipy.task.registry import TaskRegistry +import subprocess +from bcipy.task.orchestrator.protocol import serialize_protocol +import json -class ExperimentRegistry(BCIGui): - """Experiment Registry. - - User interface for creating new experiments for use in BCInterface.py. - """ - - padding = 100 - btn_height = 50 - default_text = '...' - alert_title = 'Experiment Registry Alert' - alert_timeout = 10 - experiment_fields = [] - - def __init__(self, *args, **kwargs): - super(ExperimentRegistry, self).__init__(*args, **kwargs) - - # Structure of an experiment: - # { name: { fields : {name: '', required: bool, anonymize: bool}, summary: '' } } - self.update_experiment_data() - - # These are set in the build_inputs and represent text inputs from the user - self.name_input = None - self.summary_input = None - self.field_input = None - self.panel = None - self.line_items = None - - # fields is for display of registered fields - self.fields = [] - self.registered_fields = load_fields() - self.name = None - self.summary = None - - # for registered fields - self.build_scroll_area() - - self.show_gui() - self.update_field_list() - - def build_scroll_area(self) -> None: - """Build Scroll Area. +class ExperimentRegistry(BCIUI): - Appends a scrollable area at the bottom of the UI for management of registered fields via LineItems. - """ - line_widget = LineItems([], self.width) - self.panel = ScrollableFrame(200, self.width, background_color='white', widget=line_widget) - self.add_widget(self.panel) + task_registry: TaskRegistry - def refresh_field_panel(self) -> None: - """Refresh Field Panel. 
+ def __init__(self): + super().__init__("Experiment Registry", 600, 700) + self.task_registry = TaskRegistry() + self.setProperty("class", "experiment-registry") - Reconstruct the line items from the registered fields and refresh the scrollable panel of registered fields. + def format_experiment_combobox( + self, + label_text: str, + combobox: QComboBox, + buttons: Optional[List[QPushButton]], + class_name: str = 'default', + ) -> QVBoxLayout: """ - self.build_line_items_from_fields() - self.panel.refresh(self.line_items) + Create a formatted widget for a the experiment comboboxes with optional buttons. - def toggle_required_field(self) -> None: - """Toggle Required Field. + PARAMETERS + ---------- + :param: label_text: string text to display above the combobox. + :param: combobox: the combobox. + :param: buttons: list of buttons to add to right side of the combobox. - *Button Action* - - Using the field_name retrieved from the button (get_id), find the field in self.experiment_fields and toggle - the required field ('true' or 'false'). - """ - field_name = self.window.sender().get_id() - for field in self.experiment_fields: - if field_name in field: - required = field[field_name]['required'] - if required == 'false': - field[field_name]['required'] = 'true' - else: - field[field_name]['required'] = 'false' - self.refresh_field_panel() - - def toggle_anonymize_field(self) -> None: - """Toggle Anonymize Field. - - *Button Action* - - Using the field_name retrieved from the button (get_id), find the field in self.experiment_fields and toggle - the anonymize field ('true' or 'false'). + Returns + ------- + A QVBoxLayout with the label, combobox, and buttons. 
""" - field_name = self.window.sender().get_id() - for field in self.experiment_fields: - if field_name in field: - anonymize = field[field_name]['anonymize'] - if anonymize == 'false': - field[field_name]['anonymize'] = 'true' - else: - field[field_name]['anonymize'] = 'false' - self.refresh_field_panel() - - def remove_field(self) -> None: - """Remove Field. - - *Button Action* - - Using the field_name retrieved from the button (get_id), find the field in self.experiment_fields and remove it - from the list. + label = QLabel(label_text) + area = QVBoxLayout() + input_area = QHBoxLayout() + input_area.setContentsMargins(15, 0, 0, 15) + area.addWidget(label) + combobox.setProperty("class", class_name) + input_area.addWidget(combobox, 1) + if buttons: + for button in buttons: + input_area.addWidget(button) + area.addLayout(input_area) + return area + + def make_task_entry(self, name: str) -> DynamicItem: """ - field_name = self.window.sender().get_id() - - idx = 0 - remove = None - for field in self.experiment_fields: - if field_name in field: - remove = idx - break - idx += 1 - - self.experiment_fields.pop(remove) - self.refresh_field_panel() + Create a formatted widget for a task entry. - def build_line_items_from_fields(self) -> None: - """Build Line Items From Fields. + PARAMETERS + ---------- + :param: name: string name of the task that will be displayed. - Loop over the registered experiment fields and create LineItems, which can by used to toggle the required - field, anonymization, or remove as a registered experiment field. + Returns + ------- + A DynamicItem widget with the 'task_name' property set as it's data. 
""" - items = [] - for field in self.experiment_fields: - # experiment fields is a list of dicts, here we loop over the dict to get - # the field_name, anonymization, and requirement - for field_name, required in field.items(): - - # Set the button text and colors, based on the requirement and anonymization - if required['required'] == 'false': - required_button_label = 'Optional' - required_button_color = 'black' - required_button_text_color = 'white' - else: - required_button_label = 'Required' - required_button_color = 'green' - required_button_text_color = 'white' - - if required['anonymize'] == 'false': - anon_button_label = 'Onymous' - anon_button_color = 'black' - anon_button_text_color = 'white' - else: - anon_button_label = 'Anonymous' - anon_button_color = 'green' - anon_button_text_color = 'white' - - # Construct the item to turn into a LineItem, we set the id as field_name to use later via the action - item = { - field_name: { - required_button_label: { - 'action': self.toggle_required_field, - 'color': required_button_color, - 'textColor': required_button_text_color, - 'id': field_name - }, - anon_button_label: { - 'action': self.toggle_anonymize_field, - 'color': anon_button_color, - 'textColor': anon_button_text_color, - 'id': field_name - }, - 'Remove': { - 'action': self.remove_field, - 'color': 'red', - 'textColor': 'white', - 'id': field_name - } - } - } - items.append(item) - - # finally, set the new line items for rendering - self.line_items = LineItems(items, self.width) - - def build_text(self) -> None: - """Build Text. - - Build all static text needed for the UI. - Positions are relative to the height / width of the UI defined in start_app. 
- """ - text_x = 25 - text_y = 70 - font_size = 18 - self.add_static_textbox( - text='Create BciPy Experiment', - position=[self.width / 2 - self.padding - 50, 0], - size=[300, 100], - background_color='black', - text_color='white', - font_size=22 - ) - self.add_static_textbox( - text='Name', - position=[text_x, text_y], - size=[200, 50], - background_color='black', - text_color='white', - font_size=font_size) - text_y += self.padding - self.add_static_textbox( - text='Summary', - position=[text_x, text_y], - size=[300, 50], - background_color='black', - text_color='white', - font_size=font_size) - text_y += self.padding - self.add_static_textbox( - text='Fields', - position=[text_x, text_y], - size=[300, 50], - background_color='black', - text_color='white', - font_size=font_size) - text_y += self.padding + 45 - self.add_static_textbox( - text='Registered fields *click to toggle required field*', - position=[text_x, text_y], - size=[300, 50], - background_color='black', - text_color='white', - font_size=14) - - def build_inputs(self) -> None: - """Build Inputs. - - Build all text entry inputs for the UI. - """ - input_x = 50 - input_y = 120 - input_size = [280, 25] - self.name_input = self.add_combobox( - position=[input_x, input_y], - size=input_size, - items=[self.default_text], - editable=True, - background_color='white', - text_color='black') - - input_y += self.padding - self.summary_input = self.add_combobox( - position=[input_x, input_y], - size=input_size, - items=[self.default_text], - editable=True, - background_color='white', - text_color='black') - - input_y += self.padding - self.field_input = self.add_combobox( - position=[input_x, input_y], - size=input_size, - items=self.fields, - editable=False, - background_color='white', - text_color='black') - - def build_buttons(self): - """Build Buttons. - - Build all buttons necessary for the UI. Define their action on click using the named argument action. 
- """ - btn_create_x = self.width - self.padding - 10 - btn_create_y = self.height - self.padding - 200 - size = 150 - self.add_button( - message='Create Experiment', position=[btn_create_x - (size / 2), btn_create_y], - size=[size, self.btn_height], - background_color='green', - action=self.create_experiment, - text_color='white') - - btn_field_x = (self.width / 2) + 150 - btn_field_y = 310 - # create field - self.add_button( - message='+', - position=[btn_field_x, btn_field_y], - size=[40, self.btn_height - 10], - background_color='green', - action=self.create_field, - text_color='white' + layout = QHBoxLayout() + label = QLabel(name) + label.setProperty("class", "task-label") + layout.addWidget(label) + widget = DynamicItem() + + # The indices will have to be updated to reflect the actual index we want to move to + move_up_button = SmallButton(" ▲ ") + move_up_button.clicked.connect( + lambda: self.protocol_contents.move_item( + widget, max(self.protocol_contents.index(widget) - 1, 0) + ) ) - - # add field - self.add_button( - message='Register', - position=[btn_field_x - 75, btn_field_y], - size=[60, self.btn_height - 10], - background_color='grey', - action=self.add_field, - text_color='white' + move_down_button = SmallButton(" ▼ ") + move_down_button.clicked.connect( + lambda: self.protocol_contents.move_item( + widget, + min( + self.protocol_contents.index(widget) + 1, + len(self.protocol_contents) - 1, + ), + ) ) - - def create_experiment(self) -> None: - """Create Experiment. - - After inputing all required fields, verified by check_input, add it to the experiment list and save it. 
+ layout.addWidget(move_up_button) + layout.addWidget(move_down_button) + + remove_button = SmallButton("Remove") + remove_button.setProperty("class", "remove-button") + remove_button.clicked.connect( + lambda: layout.deleteLater() + ) # This may not be needed + remove_button.clicked.connect(lambda: widget.remove()) + layout.addWidget(remove_button) + + widget.data = {"task_name": name} + widget.setLayout(layout) + return widget + + def make_field_entry(self, name: str) -> DynamicItem: """ - if self.check_input(): - self.add_experiment() - self.save_experiments() - self.throw_alert_message( - title=self.alert_title, - message='Experiment saved successfully! Please exit window or create another experiment!', - message_type=AlertMessageType.INFO, - message_response=AlertMessageResponse.OTE, - message_timeout=self.alert_timeout - ) - self.update_experiment_data() + Create a formatted widget for a field entry. - def update_experiment_data(self): - """Update Experiment Data. + PARAMETERS + ---------- + :param: name: string name of the field that will be displayed. - Fetches the experiments and extracts the registered names. + Returns + ------- + A DynamicItem widget with the 'field_name', 'anonymous', and 'optional' properties set as it's data. 
""" - self.experiments = load_experiments() - self.experiment_names = self.experiments.keys() - - def add_experiment(self) -> None: - """Add Experiment: + layout = QHBoxLayout() + label = QLabel(name) + label.setProperty("class", "task-label") + layout.addWidget(label) + widget = DynamicItem() + + remove_button = SmallButton("Remove") + remove_button.setStyleSheet("background-color: red;") + remove_button.clicked.connect(lambda: layout.deleteLater()) + + anonymous_button = SmallButton("Anonymous") + onymous_button = SmallButton("Onymous") + BCIUI.make_toggle( + anonymous_button, + onymous_button, + on_action=lambda: widget.data.update({"anonymous": True}), + off_action=lambda: widget.data.update({"anonymous": False}), + ) + layout.addWidget(anonymous_button) + layout.addWidget(onymous_button) + anonymous_button.setStyleSheet("background-color: black;") + + optional_button = SmallButton("Optional") + required_button = SmallButton("Required") + BCIUI.make_toggle( + optional_button, + required_button, + on_action=lambda: widget.data.update({"optional": True}), + off_action=lambda: widget.data.update({"optional": False}), + ) + layout.addWidget(optional_button) + layout.addWidget(required_button) + + layout.addWidget(remove_button) + widget.data = {"field_name": name, "anonymous": True, "optional": True} + remove_button.clicked.connect(lambda: widget.remove()) + widget.setLayout(layout) + return widget + + def load_fields(path: str = f"{DEFAULT_FIELD_PATH}/{FIELD_FILENAME}") -> dict: + """Load Fields. + + PARAMETERS + ---------- + :param: path: string path to the fields file. + + Returns + ------- + A dictionary of fields, with the following format: + { + "field_name": { + "help_text": "", + "type": "" + } - Add a new experiment to the dict of experiments. 
It follows the format: - { name: { fields : {name: '', required: bool, anonymize: bool}, summary: '' } } """ - self.experiments[self.name] = { - 'fields': self.experiment_fields, - 'summary': self.summary + with open(path, "r", encoding=DEFAULT_ENCODING) as json_file: + return json.load(json_file) + + def create_experiment(self): + existing_experiments = load_experiments() + experiment_name = self.experiment_name_input.text() + if not experiment_name: + self.show_alert("Please specify an experiment name") + return + experiment_summary = self.experiment_summary_input.text() + if not experiment_summary: + self.show_alert("Please specify an experiment summary") + return + fields = self.fields_content.list() + + field_list = [ + { + field["field_name"]: { + "anonymize": field["anonymous"], + "required": not field["optional"], + } + } + for field in fields + ] + task_names = self.protocol_contents.list_property("task_name") + task_objects = [self.task_registry.get(task_name) for task_name in task_names] + protocol = serialize_protocol(task_objects) + + existing_experiments[experiment_name] = { + "fields": field_list, + "summary": experiment_summary, + "protocol": protocol, } + save_experiment_data( + existing_experiments, + load_fields(), + DEFAULT_EXPERIMENT_PATH, + EXPERIMENT_FILENAME, + ) + self.show_alert("created experiment") - def save_experiments(self) -> None: - """Save Experiment. - - Save the experiments registered to the correct path as pulled from system_utils. - """ - # add fields to the experiment - save_experiment_data(self.experiments, self.registered_fields, DEFAULT_EXPERIMENT_PATH, EXPERIMENT_FILENAME) - - def create_field(self) -> None: + def create_experiment_field(self) -> None: """Create Field. Launch to FieldRegistry to create a new field for experiments. 
""" subprocess.call( - f'python {BCIPY_ROOT}/gui/experiments/FieldRegistry.py', - shell=True) + f"python {BCIPY_ROOT}/gui/experiments/FieldRegistry.py", shell=True + ) self.update_field_list() - def add_field(self) -> None: - """Add Field. + def update_field_list(self): + self.field_input.clear() + self.field_input.addItems(load_fields()) + + def app(self): + # Add form fields + self.center_content_vertically = True + header = QLabel("Experiment Registry") + header.setStyleSheet("font-size: 24px") + self.contents.addLayout(BCIUI.centered(header)) + form_area = QVBoxLayout() + form_area.setContentsMargins(30, 0, 30, 0) + self.experiment_name_input = QLineEdit() + experiment_name_box = self.format_experiment_combobox( + "Name", self.experiment_name_input, None + ) + form_area.addLayout(experiment_name_box) - Functionality to add fields to the newly created experiment. It will ensure no duplicates are addded. - """ - # get the current field value and compute a list of field names already added - field = self.field_input.currentText() - registered_fields = [name for field in self.experiment_fields for name in field.keys()] - - # if the field selected is already registered throw an alert to the user - if field in registered_fields: - return self.throw_alert_message( - title=self.alert_title, - message=f'{field} already registered with this experiment!', - message_type=AlertMessageType.INFO, - message_response=AlertMessageResponse.OTE, - message_timeout=self.alert_timeout, + self.experiment_summary_input = QLineEdit() + experiment_summary_box = self.format_experiment_combobox( + "Summary", self.experiment_summary_input, None + ) + form_area.addLayout(experiment_summary_box) + + def add_field(): + if self.field_input.currentText() in self.fields_content.list_property( + "field_name" + ): + self.show_alert("Field already added") + return + self.fields_content.add_item( + self.make_field_entry(self.field_input.currentText()) ) - # else add the field! 
- self.experiment_fields.append( - { - field: - { - 'required': 'false', - 'anonymize': 'true' - } - } + def add_task(): + self.protocol_contents.add_item( + self.make_task_entry(self.experiment_protocol_input.currentText()) + ) + + self.experiment_protocol_input = QComboBox() + self.experiment_protocol_input.addItems(self.task_registry.list()) + add_task_button = QPushButton("Add") + add_task_button.clicked.connect(add_task) + experiment_protocol_box = self.format_experiment_combobox( + "Protocol", + self.experiment_protocol_input, + [add_task_button], + "protocol", + ) + form_area.addLayout(experiment_protocol_box) + + self.field_input = QComboBox() + self.field_input.addItems(load_fields()) + add_field_button = QPushButton("Add") + new_field_button = QPushButton("New") + new_field_button.clicked.connect(self.create_experiment_field) + form_area.addLayout( + self.format_experiment_combobox( + "Fields", + self.field_input, + [add_field_button, new_field_button], + "fields", + ) ) - self.refresh_field_panel() + self.contents.addLayout(form_area) - def update_field_list(self) -> None: - """Updates the field_input combo box with a list of fields. """ + scroll_area_layout = QHBoxLayout() - self.field_input.clear() - self.field_input.addItem(ExperimentRegistry.default_text) - self.registered_fields = load_fields() - self.fields = [item for item in self.registered_fields] - self.field_input.addItems(self.fields) + self.fields_content = DynamicList() + fields_scroll_area = BCIUI.make_list_scroll_area(self.fields_content) + label = QLabel("Fields") + label.setStyleSheet("color: black;") + scroll_area_layout.addWidget(fields_scroll_area) - def build_assets(self) -> None: - """Build Assets. 
+ protocol_scroll_area = QScrollArea() + self.protocol_contents = DynamicList() + protocol_scroll_area = BCIUI.make_list_scroll_area(self.protocol_contents) + label = QLabel("Protocol") + label.setStyleSheet("color: black;") + scroll_area_layout.addWidget(protocol_scroll_area) - Define the assets to build in the UI. - """ - self.build_inputs() - self.build_text() - self.build_buttons() + self.contents.addLayout(scroll_area_layout) - def check_input(self) -> bool: - """Check Input. + add_field_button.clicked.connect(add_field) + create_experiment_button = QPushButton("Create experiment") + create_experiment_button.clicked.connect(self.create_experiment) + self.contents.addWidget(create_experiment_button) - Checks to make sure user has input all required fields. Currently, only name and summary are required. - """ - self.name = self.name_input.currentText() - self.summary = self.summary_input.currentText() - try: - if self.name == ExperimentRegistry.default_text: - self.throw_alert_message( - title=self.alert_title, - message='Please add an Experiment Name!', - message_type=AlertMessageType.WARN, - message_response=AlertMessageResponse.OTE, - message_timeout=self.alert_timeout) - return False - if self.name in self.experiment_names: - self.throw_alert_message( - title=self.alert_title, - message=( - 'Experiment name already registered. \n' - 'Please use a unique Experiment name! 
\n' - f'Registed names: {self.experiment_names}' - ), - message_type=AlertMessageType.WARN, - message_response=AlertMessageResponse.OTE, - message_timeout=self.alert_timeout) - return False - if self.summary == ExperimentRegistry.default_text: - self.throw_alert_message( - title=self.alert_title, - message='Please add an Experiment Summary!', - message_type=AlertMessageType.WARN, - message_response=AlertMessageResponse.OTE, - message_timeout=self.alert_timeout) - return False - except Exception as e: - self.throw_alert_message( - title=self.alert_title, - message=f'Error, {e}', - message_type=AlertMessageType.CRIT, - message_response=AlertMessageResponse.OTE, - message_timeout=self.alert_timeout) - return False - return True - - -def start_app() -> None: - """Start Experiment Registry.""" - bcipy_gui = app(sys.argv) - ex = ExperimentRegistry( - title='Experiment Registry', - height=700, - width=600, - background_color='black') - - sys.exit(bcipy_gui.exec()) - - -if __name__ == '__main__': - start_app() + +if __name__ == "__main__": + run_bciui(ExperimentRegistry) diff --git a/bcipy/gui/intertask_gui.py b/bcipy/gui/intertask_gui.py new file mode 100644 index 000000000..87e1a8e47 --- /dev/null +++ b/bcipy/gui/intertask_gui.py @@ -0,0 +1,86 @@ +from typing import Callable, List + +from PyQt6.QtWidgets import ( + QLabel, + QHBoxLayout, + QPushButton, + QProgressBar, + QApplication +) +from bcipy.gui.bciui import BCIUI, run_bciui +from bcipy.config import SESSION_LOG_FILENAME +import logging + +logger = logging.getLogger(SESSION_LOG_FILENAME) + + +class IntertaskGUI(BCIUI): + + action_name = "IntertaskAction" + + def __init__( + self, + next_task_index: int, + tasks: List[str], + exit_callback: Callable, + ): + self.tasks = tasks + self.current_task_index = next_task_index + self.next_task_name = tasks[self.current_task_index] + self.total_tasks = len(tasks) + self.task_progress = next_task_index + self.callback = exit_callback + super().__init__("Progress", 800, 150) + 
self.setProperty("class", "inter-task") + + def app(self): + self.contents.addLayout(BCIUI.centered(QLabel("Experiment Progress"))) + + progress_container = QHBoxLayout() + progress_container.addWidget( + QLabel(f"({self.task_progress}/{self.total_tasks})") + ) + self.progress = QProgressBar() + self.progress.setValue(int(self.task_progress / self.total_tasks * 100)) + self.progress.setTextVisible(False) + progress_container.addWidget(self.progress) + self.contents.addLayout(progress_container) + + next_info = QHBoxLayout() + next_info.addWidget(QLabel("Next Task: ")) + next_task = QLabel(self.next_task_name) + next_task.setStyleSheet("font-weight: bold; color: green;") + next_info.addWidget(next_task) + self.contents.addLayout(next_info) + + self.contents.addStretch(1) + self.next_button = QPushButton("Next") + self.stop_button = QPushButton("Stop") + self.stop_button.setStyleSheet("background-color: red") + buttons_layout = QHBoxLayout() + buttons_layout.addWidget(self.stop_button) + buttons_layout.addWidget(self.next_button) + self.contents.addLayout(buttons_layout) + + self.next_button.clicked.connect(self.next) + self.stop_button.clicked.connect(self.stop_tasks) + + def stop_tasks(self): + # This should exit Task executions + logger.info(f"Stopping Tasks... user requested. 
Using callback: {self.callback}") + self.callback() + self.quit() + logger.info("Tasks Stopped") + + def next(self): + logger.info(f"Next Task=[{self.next_task_name}] requested") + self.quit() + + def quit(self): + QApplication.instance().quit() + + +if __name__ == "__main__": + tasks = ["RSVP Calibration", "IntertaskAction", "Matrix Calibration", "IntertaskAction"] + + run_bciui(IntertaskGUI, tasks=tasks, next_task_index=1, exit_callback=lambda: print("Stopping orchestrator")) diff --git a/bcipy/gui/main.py b/bcipy/gui/main.py index 2c0d00923..9d032dfa6 100644 --- a/bcipy/gui/main.py +++ b/bcipy/gui/main.py @@ -17,7 +17,7 @@ from bcipy.helpers.parameters import parse_range -def font(size: int = 14, font_family: str = 'Helvetica') -> QFont: +def font(size: int = 16, font_family: str = 'Helvetica') -> QFont: """Create a Font object with the given parameters.""" return QFont(font_family, size, weight=0) @@ -51,7 +51,7 @@ def contains_special_characters(string: str, def static_text_control(parent, label: str, color: str = 'black', - size: int = 14, + size: int = 16, font_family: str = 'Helvetica') -> QLabel: """Creates a static text control with the given font parameters. 
Useful for creating labels and help components.""" @@ -224,6 +224,7 @@ def __init__(self, value: str, help_tip: Optional[str] = None, options: Optional[List[str]] = None, + editable: bool = True, help_size: int = 12, help_color: str = 'darkgray', should_display: bool = True): @@ -236,6 +237,7 @@ def __init__(self, self.label_widget = self.init_label() self.help_tip_widget = self.init_help(help_size, help_color) + self.editable_widget = self.init_editable(editable) self.control = self.init_control(value) self.control.installEventFilter(self) self.init_layout() @@ -251,7 +253,7 @@ def eventFilter(self, source, event): def init_label(self) -> QWidget: """Initialize the label widget.""" - return static_text_control(None, label=self.label) + return static_text_control(None, label=self.label, size=16) def init_help(self, font_size: int, color: str) -> QWidget: """Initialize the help text widget.""" @@ -271,6 +273,13 @@ def init_control(self, value) -> QWidget: # Default is a text input return QLineEdit(value) + def init_editable(self, value: bool) -> QWidget: + "Override. 
Another checkbox is needed for editable" + editable_checkbox = QCheckBox("Editable") + editable_checkbox.setChecked(value) + editable_checkbox.setFont(font(size=12)) + return editable_checkbox + def init_layout(self): """Initialize the layout by adding the label, help, and control widgets.""" self.vbox = QVBoxLayout() @@ -278,15 +287,35 @@ def init_layout(self): self.vbox.addWidget(self.label_widget) if self.help_tip_widget: self.vbox.addWidget(self.help_tip_widget) + if self.editable_widget: + self.vbox.addWidget(self.editable_widget) self.vbox.addWidget(self.control) + + self.vbox.addWidget(self.separator()) self.setLayout(self.vbox) + def separator(self): + """Creates a separator line.""" + line = QLabel() + line.setFixedHeight(1) + line.setStyleSheet("background-color: grey;") + return line + def value(self) -> str: """Returns the value associated with the form input.""" if self.control: return self.control.text() return None + def is_editable(self) -> bool: + """Returns whether the input is editable.""" + return self.editable_widget.isChecked() + + @property + def editable(self) -> bool: + """Returns whether the input is editable.""" + return True if self.editable_widget.isChecked() else False + def cast_value(self) -> Any: """Returns the value associated with the form input, cast to the correct type. @@ -417,19 +446,11 @@ class BoolInput(FormInput): def __init__(self, **kwargs): super(BoolInput, self).__init__(**kwargs) - def init_label(self) -> QWidget: - """Override. Checkboxes do not have a separate label.""" - return None - - def init_help(self, font_size: int, color: str) -> QWidget: - """Override. 
Checkboxes do not display help.""" - return None - def init_control(self, value): """Override to create a checkbox.""" - ctl = QCheckBox(self.label) + ctl = QCheckBox(f'Enable {self.label}') ctl.setChecked(value == 'true') - ctl.setFont(font()) + ctl.setFont(font(size=14)) return ctl def value(self) -> str: @@ -440,7 +461,7 @@ class RangeInput(FormInput): """FormInput to select a range of values (low, high). Serializes to 'low_value:high_value'. Appropriate boundaries are determined - from the starting value and list of recommended_values if provided. + from the starting value and list of recommended if provided. """ def init_control(self, value) -> QWidget: @@ -450,7 +471,7 @@ def init_control(self, value) -> QWidget: --------- value - initial value """ - return RangeWidget(value, self.options) + return RangeWidget(value, self.options, size=14) class SelectionInput(FormInput): @@ -542,10 +563,14 @@ def init_layout(self) -> None: self.vbox.addWidget(self.label_widget) if self.help_tip_widget: self.vbox.addWidget(self.help_tip_widget) + if self.editable_widget: + self.vbox.addWidget(self.editable_widget) + hbox = QHBoxLayout() hbox.addWidget(self.control) hbox.addWidget(self.button) self.vbox.addLayout(hbox) + self.vbox.addWidget(self.separator()) self.setLayout(self.vbox) def widgets(self) -> List[QWidget]: @@ -583,7 +608,8 @@ def __init__(self, value: Tuple[int, int], options: Optional[List[str]] = None, label_low: str = "Low:", - label_high="High:"): + label_high="High:", + size=14): super(RangeWidget, self).__init__() self.low, self.high = parse_range(value) @@ -602,10 +628,10 @@ def __init__(self, self.high_input = self.create_input(self.high) hbox = QHBoxLayout() - hbox.addWidget(static_text_control(None, label=label_low)) + hbox.addWidget(static_text_control(None, label=label_low, size=size)) hbox.addWidget(self.low_input) - hbox.addWidget(static_text_control(None, label=label_high)) + hbox.addWidget(static_text_control(None, label=label_high, size=size)) 
hbox.addWidget(self.high_input) self.setLayout(hbox) @@ -668,7 +694,7 @@ class SearchInput(QWidget): contents of the text box. """ - def __init__(self, on_search, font_size: int = 10): + def __init__(self, on_search, font_size: int = 12): super(SearchInput, self).__init__() self.on_search = on_search @@ -907,7 +933,7 @@ def add_static_textbox(self, text_color: str = 'default', size: Optional[list] = None, font_family='Times', - font_size=12, + font_size=14, wrap_text=False) -> QLabel: """Add Static Text.""" @@ -971,7 +997,8 @@ def __init__(self, height: int, width: int, background_color: str = 'black', - widget: Optional[QWidget] = None): + widget: Optional[QWidget] = None, + title: Optional[str] = None): super().__init__() self.height = height @@ -995,6 +1022,15 @@ def __init__(self, self.widget = widget self.frame.setWidget(widget) + # If there is a title, add it to the top of the frame + if title: + title_label = QLabel(title) + title_label.setStyleSheet( + 'background-color: black; color: white;') + title_label.setAlignment(Qt.AlignmentFlag.AlignCenter) + title_label.setFont(font(16)) + self.vbox.addWidget(title_label) + # add the frame and set the layout self.vbox.addWidget(self.frame) self.setLayout(self.vbox) @@ -1082,7 +1118,11 @@ def app(args) -> QApplication: Passes args from main and initializes the app """ - return QApplication(args) + + bci_app = QApplication(args).instance() + if not bci_app: + return QApplication(args) + return bci_app def start_app() -> None: @@ -1092,24 +1132,7 @@ def start_app() -> None: height=650, width=650, background_color='white') - - # ex.get_filename_dialog() - # ex.add_button(message='Test Button', position=[200, 300], size=[100, 100], id=1) - # ex.add_image(path='../static/images/gui/bci_cas_logo.png', position=[50, 50], size=200) - # ex.add_static_textbox( - # text='Test static text', - # background_color='black', - # text_color='white', - # position=[100, 20], - # wrap_text=True) - # ex.add_combobox(position=[100, 100], 
size=[100, 100], items=['first', 'second', 'third'], editable=True) - # ex.add_text_input(position=[100, 100], size=[100, 100]) ex.show_gui() - ex.throw_alert_message(title='title', - message='test', - message_response=AlertMessageResponse.OCE, - message_timeout=5) - sys.exit(bcipy_gui.exec()) diff --git a/bcipy/gui/parameters/params_form.py b/bcipy/gui/parameters/params_form.py index 0e6a4510a..602d6a816 100644 --- a/bcipy/gui/parameters/params_form.py +++ b/bcipy/gui/parameters/params_form.py @@ -80,13 +80,14 @@ def parameter_input(self, param: Dict[str, str]) -> FormInput: 'directorypath': DirectoryInput, 'range': RangeInput } - has_options = isinstance(param['recommended_values'], list) + has_options = isinstance(param['recommended'], list) form_input = type_inputs.get( param['type'], SelectionInput if has_options else TextInput) - return form_input(label=param['readableName'], + return form_input(label=param['name'], value=param['value'], help_tip=param['helpTip'], - options=param['recommended_values'], + options=param['recommended'], + editable=bool(param['editable']), help_size=self.help_size, help_color=self.help_color, should_display=bool(param['section'])) @@ -129,8 +130,12 @@ def update_parameters(self): for param_name, form_input in self.controls.items(): param = self.params[param_name] value = form_input.value() + editable = form_input.editable if value != param['value']: self.params[param_name]['value'] = value + if editable != param['editable']: + editable = True if editable == "true" or editable is True else False + self.params[param_name]['editable'] = editable def clear_layout(layout): @@ -192,7 +197,7 @@ def _init_changes(self): lbl = static_text_control( None, - label=f"* {param['readableName']}: {param['value']}", + label=f"* {param['name']}: {param['value']}", size=13, color="darkgreen") @@ -418,7 +423,7 @@ def on_save_as(self): self.repaint() -def main(json_file, title='BCI Parameters', size=(450, 550)) -> str: +def main(json_file, title='BCI 
Parameters', size=(750, 800)) -> str: """Set up the GUI components and start the main loop.""" app = QApplication(sys.argv) panel = MainPanel(json_file, title, size) diff --git a/bcipy/gui/viewer/data_source/file_streamer.py b/bcipy/gui/viewer/data_source/file_streamer.py index dd725b98a..ff6d33c7d 100644 --- a/bcipy/gui/viewer/data_source/file_streamer.py +++ b/bcipy/gui/viewer/data_source/file_streamer.py @@ -2,8 +2,10 @@ import logging import time from bcipy.acquisition.util import StoppableThread +from bcipy.config import SESSION_LOG_FILENAME from bcipy.helpers.raw_data import RawDataReader -log = logging.getLogger(__name__) + +log = logging.getLogger(SESSION_LOG_FILENAME) class FileStreamer(StoppableThread): diff --git a/bcipy/helpers/README.md b/bcipy/helpers/README.md index 1ab392cfd..bca1658a5 100644 --- a/bcipy/helpers/README.md +++ b/bcipy/helpers/README.md @@ -14,9 +14,11 @@ Modules necessary for BciPy system operation. These range from system utilities - `load`: methods for loading most BciPy data. For loading of triggers, see triggers.py - `parameters`: module for functionality related to system configuration via the parameters.json file - `raw_data`: functionality for reading and writing raw signal data +- `report`: methods for generating BciPy PDF reports - `save`: methods for saving BciPy data in supported formats. For saving of triggers, see triggers.py - `session`: methods for managing and parsing session.json data - `stimuli`: methods for generating stimuli and inquiries for presentation +- `symbols`: methods for working with symbols and symbol lists. Ex. Alphabet, QWERTY, etc. - `system_utils`: utilities for extracting git version, system information and handling of logging - `task`: common task methods and utilities, including Trial and InquiryReshaper. 
- `triggers`: methods and data classes defining BciPy internal triggering diff --git a/bcipy/helpers/acquisition.py b/bcipy/helpers/acquisition.py index c1350444b..c2c55de35 100644 --- a/bcipy/helpers/acquisition.py +++ b/bcipy/helpers/acquisition.py @@ -11,21 +11,20 @@ discover_device_spec) from bcipy.acquisition.devices import (DeviceSpec, DeviceStatus, preconfigured_device, with_content_type) -from bcipy.config import BCIPY_ROOT +from bcipy.config import BCIPY_ROOT, RAW_DATA_FILENAME, SESSION_LOG_FILENAME from bcipy.config import DEFAULT_DEVICE_SPEC_FILENAME as spec_name -from bcipy.config import RAW_DATA_FILENAME from bcipy.helpers.save import save_device_specs -log = logging.getLogger(__name__) +logger = logging.getLogger(SESSION_LOG_FILENAME) -def init_eeg_acquisition( +def init_acquisition( parameters: dict, save_folder: str, server: bool = False) -> Tuple[ClientManager, List[LslDataServer]]: """Initialize EEG Acquisition. - Initializes a client that connects with the EEG data source and begins + Initializes a client that connects with a data source and begins data collection. 
Parameters @@ -56,7 +55,7 @@ def init_eeg_acquisition( if server: server_device_spec = server_spec(content_type, device_name) - log.info( + logger.info( f"Generating mock device data for {server_device_spec.name}") dataserver = LslDataServer(server_device_spec) servers.append(dataserver) @@ -68,8 +67,7 @@ def init_eeg_acquisition( device_spec.status = status raw_data_name = raw_data_filename(device_spec) - client = init_lsl_client(parameters, device_spec, save_folder, - raw_data_name) + client = init_lsl_client(parameters, device_spec, save_folder, raw_data_name) manager.add_client(client) manager.start_acquisition() @@ -187,7 +185,7 @@ def init_lsl_client(parameters: dict, """Initialize a client that acquires data from LabStreamingLayer.""" data_buffer_seconds = round(max_inquiry_duration(parameters)) - log.info( + logger.info( f"Setting an acquisition buffer for {device_spec.name} of {data_buffer_seconds} seconds" ) return LslAcquisitionClient(max_buffer_len=data_buffer_seconds, diff --git a/bcipy/helpers/convert.py b/bcipy/helpers/convert.py index 1b365f9ea..07c6ec34f 100644 --- a/bcipy/helpers/convert.py +++ b/bcipy/helpers/convert.py @@ -14,13 +14,13 @@ from bcipy.acquisition.devices import preconfigured_device from bcipy.config import (DEFAULT_PARAMETER_FILENAME, RAW_DATA_FILENAME, - TRIGGER_FILENAME) + TRIGGER_FILENAME, SESSION_LOG_FILENAME) from bcipy.helpers.load import load_json_parameters, load_raw_data from bcipy.helpers.raw_data import RawData from bcipy.helpers.triggers import trigger_decoder, trigger_durations from bcipy.signal.process import Composition, get_default_transform -logger = logging.getLogger(__name__) +logger = logging.getLogger(SESSION_LOG_FILENAME) FILE_LENGTH_LIMIT = 150 diff --git a/bcipy/helpers/copy_phrase_wrapper.py b/bcipy/helpers/copy_phrase_wrapper.py index d779fc463..aa546948e 100644 --- a/bcipy/helpers/copy_phrase_wrapper.py +++ b/bcipy/helpers/copy_phrase_wrapper.py @@ -5,7 +5,8 @@ import numpy as np -from 
bcipy.helpers.exceptions import BciPyCoreException +from bcipy.config import SESSION_LOG_FILENAME +from bcipy.exceptions import BciPyCoreException from bcipy.helpers.language_model import histogram, with_min_prob from bcipy.helpers.stimuli import InquirySchedule, StimuliOrder from bcipy.helpers.symbols import BACKSPACE_CHAR @@ -18,7 +19,7 @@ from bcipy.task.control.query import NBestStimuliAgent from bcipy.task.data import EvidenceType -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) class CopyPhraseWrapper: diff --git a/bcipy/helpers/language_model.py b/bcipy/helpers/language_model.py index 0c9d87d7c..b1b77a05d 100644 --- a/bcipy/helpers/language_model.py +++ b/bcipy/helpers/language_model.py @@ -13,7 +13,7 @@ """Only imported models will be included in language_models_by_name""" # flake8: noqa -from bcipy.helpers.exceptions import InvalidLanguageModelException +from bcipy.exceptions import InvalidLanguageModelException from bcipy.language.model.causal import CausalLanguageModel from bcipy.language.model.kenlm import KenLMLanguageModel from bcipy.language.model.mixture import MixtureLanguageModel diff --git a/bcipy/helpers/load.py b/bcipy/helpers/load.py index 7bea71f3f..f4065a090 100644 --- a/bcipy/helpers/load.py +++ b/bcipy/helpers/load.py @@ -6,21 +6,21 @@ from pathlib import Path from shutil import copyfile from time import localtime, strftime -from typing import List, Optional +from typing import List, Optional, Union from bcipy.config import (DEFAULT_ENCODING, DEFAULT_EXPERIMENT_PATH, DEFAULT_FIELD_PATH, DEFAULT_PARAMETERS_PATH, EXPERIMENT_FILENAME, FIELD_FILENAME, ROOT, - SIGNAL_MODEL_FILE_SUFFIX) + SIGNAL_MODEL_FILE_SUFFIX, SESSION_LOG_FILENAME) from bcipy.gui.file_dialog import ask_directory, ask_filename -from bcipy.helpers.exceptions import (BciPyCoreException, - InvalidExperimentException) +from bcipy.exceptions import (BciPyCoreException, + InvalidExperimentException) from bcipy.helpers.parameters import Parameters 
from bcipy.helpers.raw_data import RawData from bcipy.preferences import preferences from bcipy.signal.model import SignalModel -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) def copy_parameters(path: str = DEFAULT_PARAMETERS_PATH, @@ -136,9 +136,10 @@ def load_json_parameters(path: str, value_cast: bool = False) -> Parameters: "fake_data": { "value": "true", "section": "bci_config", - "readableName": "Fake Data Sessions", + "name": "Fake Data Sessions", "helpTip": "If true, fake data server used", - "recommended_values": "", + "recommended": "", + "editable": "true", "type": "bool" } @@ -260,7 +261,7 @@ def choose_csv_file(filename: Optional[str] = None) -> Optional[str]: return filename -def load_raw_data(filename: str) -> RawData: +def load_raw_data(filename: Union[Path, str]) -> RawData: """Reads the data (.csv) file written by data acquisition. Parameters @@ -308,14 +309,11 @@ def load_users(data_save_loc: str) -> List[str]: return saved_users # grab all experiments in the directory and iterate over them to get the users - experiments = fast_scandir(path, return_path=True) - - for experiment in experiments: - users = fast_scandir(experiment, return_path=False) - # If it is a new user, append it to the saved_user list - for user in users: - if user not in saved_users: - saved_users.append(user) + users = fast_scandir(path, return_path=True) + + for user in users: + if user not in saved_users: + saved_users.append(user.split('/')[-1]) return saved_users diff --git a/bcipy/helpers/parameters.py b/bcipy/helpers/parameters.py index 970d9a023..3be5f3d48 100644 --- a/bcipy/helpers/parameters.py +++ b/bcipy/helpers/parameters.py @@ -13,9 +13,10 @@ class Parameter(NamedTuple): """Represents a single parameter""" value: Any section: str - readableName: str + name: str helpTip: str - recommended_values: list + recommended: list + editable: bool type: str @@ -79,8 +80,8 @@ def __init__(self, source: Optional[str] = None, cast_values: 
bool = False): self.cast_values = cast_values self.required_keys = set([ - 'value', 'section', 'readableName', 'helpTip', - 'recommended_values', 'type' + 'value', 'section', 'name', 'helpTip', + 'recommended', 'editable', 'type' ]) self.conversions = { 'int': int, @@ -111,9 +112,10 @@ def from_cast_values(cls, **kwargs): key, { 'value': value_str, 'section': '', - 'readableName': '', + 'name': '', 'helpTip': '', - 'recommended_values': '', + 'recommended': '', + 'editable': '', 'type': value_type }) return params @@ -218,9 +220,10 @@ def check_valid_entry(self, entry_name: str, entry: dict) -> None: "fake_data": { "value": "true", "section": "bci_config", - "readableName": "Fake Data Sessions", + "name": "Fake Data Sessions", "helpTip": "If true, fake data server used", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "bool" } diff --git a/bcipy/helpers/raw_data.py b/bcipy/helpers/raw_data.py index 51f1c5627..d94ac2174 100644 --- a/bcipy/helpers/raw_data.py +++ b/bcipy/helpers/raw_data.py @@ -7,7 +7,7 @@ import pandas as pd from bcipy.config import DEFAULT_ENCODING -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException from bcipy.signal.generator.generator import gen_random_data from bcipy.signal.process import Composition diff --git a/bcipy/helpers/report.py b/bcipy/helpers/report.py index 4836a7a0c..774e7ea8e 100644 --- a/bcipy/helpers/report.py +++ b/bcipy/helpers/report.py @@ -111,7 +111,7 @@ def _create_heatmap(self, onsets: List[float], range: Tuple[float, float], type: """ # create a heatmap with the onset values fig, ax = plt.subplots() - fig.set_size_inches(6, 3) + fig.set_size_inches(4, 2) ax.hist(onsets, bins=100, range=range, color='red', alpha=0.7) ax.set_title(f'{type} Artifact Onsets') ax.set_xlabel('Time (s)') @@ -156,8 +156,12 @@ class SessionReportSection(ReportSection): A class to handle the creation of a Session Report section in a BciPy Report using a summary 
dictionary. """ - def __init__(self, summary: Optional[dict] = None) -> None: + def __init__(self, summary: dict) -> None: self.summary = summary + if 'task' in self.summary: + self.session_name = self.summary['task'] + else: + self.session_name = 'Session Summary' self.style = getSampleStyleSheet() self.summary_table = None @@ -203,7 +207,7 @@ def _create_header(self) -> Paragraph: Creates a header for the Session Report section. """ - header = Paragraph('Session Summary', self.style['Heading3']) + header = Paragraph(f'{self.session_name}', self.style['Heading3']) return header diff --git a/bcipy/helpers/save.py b/bcipy/helpers/save.py index 84d275d69..8d0f197e9 100644 --- a/bcipy/helpers/save.py +++ b/bcipy/helpers/save.py @@ -16,7 +16,6 @@ DEFAULT_PARAMETER_FILENAME, SIGNAL_MODEL_FILE_SUFFIX, STIMULI_POSITIONS_FILENAME) -from bcipy.helpers.validate import validate_experiments from bcipy.signal.model.base_model import SignalModel @@ -41,7 +40,6 @@ def save_experiment_data( fields: dict, location: str, name: str) -> str: - validate_experiments(experiments, fields) return save_json_data(experiments, location, name) diff --git a/bcipy/helpers/stimuli.py b/bcipy/helpers/stimuli.py index 01bd4aae3..8a1af1a11 100644 --- a/bcipy/helpers/stimuli.py +++ b/bcipy/helpers/stimuli.py @@ -21,13 +21,13 @@ from PIL import Image from psychopy import core -from bcipy.config import DEFAULT_FIXATION_PATH, DEFAULT_TEXT_FIXATION -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.config import DEFAULT_FIXATION_PATH, DEFAULT_TEXT_FIXATION, SESSION_LOG_FILENAME +from bcipy.exceptions import BciPyCoreException from bcipy.helpers.list import grouper # Prevents pillow from filling the console with debug info logging.getLogger('PIL').setLevel(logging.WARNING) -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) NO_TARGET_INDEX = None diff --git a/bcipy/helpers/symbols.py b/bcipy/helpers/symbols.py index 467f5f59f..dca9feb4c 100644 --- 
a/bcipy/helpers/symbols.py +++ b/bcipy/helpers/symbols.py @@ -7,7 +7,7 @@ BACKSPACE_CHAR = '<' -def alphabet(parameters=None, include_path=True): +def alphabet(parameters=None, include_path=True, backspace=BACKSPACE_CHAR, space=SPACE_CHAR): """Alphabet. Function used to standardize the symbols we use as alphabet. @@ -31,7 +31,7 @@ def alphabet(parameters=None, include_path=True): stimulus_array.append(img) return stimulus_array - return list(ascii_uppercase) + [BACKSPACE_CHAR, SPACE_CHAR] + return list(ascii_uppercase) + [backspace, space] def qwerty_order(is_txt_stim: bool = True, @@ -49,14 +49,17 @@ def qwerty_order(is_txt_stim: bool = True, return f"{row1}{row2}{row3}".index -def frequency_order(is_txt_stim: bool = True) -> Callable: +def frequency_order( + is_txt_stim: bool = True, + space: str = SPACE_CHAR, + backspace: str = BACKSPACE_CHAR) -> Callable: """Returns a function that can be used to sort the alphabet symbols in most frequently used order in the English language. """ if not is_txt_stim: raise NotImplementedError( 'Frequency ordering not implemented for images') - return f"ETAOINSHRDLCUMWFGYPBVKJXQZ{BACKSPACE_CHAR}{SPACE_CHAR}".index + return f"ETAOINSHRDLCUMWFGYPBVKJXQZ{backspace}{space}".index DEFAULT_SYMBOL_SET = alphabet() diff --git a/bcipy/helpers/system_utils.py b/bcipy/helpers/system_utils.py index 6b9450404..c423dde4a 100644 --- a/bcipy/helpers/system_utils.py +++ b/bcipy/helpers/system_utils.py @@ -19,7 +19,7 @@ import torch from cpuinfo import get_cpu_info -from bcipy.config import DEFAULT_ENCODING, LOG_FILENAME +from bcipy.config import DEFAULT_ENCODING, SESSION_LOG_FILENAME class ScreenInfo(NamedTuple): @@ -197,30 +197,32 @@ def get_system_info() -> dict: def configure_logger( save_folder: str, - log_name=LOG_FILENAME, - log_level=logging.INFO, - version=None) -> None: + log_name=SESSION_LOG_FILENAME, + log_level=logging.INFO) -> logging.Logger: """Configure Logger. - Does what it says. 
+ Configures a logger to print to a file in the save_folder directory. """ # create the log file logfile = os.path.join(save_folder, 'logs', log_name) # configure it - root_logger = logging.getLogger() - root_logger.setLevel(log_level) + logger = logging.getLogger(name=log_name) + logger.setLevel(log_level) + for hdlr in logger.handlers[:]: # remove all old handlers + logger.removeHandler(hdlr) handler = logging.FileHandler(logfile, 'w', encoding='utf-8') handler.setFormatter(logging.Formatter( '[%(threadName)-9s][%(asctime)s][%(name)s][%(levelname)s]: %(message)s')) - root_logger.addHandler(handler) + logger.addHandler(handler) # print to console the absolute path of the log file to aid in debugging path_to_logs = os.path.abspath(logfile) - print(f'Printing all BciPy logs to: {path_to_logs}') + msg = f'Writing logs to: {path_to_logs}' + logger.info(msg) + print(msg) - if version: - logging.info(f'Start of Session for BciPy Version: ({version})') + return logger def import_submodules(package, recursive=True): diff --git a/bcipy/helpers/task.py b/bcipy/helpers/task.py index a4e36c525..983dec8ec 100644 --- a/bcipy/helpers/task.py +++ b/bcipy/helpers/task.py @@ -8,12 +8,12 @@ from bcipy.acquisition.multimodal import ClientManager, ContentType from bcipy.acquisition.record import Record -from bcipy.config import MAX_PAUSE_SECONDS, SESSION_COMPLETE_MESSAGE +from bcipy.config import MAX_PAUSE_SECONDS, SESSION_COMPLETE_MESSAGE, SESSION_LOG_FILENAME from bcipy.helpers.clock import Clock from bcipy.helpers.stimuli import get_fixation from bcipy.task.exceptions import InsufficientDataException -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) def fake_copy_phrase_decision(copy_phrase: str, target_letter: str, text_task: str) -> Tuple[str, str, bool]: diff --git a/bcipy/helpers/tests/resources/mock_session/parameters.json b/bcipy/helpers/tests/resources/mock_session/parameters.json index f9fcf37d6..9331fe714 100644 --- 
a/bcipy/helpers/tests/resources/mock_session/parameters.json +++ b/bcipy/helpers/tests/resources/mock_session/parameters.json @@ -2,738 +2,781 @@ "fake_data": { "value": "false", "section": "bci_config", - "readableName": "Fake EEG Data On/Off", + "name": "Fake EEG Data", "helpTip": "If ‘true’, fake EEG data will be used instead of real EEG data. Useful for testing by software development team.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "bool" }, - "acq_device": { - "value": "DSI-24", + "acq_mode": { + "value": "EEG", "section": "acq_config", - "readableName": "Acquisition Device", - "helpTip": "Specifies the hardware used for EEG data collection.", - "recommended_values": [ - "DSI-24", - "DSI-VR300", - "g.USBamp-1", - "Tobii Nano" + "name": "Acquisition Mode", + "helpTip": "Specifies the hardware device(s) used for data collection. Default: EEG.", + "recommended": [ + "EEG", + "EEG/DSI-24", + "Eyetracker", + "EEG+Eyetracker", + "EEG+Eyetracker:passive" ], + "editable": true, "type": "str" }, - "acq_connection_method": { - "value": "LSL", - "section": "acq_connection_method", - "readableName": "Acquisition Connection Method", - "helpTip": "Specifies the method used to connect to the data acquisition device (LSL or TCP). Default: LSL", - "recommended_values": [ - "LSL", - "TCP" - ], - "type": "str" - }, - "acq_host": { - "value": "127.0.0.1", - "section": "acq_config", - "readableName": "TCP Connection Host", - "helpTip": "Specifies the host for a TCP connection (used when Acquisition Connection Method is set to 'TCP'). Default: 127.0.0.1", - "recommended_values": "", - "type": "str" - }, - "acq_port": { - "value": "9000", - "section": "acq_config", - "readableName": "TCP Connection Port", - "helpTip": "Specifies the port for a TCP connection (used when Acquisition Connection Method is set to ‘TCP’). 
Default: 9000", - "recommended_values": "", - "type": "int" - }, "trigger_type": { "value": "text", "section": "bci_config", - "readableName": "Trigger Stimulus Type", - "helpTip": "Specifies whether to use a picture or auditory stimulus to calibrate trigger latency. Default: text", - "recommended_values": [ + "name": "Trigger Stimulus Type", + "helpTip": "Specifies whether to use text, image or auditory stimulus to calibrate trigger latency. Default: text", + "recommended": [ "image", "text" ], + "editable": true, "type": "str" }, "k_folds": { "value": "10", "section": "signal_config", - "readableName": "Number of Cross-Validation Folds", + "name": "Number of Cross-Validation Folds", "helpTip": "Specifies the number of folds used for cross-validation when calculating AUC. Default: 10", - "recommended_values": "[10]", + "recommended": "[10]", + "editable": true, "type": "int" }, "trial_window": { "value": "0.0:0.5", "section": "bci_config", - "readableName": "Trial Classification Window Length", + "name": "Trial Classification Window Length", "helpTip": "Specifies the window (in seconds) of the EEG data collection window after each stimulus presentation. Default: 0.0:0.5", - "recommended_values": [ + "recommended": [ "0.0:0.5", "0.0:0.8", "0.2:0.8" ], + "editable": true, "type": "range" }, "prestim_length": { "value": "1", "section": "bci_config", - "readableName": "Prestimulus Window Length", + "name": "Prestimulus Window Length", "helpTip": "Specifies the length (in seconds) of the EEG data window to return before inquiry presentation. Default: 1", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "alert_sound_file": { - "value": "bcipy/static/sounds/beep.wav", + "value": "beep.wav", "section": "bci_config", - "readableName": "Alert Tone", - "helpTip": "Specifies the path to an audio file to be played as an alert tone when experiments or offline analysis is complete. 
Default: bcipy/static/sounds/beep.wav", - "recommended_values": "", + "name": "Alert Tone", + "helpTip": "Specifies the path to an audio file to be played as an alert tone when experiments or offline analysis is complete. Default: beep.wav", + "recommended": "", + "editable": true, "type": "filepath" }, "signal_model_path": { "value": "", "section": "bci_config", - "readableName": "Signal Model Path", + "name": "Signal Model Path", "helpTip": "Directory of the pre-trained signal model. This is often the the calibration session directory.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "directorypath" }, "filter_high": { "value": "20", "section": "signal_config", - "readableName": "High Frequency Filter Cutoff", + "name": "High Frequency Filter Cutoff", "helpTip": "Specifies the frequency bound (in Hz) of the low-pass filter (high cutoff). Default: 20", - "recommended_values": [ + "recommended": [ "20", "45", "50" ], + "editable": true, "type": "float" }, "filter_low": { "value": "1", "section": "signal_config", - "readableName": "Low Frequency Filter Cutoff", + "name": "Low Frequency Filter Cutoff", "helpTip": "Specifies the frequency bound (in Hz) of the high-pass filter (low cutoff). Default: 1", - "recommended_values": [ + "recommended": [ "1", "2" ], + "editable": true, "type": "float" }, "filter_order": { "value": "2", "section": "signal_config", - "readableName": "Filter Order", + "name": "Filter Order", "helpTip": "Specifies the slope of the low-pass and high-pass filters. Default: 2", - "recommended_values": [ + "recommended": [ "2", "3" ], + "editable": true, "type": "float" }, "notch_filter_frequency": { "value": "60", "section": "signal_config", - "readableName": "Notch Filter Frequency", + "name": "Notch Filter Frequency", "helpTip": "Specifies the frequency (in Hz) of the notch filter used to remove electrical artifact from the surrounding environment. 
Default: 60", - "recommended_values": [ + "recommended": [ "59", "60" ], + "editable": true, "type": "float" }, "down_sampling_rate": { "value": "2", "section": "signal_config", - "readableName": "Downsampling Rate", + "name": "Downsampling Rate", "helpTip": "Specifies the decimation factor (integer only) for downsampling of EEG data. Default: 2", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, - "artifact_rejection": { - "value": "false", - "section": "artifact_rejection", - "readableName": "Artifact Rejection On/Off", - "helpTip": "If ‘true’, the system will detect and reject inquiries containing unwanted artifacts (e.g. blinks). This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "bool" - }, - "high_voltage_threshold": { - "value": "false", - "section": "artifact_rejection", - "readableName": "High Voltage Threshold On/Off", - "helpTip": "If ‘true’, an upper voltage threshold will be set for artifact rejection. Detection of values above the specified threshold will trigger rejection of a inquiry. This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "bool" - }, - "high_voltage_value": { - "value": "75E+6", - "section": "artifact_rejection", - "readableName": "High Voltage Threshold Value", - "helpTip": "Specifies the high voltage threshold (in microvolts) for artifact rejection (High Voltage Threshold must be set to ‘true’). Default: 75E+6. This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "float" - }, - "low_voltage_threshold": { - "value": "false", - "section": "artifact_rejection", - "readableName": "Low Voltage Threshold On/Off", - "helpTip": "If ‘true’, a lower voltage threshold will be set for artifact rejection. Detection of values below the specified threshold will trigger rejection of a inquiry. 
This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "bool" - }, - "low_voltage_value": { - "value": "-75E-6", - "section": "artifact_rejection", - "readableName": "LowVoltage Threshold Value", - "helpTip": "Specifies the low voltage threshold (in microvolts) for artifact rejection (Low Voltage Threshold must be set to ‘true’). Default: -75E-6 This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "float" - }, "summarize_session": { "value": "true", "section": "bci_config", - "readableName": "Summarize Session Data", + "name": "Summarize Session Data", "helpTip": "If 'true', writes an Excel file which summarizes the session data by charting evidence per inquiry.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "bool" }, "parameter_location": { "value": "bcipy/parameters/parameters.json", "section": "bci_config", - "readableName": "Parameter File", + "name": "Parameter File", "helpTip": "Specifies the file containing the current system parameters. Default: bcipy/parameters/parameters.json", - "recommended_values": [ + "recommended": [ "parameters/parameters.json" ], + "editable": true, "type": "filepath" }, "data_save_loc": { "value": "data/", "section": "bci_config", - "readableName": "Data Save Location", + "name": "Data Save Location", "helpTip": "Specifies the location in which to save data files after each recording. This must be a directory ending with /. Default: data/", - "recommended_values": [ + "recommended": [ "data/" ], + "editable": true, "type": "directorypath" }, "full_screen": { "value": "false", "section": "bci_config", - "readableName": "Full Screen Mode On/Off", + "name": "Full Screen Mode", "helpTip": "If ‘true’, the task will be displayed in full screen mode. 
If ‘false’, the task will be displayed in a window.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "bool" }, "window_height": { "value": "500", "section": "bci_config", - "readableName": "Task Window Height", + "name": "Task Window Height", "helpTip": "Specifies the height (in norm units) of the task window when not in full screen mode (Full Screen Mode On/Off must be set to ‘false’). See https://www.psychopy.org/general/units.html. Default: 500", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "window_width": { "value": "500", "section": "bci_config", - "readableName": "Task Window Width", + "name": "Task Window Width", "helpTip": "Specifies the width (in norm units) of the task window when not in full screen mode (Full Screen Mode On/Off must be set to ‘false’). See https://www.psychopy.org/general/units.html. Default: 500", - "recommended_values": "", + "recommended": "", + "editable": true, + "type": "int" + }, + "matrix_rows": { + "value": "5", + "section": "bci_config", + "name": "Matrix Rows", + "helpTip": "Specifies the number of rows to use in the Matrix task. Rows * columns should be greater than or equal to the number of symbols.", + "recommended": "", + "editable": true, + "type": "int" + }, + "matrix_columns": { + "value": "6", + "section": "bci_config", + "name": "Matrix Columns", + "helpTip": "Specifies the number of columns to use in the Matrix task. Rows * columns should be greater than or equal to the number of symbols.", + "recommended": "", + "editable": true, "type": "int" }, + "matrix_width": { + "value": "0.7", + "section": "bci_config", + "name": "Matrix Width (%)", + "helpTip": "Specifies the max percentage of the display that the matrix grid should utilize. 
Must be between 0 and 1", + "recommended": "", + "editable": true, + "type": "float" + }, "acq_show_viewer": { "value": "false", "section": "acq_config", - "readableName": " EEG Viewer On/Off", - "helpTip": "If ‘true’, the EEG signal viewer will be displayed.", - "recommended_values": "", + "name": " EEG Viewer", + "helpTip": "If ‘true’, the EEG signal viewer will be displayed along with the Task.", + "recommended": "", + "editable": true, "type": "bool" }, "stim_screen": { "value": "0", "section": "bci_config", - "readableName": "Task Display Monitor", + "name": "Task Display Monitor", "helpTip": "Specifies which monitor to use for task display when two monitors are in use. If ‘0’, the task will be displayed on the primary monitor (with the EEG signal viewer on the second monitor, if EEG Viewer On/Off is set to ‘true’). If ‘1’, the opposite monitor assignment will be used.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "task_buffer_length": { "value": "2", "section": "bci_config", - "readableName": "Inter-inquiry Interval", + "name": "Inter-inquiry Interval", "helpTip": "Specifies the delay time (in seconds) between the final stimulus in one inquiry and the beginning (target stimulus or fixation cross) of the next inquiry in a task. Default: 2", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "is_txt_stim": { "value": "true", "section": "bci_config", - "readableName": "Text Stimuli On/Off", - "helpTip": "If ‘true’, text stimuli will be used. If ‘false’, image stimuli will be loaded from the folder specified in Image Stimulus Folder.", - "recommended_values": "", + "name": "Text Stimuli", + "helpTip": "If ‘true’, text stimuli will be used. If ‘false’, image stimuli will be loaded from the folder specified in Image Stimulus Folder. 
Default: true", + "recommended": "", + "editable": true, "type": "bool" }, "path_to_presentation_images": { "value": "bcipy/static/images/rsvp/", "section": "bci_config", - "readableName": "Image Stimulus Folder", + "name": "Image Stimulus Folder", "helpTip": "Specifies the location of image files to be used as stimuli (Text Stimuli On/Off must be set to ‘false’). This must be a directory ending with /.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "directorypath" }, "stim_space_char": { "value": "–", "section": "bci_config", - "readableName": "Space Character", + "name": "Space Character", "helpTip": "Specifies the text or Unicode character which represents a space during text-stimuli tasks. Default: –", - "recommended_values": [ + "recommended": [ "_", "–", "‒", "□" ], + "editable": true, "type": "str" }, "stim_order": { "value": "random", "section": "bci_config", - "readableName": "Stimuli Order", + "name": "Stimuli Order", "helpTip": "Specifies the ordering of stimuli in an inquiry. Default is random.", - "recommended_values": [ + "recommended": [ "alphabetical", "random" ], + "editable": true, "type": "str" }, "target_positions": { "value": "distributed", "section": "bci_config", - "readableName": "Target Positions", + "name": "Target Positions", "helpTip": "Specifies the positions of target stimuli in calibration task. Default is random.", - "recommended_values": [ + "recommended": [ "distributed", "random" ], + "editable": true, "type": "str" }, "nontarget_inquiries": { "value": "10", "section": "bci_config", - "readableName": "Percentage of Nontarget Inquiries", + "name": "Percentage of Nontarget Inquiries", "helpTip": "Specifies the percentage (0-100) of inquiries which target stimuli flashed is not in inquiry. 
Default is 10 percent.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "stim_length": { "value": "10", "section": "bci_config", - "readableName": "Stimuli Per inquiry", + "name": "Stimuli Per inquiry", "helpTip": "Specifies the number of stimuli to present in each inquiry. Default: 10", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "time_flash": { "value": "0.25", "section": "bci_config", - "readableName": "Stimulus Presentation Duration", + "name": "Stimulus Presentation Duration", "helpTip": "Specifies the duration of time (in seconds) that each stimulus is displayed in an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "time_prompt": { "value": "1", "section": "bci_config", - "readableName": "Time Prompt Stimuli (sec)", + "name": "Time Prompt Stimuli (sec)", "helpTip": "The amount of time in seconds to present the target stimuli prompt in an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "time_fixation": { "value": "0.5", "section": "bci_config", - "readableName": "Time Fixation Stimuli (sec)", + "name": "Time Fixation Stimuli (sec)", "helpTip": "The amount of time in seconds to present the fixation stimuli in an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": true, + "type": "float" + }, + "time_vep_animation": { + "value": "1.0", + "section": "bci_config", + "name": "Time VEP Animation (sec)", + "helpTip": "The amount of time in seconds for the animation moving symbols to boxes.", + "recommended": "", + "editable": true, "type": "float" }, "stim_jitter": { "value": "0.0", "section": "bci_config", - "readableName": "Stimulus Presentation Jitter (sec)", + "name": "Stimulus Presentation Jitter (sec)", "helpTip": "Specifies the time (sec) to jitter presentation rates. 
Default: 0.0", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "stim_pos_x": { "value": "0", "section": "bci_config", - "readableName": "Stimulus Position Horizontal", + "name": "Stimulus Position Horizontal", "helpTip": "Specifies the center point of the stimulus position along the X axis. Possible values range from -1 to 1, with 0 representing the center. Default: 0", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "stim_pos_y": { "value": "0", "section": "bci_config", - "readableName": "Stimulus Position Vertical", + "name": "Stimulus Position Vertical", "helpTip": "Specifies the center point of the stimulus position along the Y axis. Possible values range from -1 to 1, with 0 representing the center. Default: 0", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "font": { - "value": "Consolas", + "value": "Courier New", "section": "bci_config", - "readableName": "Font", - "helpTip": "Specifies the font used for all text stimuli. Default: Consolas", - "recommended_values": [ + "name": "Font", + "helpTip": "Specifies the font used for all text stimuli. Default: Courier New", + "recommended": [ "Courier New", - "Lucida Sans", - "Consolas" + "Lucida Sans" ], + "editable": true, "type": "str" }, "stim_height": { "value": "0.5", "section": "bci_config", - "readableName": "Stimulus Size", + "name": "Stimulus Size", "helpTip": "Specifies the height of text stimuli. See https://www.psychopy.org/general/units.html. Default: 0.5", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "stim_color": { "value": "white", "section": "bci_config", - "readableName": "Stimulus Color", + "name": "Stimulus Color", "helpTip": "Specifies the color of text stimuli within the RSVP stream. 
Default: white", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "str" }, "target_color": { "value": "white", "section": "bci_config", - "readableName": "Target Color", + "name": "Target Color", "helpTip": "Specifies the color of target characters during calibration. Default: white", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "str" }, "fixation_color": { "value": "red", "section": "bci_config", - "readableName": "Fixation Cross Color", + "name": "Fixation Cross Color", "helpTip": "Specifies the color of the fixation cross that appears before each inquiry. Default: red", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "str" }, "background_color": { "value": "black", "section": "bci_config", - "readableName": "Task Background Color", + "name": "Task Background Color", "helpTip": "Specifies the color of the task background. Default: black", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "str" }, "info_pos_x": { "value": "0", "section": "bci_config", - "readableName": "Position Text (X)", + "name": "Position Text (X)", "helpTip": "Position Text (X)", - "recommended_values": [ + "recommended": [ "0" ], + "editable": true, "type": "float" }, "info_pos_y": { "value": "-0.75", "section": "bci_config", - "readableName": "Position Text (Y)", + "name": "Position Text (Y)", "helpTip": "Position Text (Y)", - "recommended_values": [ + "recommended": [ "-0.75" ], + "editable": true, "type": "float" }, "info_text": { "value": "", "section": "bci_config", - "readableName": "Text below main presentation", + "name": "Text below main presentation", "helpTip": "Text below main presentation", - "recommended_values": [ + "recommended": [ "", "Demo Text", "DEMO" ], + "editable": true, "type": "str" }, "info_height": { "value": "0.1", "section": "bci_config", - "readableName": "Text below main presentation height", + "name": "Text below main presentation height", 
"helpTip": "Specifies the height of info text stimuli. See https://www.psychopy.org/general/units.html. Default: 0.1", - "recommended_values": [ + "recommended": [ "0.1" ], + "editable": true, "type": "float" }, "info_color": { "value": "white", "section": "bci_config", - "readableName": "Color Text", + "name": "Color Text", "helpTip": "Color Text", - "recommended_values": [ + "recommended": [ "white", "black", "blue" ], + "editable": true, "type": "str" }, "task_text": { "value": "HELLO_WORLD", "section": "bci_config", - "readableName": "Target Phrase", + "name": "Target Phrase", "helpTip": "Specifies the target phrase displayed at the top of the screen during text-stimuli copy/spelling tasks.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "str" }, "task_height": { "value": "0.1", "section": "bci_config", - "readableName": "Task Text Size", + "name": "Task Text Size", "helpTip": "Specifies the height of task-specific text, e.g. #/100 in calibration and target phrase in copy/spelling. See https://www.psychopy.org/general/units.html. Default: 0.1", - "recommended_values": [ + "recommended": [ "0.1" ], + "editable": true, "type": "float" }, "task_color": { "value": "white", "section": "bci_config", - "readableName": "Task Text Color", + "name": "Task Text Color", "helpTip": "Specifies the color of task-specific text, e.g. #/100 in calibration and target phrase in copy/spelling. Default: white", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "str" }, + "task_padding": { + "value": "0.05", + "section": "bci_config", + "name": "Task Bar Padding", + "helpTip": "Specifies the padding around the task bar text. 
Default: 0.05", + "recommended": [ + "0.05" + ], + "editable": true, + "type": "float" + }, "stim_number": { "value": "100", "section": "bci_config", - "readableName": "Number of Calibration inquiries", + "name": "Number of Calibration inquiries", "helpTip": "Specifies the number of inquiries to present in a calibration session. Default: 100", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "enable_breaks": { "value": "false", "section": "bci_config", - "readableName": "Automatic Calibration Breaks On/Off", + "name": "Automatic Calibration Breaks", "helpTip": "If ‘true’, automatic breaks will be added to the calibration session. If ‘false’, automatic breaks will not occur, but the session can still be paused by pressing Space.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "bool" }, "break_len": { "value": "30", "section": "bci_config", - "readableName": "Automatic Calibration Break Length", + "name": "Automatic Calibration Break Length", "helpTip": "Specifies the length (in seconds) of automatic calibration breaks (Automatic Calibration Breaks On/Off must be set to ‘true’).", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "trials_before_break": { "value": "20", "section": "bci_config", - "readableName": "Automatic Calibration Break Timing", + "name": "Automatic Calibration Break Timing", "helpTip": "Specifies the number of inquiries between automatic calibration breaks (Automatic Calibration Breaks On/Off must be set to ‘true’).", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "break_message": { "value": "Take a break!", "section": "bci_config", - "readableName": "Automatic Calibration Break Message", + "name": "Automatic Calibration Break Message", "helpTip": "Specifies the message displayed during automatic calibration breaks (Automatic Calibration Breaks On/Off must be set to ‘true’).", - "recommended_values": "", + 
"recommended": "", + "editable": true, "type": "str" }, - "min_inq_len": { - "value": "1", - "section": "bci_config", - "readableName": "Minimum Inquiry Length", - "helpTip": "The minimum number of inquiries to present in spelling tasks", - "recommended_values": [ - "1" - ], - "type": "int" - }, "max_inq_len": { "value": "50", "section": "bci_config", - "readableName": "Maximum Inquiry Length", + "name": "Maximum Inquiry Length", "helpTip": "Specifies the maximum number of inquiries to present in copy/spelling tasks. The task will end if this number is reached.", - "recommended_values": [ + "recommended": [ "20", "25" ], + "editable": true, "type": "int" }, "max_minutes": { "value": "20", "section": "bci_config", - "readableName": "Maximum Task Length (Time)", + "name": "Maximum Task Length (Time)", "helpTip": "Specifies the time limit (in minutes) for copy/spelling tasks. The task will end if this time limit is reached.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "max_selections": { "value": "25", "section": "bci_config", - "readableName": "Maximum Number of Selections", + "name": "Maximum Number of Selections", "helpTip": "The maximum number of selections for copy/spelling tasks. The task will end if this number is reached.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "decision_threshold": { "value": "0.8", "section": "bci_config", - "readableName": "Decision Threshold", + "name": "Decision Threshold", "helpTip": "Specifies the decision threshold for stimulus selection in copy/spelling class. If the posterior probability (combining EEG and language model evidence) for a stimulus reaches this threshold, it will be selected. Possible value range: 0.0-1.0. 
Default: 0.8", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "min_inq_per_series": { "value": "1", "section": "bci_config", - "readableName": "Minimum Inquiries Per Series", + "name": "Minimum Inquiries Per Series", "helpTip": "Specifies the minimum number of inquiries to present before making a decision in copy/spelling tasks. Default: 1", - "recommended_values": [ + "recommended": [ "1" ], + "editable": true, "type": "int" }, "max_inq_per_series": { "value": "11", "section": "bci_config", - "readableName": "Maximum Inquiries Per Series", + "name": "Maximum Inquiries Per Series", "helpTip": "Specifies the maximum number of inquiries to present before making a decision in copy/spelling tasks.", - "recommended_values": [ + "recommended": [ "10", "15" ], + "editable": true, "type": "int" }, "backspace_always_shown": { "value": "true", "section": "bci_config", - "readableName": "Always Show Backspace On/Off", + "name": "Always Show Backspace", "helpTip": "If ‘true’, the backspace character will be included in every inquiry in text-stimuli copy/spelling tasks. If ‘false’, the backspace character will be treated the same as other characters, appearing in inquiries only when warranted by EEG/language model evidence.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "bool" }, "spelled_letters_count": { "value": "0", "section": "bci_config", - "readableName": "Pre-Selected Letters Count", + "name": "Pre-Selected Letters Count", "helpTip": "Specifies the number of letters in the target phrase that are already typed when the task begins, e.g. if the target phrase is “THE_DOG”, setting this parameter to ‘4’ would display “THE_” as the typed string, and the user would begin typing with D. 
If ‘0’, the typed string will be blank.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "int" }, "lang_model_type": { - "value": "MIXTURE", + "value": "UNIFORM", "section": "lang_model_config", - "readableName": "Language Model Type", - "helpTip": "Specifies which language model to use. Default: MIXTURE", - "recommended_values": [ + "name": "Language Model Type", + "helpTip": "Specifies which language model to use. Default: UNIFORM", + "recommended": [ "UNIFORM", "CAUSAL", "KENLM", - "MIXTURE" + "MIXTURE", + "ORACLE" ], + "editable": true, "type": "str" }, "lm_backspace_prob": { - "value": "0.05", + "value": "0.0", "section": "bci_config", - "readableName": "Backspace Probability", - "helpTip": "Specifies the initial probability assigned to the backspace character in the language model. Possible value range: 0.0-1.0. Default: 0.05", - "recommended_values": "", + "name": "Backspace Probability", + "helpTip": "Specifies the minimum probability assigned to the backspace character in the language model. Possible value range: 0.0-1.0. Default: 0.0", + "recommended": "0.05", + "editable": true, "type": "float" }, "show_preview_inquiry": { "value": "false", "section": "bci_config", - "readableName": "Preview Inquiry On/Off", - "helpTip": "If ‘true’, the inquiry will be previewed as applicable for the task. *Note* Not all tasks will have this enabled!", - "recommended_values": "", + "name": "Preview Inquiry Display", + "helpTip": "If ‘true’, the inquiry will be previewed as applicable for the Task. *Note* Not all tasks will have this enabled!", + "recommended": "", + "editable": true, "type": "bool" }, "preview_inquiry_progress_method": { "value": "0", "section": "bci_config", - "readableName": "Preview Inquiry Progression Method", + "name": "Preview Inquiry Progression Method", "helpTip": "If show_preview_inquiry true, this will determine how to proceed after a key hit. 
0 = preview only; 1 = press to confirm; 2 = press to skip to another inquiry", - "recommended_values": [ + "recommended": [ "0", "1", "2" ], + "editable": true, "type": "int" }, "preview_inquiry_length": { "value": "5", "section": "bci_config", - "readableName": "Preview Inquiry Display Length", + "name": "Preview Inquiry Display Length", "helpTip": "Length of time in seconds to present an inquiry preview to the user.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "preview_inquiry_key_input": { "value": "return", "section": "bci_config", - "readableName": "Preview Inquiry Display Key Input Method", + "name": "Preview Inquiry Display Key Input Method", "helpTip": "Defines the key used to engage with inquiry preview.", - "recommended_values": [ + "recommended": [ "space", "escape", "return" ], + "editable": true, "type": "str" }, "preview_inquiry_isi": { "value": "1", "section": "bci_config", - "readableName": "Preview Inquiry Inter-Stimulus Interval", + "name": "Preview Inquiry Inter-Stimulus Interval", "helpTip": "The time between previewing an inquiry and the start of an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "show_feedback": { "value": "true", "section": "bci_config", - "readableName": "Feedback On/Off", - "helpTip": "If ‘true’, feedback will be shown after each inquiry.", - "recommended_values": "", + "name": "Feedback Display", + "helpTip": "If ‘true’, feedback will be shown after each inquiry. If ‘false’, feedback will not be shown. *Note* Not all tasks will have this enabled!", + "recommended": "", + "editable": true, "type": "bool" }, "feedback_duration": { "value": "2", "section": "bci_config", - "readableName": "Feedback Time (seconds)", + "name": "Feedback Time (seconds)", "helpTip": "Specifies the length in time (seconds) feedback will be displayed after each inquiry in registered tasks (ex. RSVP Copy Phrase). 
Default: 2", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "psd_method": { "value": "Welch", "section": "bci_config", - "readableName": "Power Spectral Density Method", + "name": "Power Spectral Density Method", "helpTip": "Specifies the method used to approximate power spectral density bands (Welch or MultiTaper). Default: Welch", - "recommended_values": [ + "recommended": [ "Welch", "MutliTaper" ], + "editable": true, "type": "str" } } \ No newline at end of file diff --git a/bcipy/helpers/tests/test_acquisition.py b/bcipy/helpers/tests/test_acquisition.py index 0bb970857..9daa0ce28 100644 --- a/bcipy/helpers/tests/test_acquisition.py +++ b/bcipy/helpers/tests/test_acquisition.py @@ -1,4 +1,5 @@ """Tests for acquisition helper.""" +import logging import shutil import unittest from pathlib import Path @@ -7,9 +8,8 @@ from bcipy.acquisition.devices import DeviceSpec, DeviceStatus from bcipy.config import DEFAULT_PARAMETERS_PATH from bcipy.helpers.acquisition import (RAW_DATA_FILENAME, StreamType, - active_content_types, init_device, - init_eeg_acquisition, - is_stream_type_active, + active_content_types, init_acquisition, + init_device, is_stream_type_active, max_inquiry_duration, parse_stream_type, raw_data_filename, server_spec, stream_types) @@ -38,12 +38,14 @@ def tearDown(self): shutil.rmtree(self.save) def test_init_acquisition(self): - """Test init_eeg_acquisition with LSL client.""" + """Test init_acquisition with LSL client.""" params = self.parameters + logger = Mock(spec=logging.Logger) + logger.info = lambda x: x params['acq_mode'] = 'EEG:passive/DSI-24' - client, servers = init_eeg_acquisition(params, self.save, server=True) + client, servers = init_acquisition(params, self.save, server=True) client.stop_acquisition() client.cleanup() diff --git a/bcipy/helpers/tests/test_load.py b/bcipy/helpers/tests/test_load.py index 8d0f9cb21..ceaaf6890 100644 --- a/bcipy/helpers/tests/test_load.py +++ 
b/bcipy/helpers/tests/test_load.py @@ -11,8 +11,7 @@ from bcipy.config import (DEFAULT_ENCODING, DEFAULT_EXPERIMENT_PATH, DEFAULT_FIELD_PATH, DEFAULT_PARAMETERS_PATH, EXPERIMENT_FILENAME, FIELD_FILENAME) -from bcipy.helpers.exceptions import (BciPyCoreException, - InvalidExperimentException) +from bcipy.exceptions import BciPyCoreException, InvalidExperimentException from bcipy.helpers.load import (choose_signal_model, choose_signal_models, copy_parameters, extract_mode, load_experiment_fields, load_experiments, @@ -155,6 +154,12 @@ def setUp(self): self.directory_name = 'test_data_load_user' self.data_save_loc = f'{self.directory_name}/' + def tearDown(self): + try: + shutil.rmtree(self.data_save_loc) + except FileNotFoundError: + pass + def test_user_load_with_no_directory_written(self): """Use defined data save location without writing anything""" response = load_users(self.data_save_loc) @@ -163,7 +168,7 @@ def test_user_load_with_no_directory_written(self): def test_user_load_with_valid_directory(self): user = 'user_001' - file_path = f'{self.directory_name}/experiment/{user}' + file_path = f'{self.directory_name}/{user}/experiment' os.makedirs(file_path) response = load_users(self.data_save_loc) @@ -174,36 +179,33 @@ def test_user_load_with_valid_directory(self): # assert user returned is user defined above self.assertEqual(response[0], user) - shutil.rmtree(self.data_save_loc) def test_user_load_with_invalid_directory(self): # create an invalid save structure and assert expected behavior. 
- user = 'user_001' - file_path = f'{self.directory_name}/experiment{user}' + file_path = f'{self.directory_name}/' os.makedirs(file_path) response = load_users(self.data_save_loc) length_of_users = len(response) self.assertTrue(length_of_users == 0) - shutil.rmtree(self.data_save_loc) class TestExtractMode(unittest.TestCase): def test_extract_mode_calibration(self): - data_save_path = 'data/default/user/user_RSVP_Calibration_Mon_01_Mar_2021_11hr19min49sec_-0800' + data_save_path = 'data/user/default/user_RSVP_Calibration_Mon_01_Mar_2021_11hr19min49sec_-0800' expected_mode = 'calibration' response = extract_mode(data_save_path) self.assertEqual(expected_mode, response) def test_extract_mode_copy_phrase(self): - data_save_path = 'data/default/user/user_RSVP_Copy_Phrase_Mon_01_Mar_2021_11hr19min49sec_-0800' + data_save_path = 'data/user/default/user_RSVP_Copy_Phrase_Mon_01_Mar_2021_11hr19min49sec_-0800' expected_mode = 'copy_phrase' response = extract_mode(data_save_path) self.assertEqual(expected_mode, response) def test_extract_mode_without_mode_defined(self): - invalid_data_save_dir = 'data/default/user/user_bad_dir' + invalid_data_save_dir = 'data/user/default/user_bad_dir' with self.assertRaises(BciPyCoreException): extract_mode(invalid_data_save_dir) diff --git a/bcipy/helpers/tests/test_parameters.py b/bcipy/helpers/tests/test_parameters.py index c46b20088..58b0acbd9 100644 --- a/bcipy/helpers/tests/test_parameters.py +++ b/bcipy/helpers/tests/test_parameters.py @@ -52,19 +52,21 @@ def test_load_data(self): "fake_data": { "value": "true", "section": "bci_config", - "readableName": "Fake EEG Data On/Off", + "name": "Fake EEG Data On/Off", "helpTip": "If ‘true’, fake EEG data will be used instead of real EEG data.", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "bool" }, "acq_device": { "value": "LSL", "section": "acq_config", - "readableName": "Acquisition Device Connection Method", + "name": "Acquisition Device Connection 
Method", "helpTip": "Specifies the method used to connect to the data acquisition device (LSL or DSI).", - "recommended_values": ["DSI", "LSL"], + "recommended": ["DSI", "LSL"], + "editable": True, "type": "str" } } @@ -78,8 +80,9 @@ def test_load_data_with_missing_field(self): "fake_data": { "value": "true", "section": "bci_config", - "readableName": "Fake EEG Data On/Off", - "recommended_values": "", + "name": "Fake EEG Data On/Off", + "recommended": "", + "editable": True, "type": "bool" } } @@ -93,9 +96,10 @@ def test_load_data_with_unsupported_type(self): "fake_data": { "value": "true", "section": "bci_config", - "readableName": "Fake EEG Data On/Off", + "name": "Fake EEG Data On/Off", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "custom_type" } } @@ -126,41 +130,46 @@ def test_cast_values(self): "myint": { "value": "1", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" }, "mybool": { "value": "true", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "bool" }, "mypath": { "value": "bcipy/parameters/parameters.json", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "directorypath" }, "mystr": { "value": "hello", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "str" }, "my_int_range": { "value": "5:10", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "range" } } @@ -194,9 +203,10 @@ def test_setting_valid_values(self): parameters['mystr'] = { "value": "hello", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": 
"", + "recommended": "", + "editable": True, "type": "str" } self.assertEqual(len(parameters), 1) @@ -206,16 +216,18 @@ def test_setting_invalid_values(self): missing_help_tip = { "value": "true", "section": "", - "readableName": "", - "recommended_values": "", + "name": "", + "recommended": "", + "editable": True, "type": "bool" } unsupported_type = { "value": "true", "section": "bci_config", - "readableName": "Fake EEG Data On/Off", + "name": "Fake EEG Data On/Off", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "custom_type" } @@ -234,9 +246,10 @@ def test_updating_uncast_values(self): parameters['mystr'] = { "value": "hello", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "str" } parameters['mystr']['value'] = 'hello world' @@ -248,25 +261,28 @@ def test_setting_existing_cast_values(self): "acq_port": { "value": "8000", "section": "acquisition", - "readableName": "Acquisition Port", + "name": "Acquisition Port", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" }, "acq_device": { "value": "LSL", "section": "acquisition", - "readableName": "Acquisition Device", + "name": "Acquisition Device", "helpTip": "", - "recommended_values": ["LSL", "DSI"], + "recommended": ["LSL", "DSI"], + "editable": True, "type": "str" }, "is_txt_stim": { "value": "false", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "bool" } } @@ -293,17 +309,19 @@ def test_update(self): "acq_port": { "value": "8000", "section": "acquisition", - "readableName": "Acquisition Port", + "name": "Acquisition Port", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" }, "acq_device": { "value": "LSL", "section": "acquisition", - "readableName": "Acquisition Device", + "name": 
"Acquisition Device", "helpTip": "", - "recommended_values": ["LSL", "DSI"], + "recommended": ["LSL", "DSI"], + "editable": True, "type": "str" } } @@ -326,17 +344,19 @@ def test_values(self): "acq_port": { "value": "8000", "section": "acquisition", - "readableName": "Acquisition Port", + "name": "Acquisition Port", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" }, "acq_device": { "value": "LSL", "section": "acquisition", - "readableName": "Acquisition Device", + "name": "Acquisition Device", "helpTip": "", - "recommended_values": ["LSL", "DSI"], + "recommended": ["LSL", "DSI"], + "editable": True, "type": "str" } } @@ -391,9 +411,10 @@ def test_save_new(self): parameters['mystr'] = { "value": "hello", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "str" } with self.assertRaises(Exception): @@ -413,9 +434,10 @@ def test_items(self): parameters['mystr'] = { "value": "hello", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "str" } self.assertEqual(len(parameters.items()), 1) @@ -445,9 +467,10 @@ def test_check_entry(self): "fake_data", { "value": "true", "section": "bci_config", - "readableName": "Fake Data Sessions", + "name": "Fake Data Sessions", "helpTip": "If true, fake data server used", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "bool" }) with self.assertRaises(Exception): @@ -461,9 +484,10 @@ def test_check_entry_bool_type(self): "fake_data", { "value": True, "section": "bci_config", - "readableName": "Fake Data Sessions", + "name": "Fake Data Sessions", "helpTip": "If true, fake data server used", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "bool" }) @@ -487,17 +511,19 @@ def test_add_missing(self): entry1 = { "value": "8000", "section": "acquisition", - 
"readableName": "Acquisition Port", + "name": "Acquisition Port", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" } entry2 = { "value": "LSL", "section": "acquisition", - "readableName": "Acquisition Device", + "name": "Acquisition Device", "helpTip": "", - "recommended_values": ["LSL", "DSI"], + "recommended": ["LSL", "DSI"], + "editable": True, "type": "str" } @@ -519,41 +545,46 @@ def test_changed_parameters(self): entry1 = { "value": "8000", "section": "acquisition", - "readableName": "Acquisition Port", + "name": "Acquisition Port", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" } entry2 = { "value": "75E+6", "section": "artifact_rejection", - "readableName": "High Voltage Threshold Value", + "name": "High Voltage Threshold Value", "helpTip": "Specifies the high voltage threshold (in microvolts)", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "float" } entry2_same = { "value": "75000000.0", "section": "artifact_rejection", - "readableName": "High Voltage Threshold Value", + "name": "High Voltage Threshold Value", "helpTip": "Specifies the high voltage threshold (in microvolts)", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "float" } entry3 = { "value": "DSI-24", "section": "acquisition", - "readableName": "Acquisition Device", + "name": "Acquisition Device", "helpTip": "", - "recommended_values": ["DSI-24", "DSI-VR300"], + "recommended": ["DSI-24", "DSI-VR300"], + "editable": True, "type": "str" } entry3_modified = { "value": "DSI-VR300", "section": "acquisition", - "readableName": "Acquisition Device", + "name": "Acquisition Device", "helpTip": "", - "recommended_values": ["DSI-24", "DSI-VR300"], + "recommended": ["DSI-24", "DSI-VR300"], + "editable": True, "type": "str" } parameters = Parameters(source=None) @@ -583,25 +614,28 @@ def test_instantiate_tuple(self): "a": { "value": "1", "section": 
"", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": "", + "recommended": "", + "editable": True, "type": "int" }, "b": { "value": "2", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": [], + "recommended": [], + "editable": True, "type": "int" }, "c": { "value": "3", "section": "", - "readableName": "", + "name": "", "helpTip": "", - "recommended_values": [], + "recommended": [], + "editable": True, "type": "str" } } diff --git a/bcipy/helpers/tests/test_raw_data.py b/bcipy/helpers/tests/test_raw_data.py index 803697915..12fc67bb5 100644 --- a/bcipy/helpers/tests/test_raw_data.py +++ b/bcipy/helpers/tests/test_raw_data.py @@ -10,7 +10,7 @@ from mockito import any, mock, when, verify, unstub -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException from bcipy.helpers.raw_data import (RawData, RawDataReader, RawDataWriter, load, sample_data, settings, write) diff --git a/bcipy/helpers/tests/test_report.py b/bcipy/helpers/tests/test_report.py index 7df690816..3d28833d4 100644 --- a/bcipy/helpers/tests/test_report.py +++ b/bcipy/helpers/tests/test_report.py @@ -30,7 +30,8 @@ def test_init_no_name_default(self): self.assertEqual(report.name, Report.DEFAULT_NAME) def test_init_sections(self): - report_section = SessionReportSection() + summary = {} + report_section = SessionReportSection(summary) section = [report_section] report = Report(self.temp_dir, sections=section) self.assertEqual(report.sections, section) @@ -64,7 +65,8 @@ def test_add_section(self): def test_save(self): report = Report(self.temp_dir) - report_section = SessionReportSection() + summary = {} + report_section = SessionReportSection(summary) report.add(report_section) report.save() self.assertTrue(os.path.exists(os.path.join(self.temp_dir, report.name))) @@ -113,7 +115,8 @@ def setUp(self) -> None: } def test_init(self): - report_section = SessionReportSection() + summary = {} + 
report_section = SessionReportSection(summary) self.assertIsInstance(report_section, ReportSection) self.assertIsNotNone(report_section.style) @@ -124,7 +127,8 @@ def test_create_summary_text(self): self.assertIsInstance(table, Flowable) def test_create_header(self): - report_section = SessionReportSection() + summary = {} + report_section = SessionReportSection(summary) header = report_section._create_header() self.assertIsInstance(header, Paragraph) diff --git a/bcipy/helpers/tests/test_stimuli.py b/bcipy/helpers/tests/test_stimuli.py index 201951410..2c1dc5b2f 100644 --- a/bcipy/helpers/tests/test_stimuli.py +++ b/bcipy/helpers/tests/test_stimuli.py @@ -9,7 +9,7 @@ from mockito import any, mock, unstub, verify, when from psychopy import core -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException from bcipy.helpers.stimuli import (DEFAULT_FIXATION_PATH, InquiryReshaper, StimuliOrder, TargetPositions, TrialReshaper, alphabetize, diff --git a/bcipy/helpers/tests/test_triggers.py b/bcipy/helpers/tests/test_triggers.py index 9b9d2a59a..478015733 100644 --- a/bcipy/helpers/tests/test_triggers.py +++ b/bcipy/helpers/tests/test_triggers.py @@ -5,7 +5,7 @@ import psychopy from mockito import any, mock, unstub, verify, when -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException from bcipy.helpers.triggers import (FlushFrequency, Trigger, TriggerHandler, TriggerType, _calibration_trigger, apply_offsets, exclude_types, diff --git a/bcipy/helpers/tests/test_validate.py b/bcipy/helpers/tests/test_validate.py index dc398178d..0c7c6a8f6 100644 --- a/bcipy/helpers/tests/test_validate.py +++ b/bcipy/helpers/tests/test_validate.py @@ -2,8 +2,7 @@ from bcipy.config import DEFAULT_EXPERIMENT_ID from bcipy.helpers.validate import validate_experiment, validate_experiments -from bcipy.helpers.save import save_experiment_data -from bcipy.helpers.exceptions import ( +from 
bcipy.exceptions import ( InvalidExperimentException, InvalidFieldException, UnregisteredExperimentException, @@ -26,24 +25,6 @@ def test_validate_experiment_throws_unregistered_expection_on_unregistered_exper with self.assertRaises(UnregisteredExperimentException): validate_experiment(experiment_name) - def test_save_experiment_data_throws_unregistered_exception_on_unregistered_fields(self): - # create a fake experiment to load - experiment_name = 'test' - fields = { - 'registered_field': { - 'help_text': 'test', - 'type': 'int' - } - } - experiment = { - experiment_name: { - 'fields': [{'does_not_exist': {'required': 'false', 'anonymize': 'true'}}], 'summary': ''} - } - - # assert it raises the expected exception - with self.assertRaises(UnregisteredFieldException): - save_experiment_data(experiment, fields, '.', 'test_experiment.json') - def test_validate_experiment_throws_file_not_found_with_incorrect_experiment_path(self): # define an invalid path path = 'does/not/exist' @@ -71,7 +52,9 @@ class TestValidateExperiments(unittest.TestCase): } experiments = { experiment_name: { - 'fields': [{'registered_field': {'required': 'false', 'anonymize': 'true'}}], 'summary': ''} + 'fields': [{'registered_field': {'required': 'false', 'anonymize': 'true'}}], + 'summary': '', + 'protocol': ''} } def test_validate_experiments_returns_true_on_valid_experiment(self): @@ -82,7 +65,8 @@ def test_validate_experiments_returns_true_on_valid_experiment(self): def test_validate_experiments_throws_invalid_experiment_exception_on_invalid_experiment_no_field(self): experiments = { 'invalid': { - 'summary': ''} + 'summary': '', + 'protocol': ''} } with self.assertRaises(InvalidExperimentException): validate_experiments(experiments, self.fields) @@ -91,7 +75,7 @@ def test_validate_experiments_throws_invalid_experiment_exception_on_invalid_exp experiments = { 'invalid': { 'summary': '', - 'fields': 'should_be_list!'} + 'fields': 'should_be_list!', 'protocol': ''} } with 
self.assertRaises(InvalidExperimentException): validate_experiments(experiments, self.fields) @@ -100,7 +84,7 @@ def test_validate_experiments_throws_invalid_experiment_exception_on_invalid_exp experiments = { 'invalid': { 'summary': [], - 'fields': []} + 'fields': [], 'protocol': ''} } with self.assertRaises(InvalidExperimentException): validate_experiments(experiments, self.fields) @@ -108,7 +92,7 @@ def test_validate_experiments_throws_invalid_experiment_exception_on_invalid_exp def test_validate_experiments_throws_invalid_experiment_exception_on_invalid_experiment_no_summary(self): experiments = { 'invalid': { - 'fields': []} + 'fields': [], 'protocol': ''} } with self.assertRaises(InvalidExperimentException): validate_experiments(experiments, self.fields) @@ -116,7 +100,7 @@ def test_validate_experiments_throws_invalid_experiment_exception_on_invalid_exp def test_validate_experiments_throws_invalid_field_exception_on_invalid_field_no_required(self): experiments = { self.experiment_name: { - 'fields': [{'registered_field': {'anonymize': 'true'}}], 'summary': ''} + 'fields': [{'registered_field': {'anonymize': 'true'}}], 'summary': '', 'protocol': ''} } with self.assertRaises(InvalidFieldException): validate_experiments(experiments, self.fields) @@ -124,7 +108,7 @@ def test_validate_experiments_throws_invalid_field_exception_on_invalid_field_no def test_validate_experiments_throws_invalid_field_exception_on_invalid_field_no_anonymize(self): experiments = { self.experiment_name: { - 'fields': [{'registered_field': {'required': 'true'}}], 'summary': ''} + 'fields': [{'registered_field': {'required': 'true'}}], 'summary': '', 'protocol': ''} } with self.assertRaises(InvalidFieldException): validate_experiments(experiments, self.fields) @@ -132,7 +116,9 @@ def test_validate_experiments_throws_invalid_field_exception_on_invalid_field_no def test_validate_experiments_throws_unregistered_exception_on_unregistered_fields(self): experiment = { self.experiment_name: { - 
'fields': [{'does_not_exist': {'required': 'false', 'anonymize': 'true'}}], 'summary': ''} + 'fields': [{'does_not_exist': {'required': 'false', 'anonymize': 'true'}}], + 'summary': '', + 'protocol': ''} } # assert it raises the expected exception diff --git a/bcipy/helpers/triggers.py b/bcipy/helpers/triggers.py index 259b46d69..47624ffc3 100644 --- a/bcipy/helpers/triggers.py +++ b/bcipy/helpers/triggers.py @@ -5,13 +5,13 @@ from psychopy import core, visual -from bcipy.config import DEFAULT_ENCODING +from bcipy.config import DEFAULT_ENCODING, SESSION_LOG_FILENAME from bcipy.helpers.clock import Clock -from bcipy.helpers.exceptions import BciPyCoreException +from bcipy.exceptions import BciPyCoreException from bcipy.helpers.parameters import Parameters from bcipy.helpers.stimuli import resize_image -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) NONE_VALUES = ['0', '0.0'] diff --git a/bcipy/helpers/validate.py b/bcipy/helpers/validate.py index d9d47a4d9..7e6e75520 100644 --- a/bcipy/helpers/validate.py +++ b/bcipy/helpers/validate.py @@ -7,11 +7,12 @@ FIELD_FILENAME) from bcipy.helpers.load import load_experiments, load_fields from bcipy.helpers.system_utils import is_battery_powered, is_connected, is_screen_refresh_rate_low -from bcipy.helpers.exceptions import (InvalidFieldException, - InvalidExperimentException, - UnregisteredExperimentException, - UnregisteredFieldException) +from bcipy.exceptions import (InvalidFieldException, + InvalidExperimentException, + UnregisteredExperimentException, + UnregisteredFieldException) from bcipy.gui.alert import confirm +from bcipy.task.orchestrator.protocol import validate_protocol_string def validate_bcipy_session(parameters: dict, fake_data: bool) -> bool: @@ -46,7 +47,7 @@ def validate_experiment( experiment_name: str, experiment_path: str = f'{DEFAULT_EXPERIMENT_PATH}/{EXPERIMENT_FILENAME}', field_path: str = f'{DEFAULT_FIELD_PATH}/{FIELD_FILENAME}' -) -> bool: +) -> dict: """Validate 
Experiment. Validate the experiment is in the correct format and the fields are properly registered. @@ -65,7 +66,7 @@ def validate_experiment( _validate_experiment_format(experiment, experiment_name) _validate_experiment_fields(experiment['fields'], fields) - return True + return experiment def _validate_experiment_fields(experiment_fields, fields): @@ -118,6 +119,9 @@ def _validate_experiment_format(experiment, name): assert isinstance(exp_summary, str) experiment_fields = experiment['fields'] assert isinstance(experiment_fields, list) + protocol = experiment['protocol'] + if protocol: + validate_protocol_string(protocol) except KeyError: raise InvalidExperimentException( f'Experiment [{name}] is formatted incorrectly. It should contain the keys: summary and fields.') diff --git a/bcipy/helpers/visualization.py b/bcipy/helpers/visualization.py index 38815a95d..2c4402736 100644 --- a/bcipy/helpers/visualization.py +++ b/bcipy/helpers/visualization.py @@ -9,6 +9,7 @@ import numpy as np import pandas as pd import seaborn as sns + from matplotlib.figure import Figure from matplotlib.patches import Ellipse from mne import Epochs @@ -18,10 +19,11 @@ import bcipy.acquisition.devices as devices from bcipy.config import (DEFAULT_DEVICE_SPEC_FILENAME, DEFAULT_GAZE_IMAGE_PATH, RAW_DATA_FILENAME, - TRIGGER_FILENAME) + TRIGGER_FILENAME, SESSION_LOG_FILENAME, + DEFAULT_PARAMETERS_PATH) from bcipy.helpers.acquisition import analysis_channels from bcipy.helpers.convert import convert_to_mne -from bcipy.helpers.load import choose_csv_file, load_raw_data +from bcipy.helpers.load import choose_csv_file, load_raw_data, load_json_parameters from bcipy.helpers.parameters import Parameters from bcipy.helpers.raw_data import RawData from bcipy.helpers.stimuli import mne_epochs @@ -29,7 +31,7 @@ from bcipy.signal.process import (Composition, ERPTransformParams, get_default_transform) -log = logging.getLogger(__name__) +logger = logging.getLogger(SESSION_LOG_FILENAME) def 
clip_to_display(data, screen_limits): @@ -597,7 +599,7 @@ def visualize_csv_eeg_triggers(trigger_col: Optional[int] = None): plt.ylabel('Trigger Value') plt.xlabel('Samples') - log.info('Press Ctrl + C to exit!') + logger.info('Press Ctrl + C to exit!') # Show us the figure! Depending on your OS / IDE this may not close when # The window is closed, see the message above plt.show() @@ -666,7 +668,11 @@ def visualize_evokeds(epochs: Tuple[Epochs, Epochs], return fig -def visualize_session_data(session_path: str, parameters: Union[dict, Parameters], show=True) -> Figure: +def visualize_session_data( + session_path: str, + parameters: Union[dict, Parameters], + show=True, + save=True) -> Figure: """Visualize Session Data. This method is used to load and visualize EEG data after a session. @@ -683,6 +689,7 @@ def visualize_session_data(session_path: str, parameters: Union[dict, Parameters Output: Figure of Session Data """ + logger.info(f"Visualizing session data at {session_path}") # extract all relevant parameters trial_window = parameters.get("trial_window") @@ -728,7 +735,7 @@ def visualize_session_data(session_path: str, parameters: Union[dict, Parameters transform=default_transform, plot_average=True, plot_topomaps=True, - save_path=session_path, + save_path=session_path if save else None, show=show, ) @@ -752,3 +759,34 @@ def visualize_gaze_accuracies(accuracy_dict: Dict[str, np.ndarray], ax.set_title('Overall Accuracy: ' + str(round(accuracy, 2))) return fig + + +def erp(): + import argparse + + parser = argparse.ArgumentParser(description='Visualize ERP data') + + parser.add_argument( + '-s', '--session_path', + type=str, + help='Path to the session directory', + required=True) + parser.add_argument( + '-p', '--parameters', + type=str, + help='Path to the parameters file', + default=DEFAULT_PARAMETERS_PATH) + parser.add_argument( + '--show', + action='store_true', + help='Whether to show the figure', + default=False) + parser.add_argument( + '--save', + 
action='store_true', + help='Whether to save the figure', default=True) + + args = parser.parse_args() + + parameters = load_json_parameters(args.parameters, value_cast=True) + visualize_session_data(args.session_path, parameters, args.show, args.save) diff --git a/bcipy/language/demo/demo_kenlm.py b/bcipy/language/demo/demo_kenlm.py index d613c8aa1..d90421e42 100644 --- a/bcipy/language/demo/demo_kenlm.py +++ b/bcipy/language/demo/demo_kenlm.py @@ -4,7 +4,7 @@ from bcipy.helpers.symbols import alphabet from bcipy.language.main import ResponseType from bcipy.config import LM_PATH -from bcipy.helpers.exceptions import KenLMInstallationException +from bcipy.exceptions import KenLMInstallationException try: import kenlm diff --git a/bcipy/language/main.py b/bcipy/language/main.py index 84230c417..8a1922338 100644 --- a/bcipy/language/main.py +++ b/bcipy/language/main.py @@ -4,7 +4,7 @@ from typing import List, Optional, Tuple import json -from bcipy.helpers.exceptions import UnsupportedResponseType +from bcipy.exceptions import UnsupportedResponseType from bcipy.helpers.symbols import DEFAULT_SYMBOL_SET from bcipy.config import DEFAULT_LM_PARAMETERS_PATH diff --git a/bcipy/language/model/causal.py b/bcipy/language/model/causal.py index 6e7aa2b51..677215369 100644 --- a/bcipy/language/model/causal.py +++ b/bcipy/language/model/causal.py @@ -8,7 +8,7 @@ from bcipy.helpers.symbols import BACKSPACE_CHAR, SPACE_CHAR from bcipy.language.main import LanguageModel, ResponseType -from bcipy.helpers.exceptions import InvalidLanguageModelException +from bcipy.exceptions import InvalidLanguageModelException from scipy.special import logsumexp from scipy.special import softmax diff --git a/bcipy/language/model/kenlm.py b/bcipy/language/model/kenlm.py index 8da4ea8c8..1c95241e7 100644 --- a/bcipy/language/model/kenlm.py +++ b/bcipy/language/model/kenlm.py @@ -2,7 +2,7 @@ from typing import Optional, List, Tuple from bcipy.helpers.symbols import BACKSPACE_CHAR, SPACE_CHAR from 
bcipy.language.main import LanguageModel, ResponseType -from bcipy.helpers.exceptions import InvalidLanguageModelException, KenLMInstallationException +from bcipy.exceptions import InvalidLanguageModelException, KenLMInstallationException from bcipy.config import LM_PATH try: import kenlm diff --git a/bcipy/language/model/mixture.py b/bcipy/language/model/mixture.py index 8a7bd2548..2220c029a 100644 --- a/bcipy/language/model/mixture.py +++ b/bcipy/language/model/mixture.py @@ -4,7 +4,7 @@ from bcipy.language.main import LanguageModel, ResponseType -from bcipy.helpers.exceptions import InvalidLanguageModelException +from bcipy.exceptions import InvalidLanguageModelException # pylint: disable=unused-import # flake8: noqa diff --git a/bcipy/language/model/oracle.py b/bcipy/language/model/oracle.py index 42b5cc560..a23e4789e 100644 --- a/bcipy/language/model/oracle.py +++ b/bcipy/language/model/oracle.py @@ -4,11 +4,12 @@ import numpy as np +from bcipy.config import SESSION_LOG_FILENAME from bcipy.helpers.symbols import BACKSPACE_CHAR from bcipy.language.main import LanguageModel, ResponseType from bcipy.language.model.uniform import equally_probable -logger = logging.getLogger() +logger = logging.getLogger(SESSION_LOG_FILENAME) TARGET_BUMP_MIN = 0.0 TARGET_BUMP_MAX = 0.95 diff --git a/bcipy/language/model/unigram.py b/bcipy/language/model/unigram.py index 4222898c2..9e61e213c 100644 --- a/bcipy/language/model/unigram.py +++ b/bcipy/language/model/unigram.py @@ -1,7 +1,7 @@ from typing import Optional, List, Tuple from bcipy.helpers.symbols import BACKSPACE_CHAR, SPACE_CHAR from bcipy.language.main import LanguageModel, ResponseType -from bcipy.helpers.exceptions import InvalidLanguageModelException +from bcipy.exceptions import InvalidLanguageModelException import json from bcipy.config import LM_PATH diff --git a/bcipy/language/tests/test_causal.py b/bcipy/language/tests/test_causal.py index 561b3a03a..b1d7ed191 100644 --- a/bcipy/language/tests/test_causal.py +++ 
b/bcipy/language/tests/test_causal.py @@ -4,7 +4,7 @@ import unittest from operator import itemgetter -from bcipy.helpers.exceptions import UnsupportedResponseType, InvalidLanguageModelException +from bcipy.exceptions import UnsupportedResponseType, InvalidLanguageModelException from bcipy.helpers.symbols import alphabet, BACKSPACE_CHAR, SPACE_CHAR from bcipy.language.model.causal import CausalLanguageModel from bcipy.language.main import ResponseType diff --git a/bcipy/language/tests/test_kenlm.py b/bcipy/language/tests/test_kenlm.py index 1e0a81e36..a0cc4805b 100644 --- a/bcipy/language/tests/test_kenlm.py +++ b/bcipy/language/tests/test_kenlm.py @@ -5,7 +5,7 @@ import os from operator import itemgetter -from bcipy.helpers.exceptions import UnsupportedResponseType, InvalidLanguageModelException +from bcipy.exceptions import UnsupportedResponseType, InvalidLanguageModelException from bcipy.helpers.symbols import alphabet, BACKSPACE_CHAR, SPACE_CHAR from bcipy.language.model.kenlm import KenLMLanguageModel from bcipy.language.main import ResponseType diff --git a/bcipy/language/tests/test_mixture.py b/bcipy/language/tests/test_mixture.py index 6f383c8f7..53c160212 100644 --- a/bcipy/language/tests/test_mixture.py +++ b/bcipy/language/tests/test_mixture.py @@ -5,7 +5,7 @@ import os from operator import itemgetter -from bcipy.helpers.exceptions import UnsupportedResponseType, InvalidLanguageModelException +from bcipy.exceptions import UnsupportedResponseType, InvalidLanguageModelException from bcipy.helpers.symbols import alphabet, BACKSPACE_CHAR, SPACE_CHAR from bcipy.language.model.mixture import MixtureLanguageModel from bcipy.language.main import ResponseType diff --git a/bcipy/language/tests/test_unigram.py b/bcipy/language/tests/test_unigram.py index 858849aa8..7a07b002e 100644 --- a/bcipy/language/tests/test_unigram.py +++ b/bcipy/language/tests/test_unigram.py @@ -4,7 +4,7 @@ import unittest import os -from bcipy.helpers.exceptions import 
UnsupportedResponseType, InvalidLanguageModelException +from bcipy.exceptions import UnsupportedResponseType, InvalidLanguageModelException from bcipy.helpers.symbols import alphabet, BACKSPACE_CHAR from bcipy.language.model.unigram import UnigramLanguageModel from bcipy.language.main import ResponseType diff --git a/bcipy/main.py b/bcipy/main.py index ea4ca3d7d..8d7d47b97 100644 --- a/bcipy/main.py +++ b/bcipy/main.py @@ -1,221 +1,110 @@ import argparse import logging import multiprocessing -from typing import List, Optional +from typing import Optional, Type -from psychopy import visual - -from bcipy.acquisition import ClientManager, LslDataServer -from bcipy.config import (DEFAULT_EXPERIMENT_ID, DEFAULT_PARAMETERS_PATH, - STATIC_AUDIO_PATH) -from bcipy.display import init_display_window -from bcipy.helpers.acquisition import (active_content_types, - init_eeg_acquisition) -from bcipy.helpers.language_model import init_language_model -from bcipy.helpers.load import (choose_signal_models, load_experiments, - load_json_parameters) -from bcipy.helpers.save import init_save_data_structure -from bcipy.helpers.session import collect_experiment_field_data -from bcipy.helpers.stimuli import play_sound -from bcipy.helpers.system_utils import configure_logger, get_system_info -from bcipy.helpers.task import print_message +from bcipy.config import CUSTOM_TASK_EXPERIMENT_ID, DEFAULT_PARAMETERS_PATH +from bcipy.exceptions import BciPyCoreException +from bcipy.helpers.load import load_experiments, load_json_parameters from bcipy.helpers.validate import validate_bcipy_session, validate_experiment -from bcipy.helpers.visualization import visualize_session_data -from bcipy.task import TaskType -from bcipy.task.start_task import start_task +from bcipy.task import Task, TaskRegistry +from bcipy.task.orchestrator import SessionOrchestrator +from bcipy.task.orchestrator.protocol import parse_protocol -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) def 
bci_main( parameter_location: str, user: str, - task: TaskType, - experiment: str = DEFAULT_EXPERIMENT_ID, + experiment_id: Optional[str] = None, alert: bool = False, visualize: bool = True, - fake: bool = False) -> bool: + fake: bool = False, + task: Optional[Type[Task]] = None) -> bool: """BCI Main. The BCI main function will initialize a save folder, construct needed information and execute the task. This is the main connection between any UI and running the app. + A Task or Experiment ID must be provided to run the task. If a task is provided, the experiment + ID will be ignored. + It may also be invoked via tha command line. Ex. `bcipy` this will default parameters, mode, user, and type. You can pass it those attributes with flags, if desired. - Ex. `bcipy --user "bci_user" --task "RSVP Calibration" --experiment "default" + Ex. `bcipy --user "bci_user" --task "RSVP Calibration" + Input: parameter_location (str): location of parameters file to use user (str): name of the user - task (TaskType): registered bcipy TaskType - experiment_id (str): Name of the experiment. Default name is DEFAULT_EXPERIMENT_ID. + experiment_id (str): Name of the experiment. If task is provided, this will be ignored. alert (bool): whether to alert the user when the task is complete visualize (bool): whether to visualize data at the end of a task fake (bool): whether to use fake acquisition data during the session. If None, the fake data will be determined by the parameters file. + task (Task): registered bcipy Task to execute. If None, the task will be determined by the + experiment protocol. """ - validate_experiment(experiment) + logger.info('Starting BciPy...') + logger.info( + f'User: {user} | Experiment: {experiment_id} | Task: {task} | ' + f'Parameters: {parameter_location} | ' + f'Alert: {alert} | Visualize: {visualize} | Fake: {fake}') + # If no task is provided, extract the tasks from the experiment protocol. 
Otherwise, we will assume + # the task is a custom task execution with no experiment attached. + if not task and experiment_id: + experiment = validate_experiment(experiment_id) + # extract protocol from experiment + tasks = parse_protocol(experiment['protocol']) + elif task: + tasks = [task] + experiment_id = CUSTOM_TASK_EXPERIMENT_ID + else: + msg = 'No experiment or task provided to BciPy.' + logger.exception(msg) + raise BciPyCoreException(msg) + # Load parameters parameters = load_json_parameters(parameter_location, value_cast=True) # cli overrides parameters file for fake data if provided fake = fake if fake is True else parameters['fake_data'] + parameters['fake_data'] = fake if not validate_bcipy_session(parameters, fake): return False - # Update property to reflect the parameter source + # Update property to reflect the parameter source: parameters['parameter_location'] = parameter_location if parameter_location != DEFAULT_PARAMETERS_PATH: parameters.save() - default_params = load_json_parameters(DEFAULT_PARAMETERS_PATH, value_cast=True) - if parameters.add_missing_items(default_params): - msg = 'Parameters file out of date.' 
- log.exception(msg) - raise Exception(msg) - - # update our parameters file with system related information - sys_info = get_system_info() - - # Initialize Save Folder - save_folder = init_save_data_structure( - parameters['data_save_loc'], - user, - parameter_location, - task=task.label, - experiment_id=experiment) - - # configure bcipy session logging - configure_logger(save_folder, - version=sys_info['bcipy_version']) - - log.info(sys_info) - - # Collect experiment field data - collect_experiment_field_data(experiment, save_folder) - - if execute_task(task, parameters, save_folder, alert, fake): - if visualize: - - # Visualize session data and fail silently if it errors - try: - visualize_session_data(save_folder, parameters) - except Exception as e: - log.info(f'Error visualizing session data: {e}') - return True - - return False - - -def execute_task( - task: TaskType, - parameters: dict, - save_folder: str, - alert: bool, - fake: bool) -> bool: - """Execute Task. - - Executes the desired task by setting up the display window and - data acquisition, then passing on to the start_task function - which will initialize experiment. - - Input: - task(TaskType): Task that should be registered in TaskType - parameters (dict): parameter dictionary - save_folder (str): path to save folder - alert (bool): whether to alert the user when the task is complete - fake (bool): whether to use fake acquisition data during the session - - Returns: - (bool): True if the task was successfully executed, False otherwise - """ - signal_models = [] - language_model = None - # Init EEG Model, if needed. Calibration Tasks Don't require probabilistic - # modules to be loaded. 
- if task not in TaskType.calibration_tasks(): - # Try loading in our signal_model and starting a langmodel(if enabled) - if not fake: - try: - signal_models = choose_signal_models( - active_content_types(parameters['acq_mode'])) - assert signal_models, "No signal models selected" - except Exception as error: - log.exception(f'Cannot load signal model. Exiting. {error}') - raise error + # Initialize an orchestrator + orchestrator = SessionOrchestrator( + experiment_id=experiment_id, + user=user, + parameters_path=parameter_location, + parameters=parameters, + fake=fake, + alert=alert, + visualize=visualize, + ) + orchestrator.add_tasks(tasks) - language_model = init_language_model(parameters) - - # Initialize DAQ and export the device configuration - daq, servers = init_eeg_acquisition( - parameters, save_folder, server=fake) - - # Initialize Display Window - # We have to wait until after the prompt to load the signal model before - # displaying the window, otherwise in fullscreen mode this throws an error - display = init_display_window(parameters) - print_message(display, f'Initializing {task}...') - - # Start Task try: - start_task(display, - daq, - task, - parameters, - save_folder, - language_model=language_model, - signal_models=signal_models, - fake=fake) - - # If exception, close all display and acquisition objects + orchestrator.execute() except Exception as e: - log.exception(str(e)) - - if alert: - play_sound(f"{STATIC_AUDIO_PATH}/{parameters['alert_sound_file']}") - - return _clean_up_session(display, daq, servers) - - -def _clean_up_session( - display: visual.Window, - daq: ClientManager, - servers: Optional[List[LslDataServer]] = None) -> bool: - """Clean up session. - - Closes the display window and data acquisition objects. Returns True if the session was closed successfully. 
- - Input: - display (visual.Window): display window - daq (LslAcquisitionClient): data acquisition client - server (LslDataServer): data server - """ - try: - # Stop Acquisition - daq.stop_acquisition() - daq.cleanup() - - # Stop Servers - if servers: - for server in servers: - server.stop() - - # Close the display window - # NOTE: There is currently a bug in psychopy when attempting to shutdown - # windows when using a USB-C monitor. Putting the display close last in - # the inquiry allows acquisition to properly shutdown. - display.close() - return True - except Exception as e: - log.exception(str(e)) + logger.exception(f'Error executing task: {e}') return False + return True + -def bcipy_main() -> None: +def bcipy_main() -> None: # pragma: no cover """BciPy Main. Command line interface used for running a registered experiment task in BciPy. To see what @@ -223,21 +112,22 @@ def bcipy_main() -> None: """ # Needed for windows machines multiprocessing.freeze_support() - + tr = TaskRegistry() experiment_options = list(load_experiments().keys()) - task_options = TaskType.list() + task_options = tr.list() parser = argparse.ArgumentParser() # Command line utility for adding arguments/ paths via command line parser.add_argument('-p', '--parameters', default=DEFAULT_PARAMETERS_PATH, help='Parameter location. Pass as *.json') parser.add_argument('-u', '--user', default='test_user') - parser.add_argument('-t', '--task', default='RSVP Calibration', - help=f'Task type to execute. Registered options: {task_options}') + parser.add_argument('-t', '--task', required=False, + help=f'Task type to execute. Registered options: {task_options}', + choices=task_options) parser.add_argument( '-e', '--experiment', - default=DEFAULT_EXPERIMENT_ID, + required=False, help=f'Select a valid experiment to run the task for this user. 
Available options: {experiment_options}') parser.add_argument( '-a', @@ -259,9 +149,20 @@ def bcipy_main() -> None: help='Use fake acquisition data for testing.') args = parser.parse_args() + if args.task: + task = tr.get(args.task) + else: + task = None + # Start BCI Main - bci_main(args.parameters, str(args.user), TaskType.by_value(str(args.task)), - str(args.experiment), args.alert, args.noviz, args.fake) + bci_main( + args.parameters, + str(args.user), + str(args.experiment), + args.alert, + args.noviz, + args.fake, + task) if __name__ == '__main__': diff --git a/bcipy/parameters/experiment/experiments.json b/bcipy/parameters/experiment/experiments.json index 8b3e2cf6c..add8e1184 100644 --- a/bcipy/parameters/experiment/experiments.json +++ b/bcipy/parameters/experiment/experiments.json @@ -1,6 +1,12 @@ { "default": { "fields": [], - "summary": "Default experiment to test various BciPy features without registering a full experiment." + "summary": "Default experiment to test various BciPy features without registering a full experiment.", + "protocol": "RSVP Calibration -> Matrix Calibration" + }, + "BCIOne": { + "fields": [], + "summary": "BCIOne experiment", + "protocol": "RSVP Calibration -> IntertaskAction -> OfflineAnalysisAction -> IntertaskAction -> Matrix Calibration -> IntertaskAction -> OfflineAnalysisAction -> IntertaskAction -> BciPy Report Action" } } \ No newline at end of file diff --git a/bcipy/parameters/experiment/phrases.json b/bcipy/parameters/experiment/phrases.json new file mode 100644 index 000000000..4ec1ea70e --- /dev/null +++ b/bcipy/parameters/experiment/phrases.json @@ -0,0 +1,13 @@ +{ + "Phrases" : [ + ["HELLO_WORLD", 0], + ["GOOD_MORNING", 1], + ["GOOD_AFTERNOON", 5], + ["GOOD_EVENING", 0], + ["GOOD_NIGHT", 5], + ["HOW_ARE_YOU", 4], + ["I_AM_FINE", 5], + ["THANK_YOU", 6], + ["WELCOME_HOME", 8] + ] +} \ No newline at end of file diff --git a/bcipy/parameters/parameters.json b/bcipy/parameters/parameters.json index 19756cefd..296d91858 
100755 --- a/bcipy/parameters/parameters.json +++ b/bcipy/parameters/parameters.json @@ -1,772 +1,893 @@ { "fake_data": { "value": "false", - "section": "bci_config", - "readableName": "Fake EEG Data On/Off", + "section": "acq_config", + "name": "Fake EEG Data", "helpTip": "If ‘true’, fake EEG data will be used instead of real EEG data. Useful for testing by software development team.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "bool" }, "acq_mode": { "value": "EEG", "section": "acq_config", - "readableName": "Acquisition Mode", + "name": "Acquisition Mode", "helpTip": "Specifies the hardware device(s) used for data collection. Default: EEG.", - "recommended_values": [ + "recommended": [ "EEG", "EEG/DSI-24", "Eyetracker", "EEG+Eyetracker", "EEG+Eyetracker:passive" ], + "editable": false, "type": "str" }, "trigger_type": { "value": "text", "section": "bci_config", - "readableName": "Trigger Stimulus Type", - "helpTip": "Specifies whether to use a picture or auditory stimulus to calibrate trigger latency. Default: text", - "recommended_values": [ + "name": "Trigger Stimulus Type", + "helpTip": "Specifies whether to use text, image or auditory stimulus to calibrate trigger latency. Default: text", + "recommended": [ "image", "text" ], + "editable": false, "type": "str" }, "k_folds": { "value": "10", - "section": "signal_config", - "readableName": "Number of Cross-Validation Folds", + "section": "model_config", + "name": "Number of Cross-Validation Folds", "helpTip": "Specifies the number of folds used for cross-validation when calculating AUC. 
Default: 10", - "recommended_values": "[10]", + "recommended": "[10]", + "editable": false, "type": "int" }, "trial_window": { "value": "0.0:0.5", - "section": "bci_config", - "readableName": "Trial Classification Window Length", + "section": "signal_config", + "name": "Trial Classification Window Length", "helpTip": "Specifies the window (in seconds) of the EEG data collection window after each stimulus presentation. Default: 0.0:0.5", - "recommended_values": [ + "recommended": [ "0.0:0.5", "0.0:0.8", "0.2:0.8" ], + "editable": false, "type": "range" }, "prestim_length": { "value": "1", - "section": "bci_config", - "readableName": "Prestimulus Window Length", + "section": "signal_config", + "name": "Prestimulus Window Length", "helpTip": "Specifies the length (in seconds) of the EEG data window to return before inquiry presentation. Default: 1", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "alert_sound_file": { "value": "beep.wav", - "section": "bci_config", - "readableName": "Alert Tone", + "section": "task_config", + "name": "Alert Tone", "helpTip": "Specifies the path to an audio file to be played as an alert tone when experiments or offline analysis is complete. Default: beep.wav", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "filepath" }, "signal_model_path": { "value": "", - "section": "bci_config", - "readableName": "Signal Model Path", + "section": "model_config", + "name": "Signal Model Path", "helpTip": "Directory of the pre-trained signal model. This is often the the calibration session directory.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "directorypath" }, "filter_high": { "value": "20", "section": "signal_config", - "readableName": "High Frequency Filter Cutoff", + "name": "High Frequency Filter Cutoff", "helpTip": "Specifies the frequency bound (in Hz) of the low-pass filter (high cutoff). 
Default: 20", - "recommended_values": [ + "recommended": [ "20", "45", "50" ], + "editable": false, "type": "float" }, "filter_low": { "value": "1", "section": "signal_config", - "readableName": "Low Frequency Filter Cutoff", + "name": "Low Frequency Filter Cutoff", "helpTip": "Specifies the frequency bound (in Hz) of the high-pass filter (low cutoff). Default: 1", - "recommended_values": [ + "recommended": [ "1", "2" ], + "editable": false, "type": "float" }, "filter_order": { "value": "2", "section": "signal_config", - "readableName": "Filter Order", + "name": "Filter Order", "helpTip": "Specifies the slope of the low-pass and high-pass filters. Default: 2", - "recommended_values": [ + "recommended": [ "2", "3" ], + "editable": false, "type": "float" }, "notch_filter_frequency": { "value": "60", "section": "signal_config", - "readableName": "Notch Filter Frequency", + "name": "Notch Filter Frequency", "helpTip": "Specifies the frequency (in Hz) of the notch filter used to remove electrical artifact from the surrounding environment. Default: 60", - "recommended_values": [ + "recommended": [ "59", "60" ], + "editable": false, "type": "float" }, "down_sampling_rate": { "value": "2", "section": "signal_config", - "readableName": "Downsampling Rate", + "name": "Downsampling Rate", "helpTip": "Specifies the decimation factor (integer only) for downsampling of EEG data. Default: 2", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, - "artifact_rejection": { - "value": "false", - "section": "artifact_rejection", - "readableName": "Artifact Rejection On/Off", - "helpTip": "If ‘true’, the system will detect and reject inquiries containing unwanted artifacts (e.g. blinks). 
This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "bool" - }, - "high_voltage_threshold": { - "value": "false", - "section": "artifact_rejection", - "readableName": "High Voltage Threshold On/Off", - "helpTip": "If ‘true’, an upper voltage threshold will be set for artifact rejection. Detection of values above the specified threshold will trigger rejection of a inquiry. This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "bool" - }, - "high_voltage_value": { - "value": "75000000.0", - "section": "artifact_rejection", - "readableName": "High Voltage Threshold Value", - "helpTip": "Specifies the high voltage threshold (in microvolts) for artifact rejection (High Voltage Threshold must be set to ‘true’). Default: 75E+6. This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "float" - }, - "low_voltage_threshold": { - "value": "false", - "section": "artifact_rejection", - "readableName": "Low Voltage Threshold On/Off", - "helpTip": "If ‘true’, a lower voltage threshold will be set for artifact rejection. Detection of values below the specified threshold will trigger rejection of a inquiry. This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "bool" - }, - "low_voltage_value": { - "value": "-0.000075", - "section": "artifact_rejection", - "readableName": "LowVoltage Threshold Value", - "helpTip": "Specifies the low voltage threshold (in microvolts) for artifact rejection (Low Voltage Threshold must be set to ‘true’). 
Default: -75E-6 This is not implemented in the current version of the system.", - "recommended_values": "", - "type": "float" - }, "summarize_session": { "value": "true", - "section": "bci_config", - "readableName": "Summarize Session Data", + "section": "task_config", + "name": "Summarize Session Data", "helpTip": "If 'true', writes an Excel file which summarizes the session data by charting evidence per inquiry.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "bool" }, "parameter_location": { "value": "bcipy/parameters/parameters.json", - "section": "bci_config", - "readableName": "Parameter File", + "section": "task_config", + "name": "Parameter File", "helpTip": "Specifies the file containing the current system parameters. Default: bcipy/parameters/parameters.json", - "recommended_values": [ + "recommended": [ "parameters/parameters.json" ], + "editable": false, "type": "filepath" }, "data_save_loc": { "value": "data/", "section": "bci_config", - "readableName": "Data Save Location", + "name": "Data Save Location", "helpTip": "Specifies the location in which to save data files after each recording. This must be a directory ending with /. Default: data/", - "recommended_values": [ + "recommended": [ "data/" ], + "editable": false, "type": "directorypath" }, "full_screen": { "value": "false", - "section": "bci_config", - "readableName": "Full Screen Mode On/Off", + "section": "task_config", + "name": "Full Screen Mode", "helpTip": "If ‘true’, the task will be displayed in full screen mode. If ‘false’, the task will be displayed in a window.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "bool" }, "window_height": { "value": "500", - "section": "bci_config", - "readableName": "Task Window Height", + "section": "task_config", + "name": "Task Window Height", "helpTip": "Specifies the height (in norm units) of the task window when not in full screen mode (Full Screen Mode On/Off must be set to ‘false’). 
See https://www.psychopy.org/general/units.html. Default: 500", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "window_width": { "value": "500", - "section": "bci_config", - "readableName": "Task Window Width", + "section": "task_config", + "name": "Task Window Width", "helpTip": "Specifies the width (in norm units) of the task window when not in full screen mode (Full Screen Mode On/Off must be set to ‘false’). See https://www.psychopy.org/general/units.html. Default: 500", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "matrix_rows": { "value": "5", - "section": "bci_config", - "readableName": "Matrix Rows", + "section": "matrix_task_config", + "name": "Matrix Rows", "helpTip": "Specifies the number of rows to use in the Matrix task. Rows * columns should be greater than or equal to the number of symbols.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "matrix_columns": { "value": "6", - "section": "bci_config", - "readableName": "Matrix Columns", + "section": "matrix_task_config", + "name": "Matrix Columns", "helpTip": "Specifies the number of columns to use in the Matrix task. Rows * columns should be greater than or equal to the number of symbols.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "matrix_width": { "value": "0.7", - "section": "bci_config", - "readableName": "Matrix Width (%)", + "section": "matrix_task_config", + "name": "Matrix Width (%)", "helpTip": "Specifies the max percentage of the display that the matrix grid should utilize. 
Must be between 0 and 1", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "acq_show_viewer": { "value": "false", "section": "acq_config", - "readableName": " EEG Viewer On/Off", - "helpTip": "If ‘true’, the EEG signal viewer will be displayed.", - "recommended_values": "", + "name": " EEG Viewer", + "helpTip": "If ‘true’, the EEG signal viewer will be displayed along with the Task. Note: This has not been thoroughly tested and could cause timing issues. Use with caution.", + "recommended": "", + "editable": false, "type": "bool" }, "stim_screen": { "value": "0", "section": "bci_config", - "readableName": "Task Display Monitor", + "name": "Task Display Monitor", "helpTip": "Specifies which monitor to use for task display when two monitors are in use. If ‘0’, the task will be displayed on the primary monitor (with the EEG signal viewer on the second monitor, if EEG Viewer On/Off is set to ‘true’). If ‘1’, the opposite monitor assignment will be used.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "task_buffer_length": { "value": "2", - "section": "bci_config", - "readableName": "Inter-inquiry Interval", + "section": "task_config", + "name": "Inter-inquiry Interval", "helpTip": "Specifies the delay time (in seconds) between the final stimulus in one inquiry and the beginning (target stimulus or fixation cross) of the next inquiry in a task. Default: 2", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "is_txt_stim": { "value": "true", "section": "bci_config", - "readableName": "Text Stimuli On/Off", - "helpTip": "If ‘true’, text stimuli will be used. If ‘false’, image stimuli will be loaded from the folder specified in Image Stimulus Folder.", - "recommended_values": "", + "name": "Text Stimuli", + "helpTip": "If ‘true’, text stimuli will be used. If ‘false’, image stimuli will be loaded from the folder specified in Image Stimulus Folder. 
Default: true", + "recommended": "", + "editable": false, "type": "bool" }, "path_to_presentation_images": { "value": "bcipy/static/images/rsvp/", "section": "bci_config", - "readableName": "Image Stimulus Folder", + "name": "Image Stimulus Folder", "helpTip": "Specifies the location of image files to be used as stimuli (Text Stimuli On/Off must be set to ‘false’). This must be a directory ending with /.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "directorypath" }, "stim_space_char": { "value": "–", - "section": "bci_config", - "readableName": "Space Character", - "helpTip": "Specifies the text or Unicode character which represents a space during text-stimuli tasks. Default: –", - "recommended_values": [ + "section": "task_config", + "name": "Space Character", + "helpTip": "Specifies the text or Unicode character which represents a space during text-stimuli tasks. *Note* This only applies to RSVP Tasks. Default: –", + "recommended": [ "_", "–", "‒", "□" ], + "editable": false, "type": "str" }, "stim_order": { "value": "random", "section": "bci_config", - "readableName": "Stimuli Order", + "name": "Stimuli Order", "helpTip": "Specifies the ordering of stimuli in an inquiry. Default is random.", - "recommended_values": [ + "recommended": [ "alphabetical", "random" ], + "editable": false, "type": "str" }, "target_positions": { "value": "distributed", "section": "bci_config", - "readableName": "Target Positions", + "name": "Target Positions", "helpTip": "Specifies the positions of target stimuli in calibration task. Default is random.", - "recommended_values": [ + "recommended": [ "distributed", "random" ], + "editable": false, "type": "str" }, "nontarget_inquiries": { - "value": "10", + "value": "0", "section": "bci_config", - "readableName": "Percentage of Nontarget Inquiries", - "helpTip": "Specifies the percentage (0-100) of inquiries which target stimuli flashed is not in inquiry. 
Default is 10 percent.", - "recommended_values": "", + "name": "Percentage of Nontarget Inquiries", + "helpTip": "Specifies the percentage (0-100) of inquiries which target stimuli flashed is not in inquiry. Default is 0 percent.", + "recommended": "", + "editable": false, "type": "int" }, "stim_length": { "value": "10", - "section": "bci_config", - "readableName": "Stimuli Per inquiry", + "section": "task_config", + "name": "Stimuli Per inquiry", "helpTip": "Specifies the number of stimuli to present in each inquiry. Default: 10", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "time_flash": { - "value": "0.25", - "section": "bci_config", - "readableName": "Stimulus Presentation Duration", - "helpTip": "Specifies the duration of time (in seconds) that each stimulus is displayed in an inquiry.", - "recommended_values": "", + "value": "0.2", + "section": "task_config", + "name": "Stimulus Presentation Duration", + "helpTip": "Specifies the duration of time (in seconds) that each stimulus is displayed in an inquiry. 
Default: 0.2", + "recommended": "", + "editable": false, "type": "float" }, "time_prompt": { "value": "1", - "section": "bci_config", - "readableName": "Time Prompt Stimuli (sec)", + "section": "task_config", + "name": "Time Prompt Stimuli (sec)", "helpTip": "The amount of time in seconds to present the target stimuli prompt in an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "time_fixation": { "value": "0.5", - "section": "bci_config", - "readableName": "Time Fixation Stimuli (sec)", + "section": "task_config", + "name": "Time Fixation Stimuli (sec)", "helpTip": "The amount of time in seconds to present the fixation stimuli in an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "time_vep_animation": { "value": "1.0", - "section": "bci_config", - "readableName": "Time VEP Animation (sec)", + "section": "vep_task_config", + "name": "Time VEP Animation (sec)", "helpTip": "The amount of time in seconds for the animation moving symbols to boxes.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "stim_jitter": { "value": "0.0", "section": "bci_config", - "readableName": "Stimulus Presentation Jitter (sec)", + "name": "Stimulus Presentation Jitter (sec)", "helpTip": "Specifies the time (sec) to jitter presentation rates. Default: 0.0", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, - "stim_pos_x": { + "rsvp_stim_pos_x": { "value": "0", - "section": "bci_config", - "readableName": "Stimulus Position Horizontal", + "section": "rsvp_task_config", + "name": "RSVP Stimulus Position Horizontal", "helpTip": "Specifies the center point of the stimulus position along the X axis. Possible values range from -1 to 1, with 0 representing the center. 
Default: 0", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, - "stim_pos_y": { + "rsvp_stim_pos_y": { "value": "0", - "section": "bci_config", - "readableName": "Stimulus Position Vertical", + "section": "rsvp_task_config", + "name": "RSVP Stimulus Position Vertical", "helpTip": "Specifies the center point of the stimulus position along the Y axis. Possible values range from -1 to 1, with 0 representing the center. Default: 0", - "recommended_values": "", + "recommended": "", + "editable": false, + "type": "float" + }, + "matrix_stim_pos_x": { + "value": "-0.6", + "section": "matrix_task_config", + "name": "Matrix Stimulus Starting Position Horizontal", + "helpTip": "Specifies the center point of the stimulus position along the X axis. Possible values range from -1 to 1, with 0 representing the center. Default: -0.6", + "recommended": "", + "editable": false, + "type": "float" + }, + "matrix_stim_pos_y": { + "value": "0.4", + "section": "matrix_task_config", + "name": "Matrix Stimulus Starting Position Vertical", + "helpTip": "Specifies the center point of the stimulus position along the Y axis. Possible values range from -1 to 1, with 0 representing the center. Default: 0.4", + "recommended": "", + "editable": false, "type": "float" }, "font": { - "value": "Overpass Mono Medium", - "section": "bci_config", - "readableName": "Font", - "helpTip": "Specifies the font used for all text stimuli. Default: Consolas", - "recommended_values": [ + "value": "Courier New", + "section": "task_config", + "name": "Font", + "helpTip": "Specifies the font used for all text stimuli. Default: Courier New", + "recommended": [ "Courier New", "Lucida Sans", + "Arial", "Consolas" ], + "editable": false, "type": "str" }, - "stim_height": { - "value": "0.5", - "section": "bci_config", - "readableName": "Stimulus Size", - "helpTip": "Specifies the height of text stimuli. See https://www.psychopy.org/general/units.html. 
Default: 0.5", - "recommended_values": "", + "rsvp_stim_height": { + "value": "0.4", + "section": "rsvp_task_config", + "name": "RSVP Stimulus Size", + "helpTip": "Specifies the height of text stimuli. See https://www.psychopy.org/general/units.html. Default: 0.4", + "recommended": "", + "editable": false, + "type": "float" + }, + "vep_stim_height": { + "value": "0.1", + "section": "vep_task_config", + "name": "VEP Stimulus Size", + "helpTip": "Specifies the height of text stimuli. See https://www.psychopy.org/general/units.html. Default: 0.1", + "recommended": "", + "editable": false, + "type": "float" + }, + "matrix_stim_height": { + "value": "0.17", + "section": "matrix_task_config", + "name": "Matrix Stimulus Size", + "helpTip": "Specifies the height of text stimuli. See https://www.psychopy.org/general/units.html. Default: 0.17", + "recommended": "", + "editable": false, "type": "float" }, + "matrix_keyboard_layout": { + "value": "ALP", + "section": "matrix_task_config", + "name": "Matrix Keyboard Layout", + "helpTip": "Specifies the keyboard layout to use for the Matrix task. Default: ALP (Alphabetical)", + "recommended": [ + "ALP", + "QWERTY", + "FREQ" + ], + "editable": false, + "type": "str" + }, "stim_color": { "value": "white", "section": "bci_config", - "readableName": "Stimulus Color", + "name": "Stimulus Color", "helpTip": "Specifies the color of text stimuli within the RSVP stream. Default: white", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "str" }, "target_color": { "value": "white", "section": "bci_config", - "readableName": "Target Color", + "name": "Target Color", "helpTip": "Specifies the color of target characters during calibration. 
Default: white", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "str" }, "fixation_color": { "value": "red", "section": "bci_config", - "readableName": "Fixation Cross Color", + "name": "Fixation Cross Color", "helpTip": "Specifies the color of the fixation cross that appears before each inquiry. Default: red", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "str" }, "background_color": { "value": "black", - "section": "bci_config", - "readableName": "Task Background Color", + "section": "task_config", + "name": "Task Background Color", "helpTip": "Specifies the color of the task background. Default: black", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "str" }, "info_pos_x": { "value": "0", - "section": "bci_config", - "readableName": "Position Text (X)", + "section": "task_config", + "name": "Position Text (X)", "helpTip": "Position Text (X)", - "recommended_values": [ + "recommended": [ "0" ], + "editable": false, "type": "float" }, "info_pos_y": { "value": "-0.75", - "section": "bci_config", - "readableName": "Position Text (Y)", + "section": "task_config", + "name": "Position Text (Y)", "helpTip": "Position Text (Y)", - "recommended_values": [ + "recommended": [ "-0.75" ], + "editable": false, "type": "float" }, "info_text": { "value": "", - "section": "bci_config", - "readableName": "Text below main presentation", + "section": "task_config", + "name": "Text below main presentation", "helpTip": "Text below main presentation", - "recommended_values": [ + "recommended": [ "", "Demo Text", "DEMO" ], + "editable": false, "type": "str" }, "info_height": { "value": "0.1", - "section": "bci_config", - "readableName": "Text below main presentation height", + "section": "task_config", + "name": "Text below main presentation height", "helpTip": "Specifies the height of info text stimuli. See https://www.psychopy.org/general/units.html. 
Default: 0.1", - "recommended_values": [ + "recommended": [ "0.1" ], + "editable": false, "type": "float" }, "info_color": { "value": "white", - "section": "bci_config", - "readableName": "Color Text", + "section": "task_config", + "name": "Color Text", "helpTip": "Color Text", - "recommended_values": [ + "recommended": [ "white", "black", "blue" ], + "editable": false, "type": "str" }, "task_text": { "value": "HELLO_WORLD", - "section": "bci_config", - "readableName": "Target Phrase", - "helpTip": "Specifies the target phrase displayed at the top of the screen during text-stimuli copy/spelling tasks.", - "recommended_values": "", + "section": "online_config", + "name": "Target Phrase", + "helpTip": "Specifies the target phrase displayed at the top of the screen during text-stimuli copy/spelling tasks. If copy_phrases_location is provided, this parameter will be ignored and overwritten in parameter saving.", + "recommended": "", + "editable": true, "type": "str" }, - "task_height": { + "copy_phrases_location": { + "value": "bcipy/parameters/experiment/phrases.json", + "section": "online_config", + "name": "Copy Phrases Location", + "helpTip": "Specifies a list of copy phrases to execute during Task Orchestration. If provided, any copy phrases in the protocol will be executed in order pulling task text and starting locations from the file.", + "recommended": "", + "editable": true, + "type": "filepath" + }, + "rsvp_task_height": { "value": "0.1", - "section": "bci_config", - "readableName": "Task Text Size", + "section": "rsvp_task_config", + "name": "RSVP Task Text Size", + "helpTip": "Specifies the height of task-specific text, e.g. #/100 in calibration and target phrase in copy/spelling. See https://www.psychopy.org/general/units.html. 
Default: 0.1", + "recommended": [ + "0.1" + ], + "editable": false, + "type": "float" + }, + "vep_task_height": { + "value": "0.1", + "section": "vep_task_config", + "name": "VEP Task Text Size", "helpTip": "Specifies the height of task-specific text, e.g. #/100 in calibration and target phrase in copy/spelling. See https://www.psychopy.org/general/units.html. Default: 0.1", - "recommended_values": [ + "recommended": [ "0.1" ], + "editable": false, + "type": "float" + }, + "matrix_task_height": { + "value": "0.1", + "section": "matrix_task_config", + "name": "Matrix Task Text Size", + "helpTip": "Specifies the height of task-specific text, e.g. #/100 in calibration and target phrase in copy/spelling. See https://www.psychopy.org/general/units.html. Default: 0.1", + "recommended": [ + "0.1" + ], + "editable": false, "type": "float" }, "task_color": { "value": "white", "section": "bci_config", - "readableName": "Task Text Color", + "name": "Task Text Color", "helpTip": "Specifies the color of task-specific text, e.g. #/100 in calibration and target phrase in copy/spelling. Default: white", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "str" }, - "task_padding": { + "rsvp_task_padding": { + "value": "0.05", + "section": "bci_config", + "name": "RSVP Task Bar Padding", + "helpTip": "Specifies the padding around the task bar text for RSVP tasks. Default: 0.05", + "recommended": [ + "0.05" + ], + "editable": false, + "type": "float" + }, + "matrix_task_padding": { "value": "0.05", "section": "bci_config", - "readableName": "Task Bar Padding", - "helpTip": "Specifies the padding around the task bar text. Default: 0.05", - "recommended_values": [ + "name": "Matrix Task Bar Padding", + "helpTip": "Specifies the padding around the task bar text for Matrix Tasks. 
Default: 0.05", + "recommended": [ "0.05" ], + "editable": false, "type": "float" }, "stim_number": { - "value": "100", + "value": "5", "section": "bci_config", - "readableName": "Number of Calibration inquiries", + "name": "Number of Calibration inquiries", "helpTip": "Specifies the number of inquiries to present in a calibration session. Default: 100", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "enable_breaks": { "value": "false", "section": "bci_config", - "readableName": "Automatic Calibration Breaks On/Off", + "name": "Automatic Calibration Breaks", "helpTip": "If ‘true’, automatic breaks will be added to the calibration session. If ‘false’, automatic breaks will not occur, but the session can still be paused by pressing Space.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "bool" }, "break_len": { "value": "30", "section": "bci_config", - "readableName": "Automatic Calibration Break Length", + "name": "Automatic Calibration Break Length", "helpTip": "Specifies the length (in seconds) of automatic calibration breaks (Automatic Calibration Breaks On/Off must be set to ‘true’).", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "trials_before_break": { "value": "20", "section": "bci_config", - "readableName": "Automatic Calibration Break Timing", + "name": "Automatic Calibration Break Timing", "helpTip": "Specifies the number of inquiries between automatic calibration breaks (Automatic Calibration Breaks On/Off must be set to ‘true’).", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "break_message": { "value": "Take a break!", "section": "bci_config", - "readableName": "Automatic Calibration Break Message", + "name": "Automatic Calibration Break Message", "helpTip": "Specifies the message displayed during automatic calibration breaks (Automatic Calibration Breaks On/Off must be set to ‘true’).", - 
"recommended_values": "", + "recommended": "", + "editable": false, "type": "str" }, - "min_inq_len": { - "value": "1", - "section": "bci_config", - "readableName": "Minimum Inquiry Length", - "helpTip": "The minimum number of inquiries to present in spelling tasks", - "recommended_values": [ - "1" - ], - "type": "int" - }, "max_inq_len": { "value": "50", - "section": "bci_config", - "readableName": "Maximum Inquiry Length", + "section": "online_config", + "name": "Maximum Inquiry Length", "helpTip": "Specifies the maximum number of inquiries to present in copy/spelling tasks. The task will end if this number is reached.", - "recommended_values": [ + "recommended": [ "20", "25" ], + "editable": false, "type": "int" }, "max_minutes": { "value": "20", - "section": "bci_config", - "readableName": "Maximum Task Length (Time)", + "section": "online_config", + "name": "Maximum Task Length (Time)", "helpTip": "Specifies the time limit (in minutes) for copy/spelling tasks. The task will end if this time limit is reached.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "max_selections": { "value": "25", - "section": "bci_config", - "readableName": "Maximum Number of Selections", + "section": "online_config", + "name": "Maximum Number of Selections", "helpTip": "The maximum number of selections for copy/spelling tasks. The task will end if this number is reached.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "max_incorrect": { "value": "5", "section": "bci_config", - "readableName": "Maximum Number of Incorrect Selections", + "name": "Maximum Number of Incorrect Selections", "helpTip": "The maximum number of consecutive incorrect selections for copy/spelling tasks. 
The task will end if this number is reached.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "decision_threshold": { "value": "0.8", - "section": "bci_config", - "readableName": "Decision Threshold", + "section": "online_config", + "name": "Decision Threshold", "helpTip": "Specifies the decision threshold for stimulus selection in copy/spelling class. If the posterior probability (combining EEG and language model evidence) for a stimulus reaches this threshold, it will be selected. Possible value range: 0.0-1.0. Default: 0.8", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, - "min_inq_per_series": { + "min_inq_len": { "value": "1", - "section": "bci_config", - "readableName": "Minimum Inquiries Per Series", + "section": "online_config", + "name": "Minimum Inquiries Per Series", "helpTip": "Specifies the minimum number of inquiries to present before making a decision in copy/spelling tasks. Default: 1", - "recommended_values": [ + "recommended": [ "1" ], + "editable": false, "type": "int" }, "max_inq_per_series": { "value": "11", - "section": "bci_config", - "readableName": "Maximum Inquiries Per Series", + "section": "online_config", + "name": "Maximum Inquiries Per Series", "helpTip": "Specifies the maximum number of inquiries to present before making a decision in copy/spelling tasks.", - "recommended_values": [ + "recommended": [ "10", "15" ], + "editable": false, "type": "int" }, "backspace_always_shown": { "value": "true", - "section": "bci_config", - "readableName": "Always Show Backspace On/Off", + "section": "lang_model_config", + "name": "Always Show Backspace", "helpTip": "If ‘true’, the backspace character will be included in every inquiry in text-stimuli copy/spelling tasks. 
If ‘false’, the backspace character will be treated the same as other characters, appearing in inquiries only when warranted by EEG/language model evidence.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "bool" }, "spelled_letters_count": { "value": "0", - "section": "bci_config", - "readableName": "Pre-Selected Letters Count", + "section": "lang_model_config", + "name": "Pre-Selected Letters Count", "helpTip": "Specifies the number of letters in the target phrase that are already typed when the task begins, e.g. if the target phrase is “THE_DOG”, setting this parameter to ‘4’ would display “THE_” as the typed string, and the user would begin typing with D. If ‘0’, the typed string will be blank.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "int" }, "lang_model_type": { "value": "UNIFORM", "section": "lang_model_config", - "readableName": "Language Model Type", + "name": "Language Model Type", "helpTip": "Specifies which language model to use. Default: UNIFORM", - "recommended_values": [ + "recommended": [ "UNIFORM", "CAUSAL", "KENLM", "MIXTURE", "ORACLE" ], + "editable": false, "type": "str" }, "lm_backspace_prob": { "value": "0.0", - "section": "bci_config", - "readableName": "Backspace Probability", + "section": "lang_model_config", + "name": "Backspace Probability", "helpTip": "Specifies the minimum probability assigned to the backspace character in the language model. Possible value range: 0.0-1.0. Default: 0.0", - "recommended_values": "0.05", + "recommended": "0.05", + "editable": false, "type": "float" }, "show_preview_inquiry": { "value": "false", - "section": "bci_config", - "readableName": "Preview Inquiry On/Off", - "helpTip": "If ‘true’, the inquiry will be previewed as applicable for the task. 
*Note* Not all tasks will have this enabled!", - "recommended_values": "", + "section": "task_config", + "name": "Preview Inquiry Display", + "helpTip": "If ‘true’, the inquiry will be previewed as applicable for the Task. *Note* Not all tasks will have this enabled!", + "recommended": "", + "editable": false, "type": "bool" }, "preview_inquiry_error_prob": { "value": "0.05", "section": "bci_config", - "readableName": "Preview Inquiry Button Error Probability", + "name": "Preview Inquiry Button Error Probability", "helpTip": "Specifies the probability of a button error during inquiry preview. Default: 0.05", - "recommended_values": "", + "recommended": "", + "editable": true, "type": "float" }, "preview_inquiry_progress_method": { "value": "0", - "section": "bci_config", - "readableName": "Preview Inquiry Progression Method", + "section": "task_config", + "name": "Preview Inquiry Progression Method", "helpTip": "If show_preview_inquiry true, this will determine how to proceed after a key hit. 
0 = preview only; 1 = press to confirm; 2 = press to skip to another inquiry", - "recommended_values": [ + "recommended": [ "0", "1", "2" ], + "editable": false, "type": "int" }, "preview_inquiry_length": { "value": "5", - "section": "bci_config", - "readableName": "Preview Inquiry Display Length", + "section": "task_config", + "name": "Preview Inquiry Display Length", "helpTip": "Length of time in seconds to present an inquiry preview to the user.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "preview_inquiry_key_input": { "value": "return", - "section": "bci_config", - "readableName": "Preview Inquiry Display Key Input Method", + "section": "task_config", + "name": "Preview Inquiry Display Key Input Method", "helpTip": "Defines the key used to engage with inquiry preview.", - "recommended_values": [ + "recommended": [ "space", "escape", "return" ], + "editable": false, "type": "str" }, "preview_inquiry_isi": { "value": "1", - "section": "bci_config", - "readableName": "Preview Inquiry Inter-Stimulus Interval", + "section": "task_config", + "name": "Preview Inquiry Inter-Stimulus Interval", "helpTip": "The time between previewing an inquiry and the start of an inquiry.", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "show_feedback": { "value": "true", - "section": "bci_config", - "readableName": "Feedback On/Off", - "helpTip": "If ‘true’, feedback will be shown after each inquiry.", - "recommended_values": "", + "section": "task_config", + "name": "Feedback Display", + "helpTip": "If ‘true’, feedback will be shown after each inquiry. If ‘false’, feedback will not be shown. 
*Note* Not all tasks will have this enabled!", + "recommended": "", + "editable": false, "type": "bool" }, "feedback_duration": { "value": "2", - "section": "bci_config", - "readableName": "Feedback Time (seconds)", + "section": "task_config", + "name": "Feedback Time (seconds)", "helpTip": "Specifies the length in time (seconds) feedback will be displayed after each inquiry in registered tasks (ex. RSVP Copy Phrase). Default: 2", - "recommended_values": "", + "recommended": "", + "editable": false, "type": "float" }, "psd_method": { "value": "Welch", - "section": "bci_config", - "readableName": "Power Spectral Density Method", + "section": "signal_config", + "name": "Power Spectral Density Method", "helpTip": "Specifies the method used to approximate power spectral density bands (Welch or MultiTaper). Default: Welch", - "recommended_values": [ + "recommended": [ "Welch", "MutliTaper" ], + "editable": false, "type": "str" } } \ No newline at end of file diff --git a/bcipy/signal/evaluate/artifact.py b/bcipy/signal/evaluate/artifact.py index ed7f5be78..19ec6180f 100644 --- a/bcipy/signal/evaluate/artifact.py +++ b/bcipy/signal/evaluate/artifact.py @@ -5,14 +5,13 @@ from typing import Union, List, Tuple, Optional from logging import getLogger -log = getLogger(__name__) - from bcipy.config import ( DEFAULT_PARAMETER_FILENAME, RAW_DATA_FILENAME, TRIGGER_FILENAME, DEFAULT_DEVICE_SPEC_FILENAME, - BCIPY_ROOT + BCIPY_ROOT, + SESSION_LOG_FILENAME ) from bcipy.helpers.acquisition import analysis_channels from bcipy.helpers.load import ( @@ -29,6 +28,7 @@ from bcipy.acquisition.devices import DeviceSpec import mne +log = getLogger(SESSION_LOG_FILENAME) mne.set_log_level('WARNING') from mne import Annotations @@ -40,7 +40,7 @@ class DefaultArtifactParameters(Enum): """ # Voltage - PEAK_THRESHOLD = 100e-7 + PEAK_THRESHOLD = 75e-7 PEAK_MIN_DURATION = 0.005 FLAT_THRESHOLD = 0.5e-6 FLAT_MIN_DURATION = 0.1 @@ -133,6 +133,12 @@ class ArtifactDetection: detect_voltage : bool Whether 
to detect voltage artifacts. Defaults to True. + + semi_automatic : bool + Whether to use a semi-automatic approach to artifact detection. Defaults to False. + + session_triggers : tuple + A tuple of lists containing the trigger type, trigger timing, and trigger label for the session. """ supported_units: List[str] = ['volts', 'microvolts'] @@ -237,13 +243,13 @@ def label_artifacts( voltage = self.label_voltage_events() if voltage: voltage_annotations, bad_channels = voltage - log.info(f'Voltage violation events found: {len(voltage_annotations)}') if bad_channels: # add bad channel labels to the raw data self.mne_data.info['bads'] = bad_channels log.info(f'Bad channels detected: {bad_channels}') if voltage_annotations: + log.info(f'Voltage violation events found: {len(voltage_annotations)}') annotations += voltage_annotations self.voltage_annotations = voltage_annotations @@ -251,8 +257,9 @@ def label_artifacts( eog = self.label_eog_events() if eog: eog_annotations, eog_events = eog - log.info(f'EOG events found: {len(eog_events)}') + if eog_annotations: + log.info(f'EOG events found: {len(eog_events)}') annotations += eog_annotations self.eog_annotations = eog_annotations @@ -431,8 +438,10 @@ def label_voltage_events( # combine the bad channels bad_channels = bad_channels1 + bad_channels2 - if len(onsets) > 0 or len(bad_channels) > 0: + if len(onsets) > 0 and len(bad_channels) > 0: return mne.Annotations(onsets, durations, descriptions), bad_channels + elif len(bad_channels) > 0: + return None, bad_channels return None diff --git a/bcipy/signal/model/__init__.py b/bcipy/signal/model/__init__.py index 6e9e0a4fe..d95f3d026 100644 --- a/bcipy/signal/model/__init__.py +++ b/bcipy/signal/model/__init__.py @@ -1,13 +1,10 @@ from bcipy.signal.model.base_model import SignalModel, ModelEvaluationReport from bcipy.signal.model.pca_rda_kde.pca_rda_kde import PcaRdaKdeModel from bcipy.signal.model.rda_kde.rda_kde import RdaKdeModel -from 
bcipy.signal.model.gaussian_mixture.gaussian_mixture import GazeModelIndividual, GazeModelCombined __all__ = [ "SignalModel", "PcaRdaKdeModel", "RdaKdeModel", - "GazeModelIndividual", - "GazeModelCombined", "ModelEvaluationReport", ] diff --git a/bcipy/signal/model/base_model.py b/bcipy/signal/model/base_model.py index 8a854f72b..828e3bb1e 100644 --- a/bcipy/signal/model/base_model.py +++ b/bcipy/signal/model/base_model.py @@ -16,10 +16,14 @@ class SignalModelMetadata(NamedTuple): device_spec: DeviceSpec # device used to train the model transform: Composition # data preprocessing steps evidence_type: str = None # optional; type of evidence produced + auc: float = None # optional; area under the curve + balanced_accuracy: float = None # optional; balanced accuracy class SignalModel(ABC): + name = "undefined" + @property def metadata(self) -> SignalModelMetadata: """Information regarding the data and parameters used to train the diff --git a/bcipy/signal/model/cross_validation.py b/bcipy/signal/model/cross_validation.py index f2ecff702..238229b0a 100644 --- a/bcipy/signal/model/cross_validation.py +++ b/bcipy/signal/model/cross_validation.py @@ -2,10 +2,11 @@ import scipy.optimize from sklearn import metrics +from bcipy.config import SESSION_LOG_FILENAME import logging -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) def cost_cross_validation_auc(model, opt_el, x, y, param, k_folds=10, @@ -14,7 +15,7 @@ def cost_cross_validation_auc(model, opt_el, x, y, param, k_folds=10, Cost function: given a particular architecture (model). Fits the parameters to the folds with leave one fold out procedure. Calculates scores for the validation fold. Concatenates all calculated scores - together and returns a -AUC vale. + together and returns a -AUC value. 
Args: model(pipeline): model to be iterated on opt_el(int): number of the element in pipeline to be optimized diff --git a/bcipy/signal/model/density_estimation.py b/bcipy/signal/model/density_estimation.py index 5422abcd4..7e88442b9 100644 --- a/bcipy/signal/model/density_estimation.py +++ b/bcipy/signal/model/density_estimation.py @@ -5,6 +5,8 @@ from scipy.stats import iqr from sklearn.neighbors import KernelDensity +from bcipy.config import SESSION_LOG_FILENAME + class KernelDensityEstimate: """Kernel density estimate using scikit learn. @@ -16,7 +18,7 @@ class KernelDensityEstimate: def __init__(self, scores: Optional[np.array] = None, kernel="gaussian", num_cls=2): bandwidth = 1.0 if scores is None else self._compute_bandwidth(scores, scores.shape[0]) - self.logger = logging.getLogger(__name__) + self.logger = logging.getLogger(SESSION_LOG_FILENAME) self.logger.info(f"KDE. bandwidth={bandwidth}, kernel={kernel}") self.num_cls = num_cls self.list_den_est = [KernelDensity(bandwidth=bandwidth, kernel=kernel) for _ in range(self.num_cls)] diff --git a/bcipy/signal/model/dimensionality_reduction.py b/bcipy/signal/model/dimensionality_reduction.py index 2a4d4bfc8..288cc3da3 100644 --- a/bcipy/signal/model/dimensionality_reduction.py +++ b/bcipy/signal/model/dimensionality_reduction.py @@ -4,6 +4,8 @@ import numpy as np from sklearn.decomposition import PCA +from bcipy.config import SESSION_LOG_FILENAME + class ChannelWisePrincipalComponentAnalysis: """Creates a PCA object for each channel. @@ -27,7 +29,7 @@ class ChannelWisePrincipalComponentAnalysis: def __init__(self, n_components: Optional[float] = None, random_state: Optional[int] = None, num_ch: int = 1): self.num_ch = num_ch self.list_pca = [PCA(n_components=n_components, random_state=random_state) for _ in range(self.num_ch)] - self.logger = logging.getLogger(__name__) + self.logger = logging.getLogger(SESSION_LOG_FILENAME) self.logger.info(f"PCA. 
n_components={n_components}, random_state={random_state}, num_ch={num_ch}") def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> None: diff --git a/bcipy/signal/model/gaussian_mixture/__init__.py b/bcipy/signal/model/gaussian_mixture/__init__.py index 1a5dc0dfe..e69de29bb 100644 --- a/bcipy/signal/model/gaussian_mixture/__init__.py +++ b/bcipy/signal/model/gaussian_mixture/__init__.py @@ -1,6 +0,0 @@ -from .gaussian_mixture import GazeModelCombined, GazeModelIndividual - -__all__ = [ - "GazeModelCombined", - "GazeModelIndividual", -] diff --git a/bcipy/signal/model/gaussian_mixture/gaussian_mixture.py b/bcipy/signal/model/gaussian_mixture/gaussian_mixture.py index 29307b84f..0f71f6c94 100644 --- a/bcipy/signal/model/gaussian_mixture/gaussian_mixture.py +++ b/bcipy/signal/model/gaussian_mixture/gaussian_mixture.py @@ -5,7 +5,6 @@ from sklearn.mixture import GaussianMixture from bcipy.helpers.stimuli import GazeReshaper from sklearn.model_selection import cross_val_score # noqa -from sklearn.utils.estimator_checks import check_estimator # noqa import scipy.stats as stats from typing import Optional @@ -13,6 +12,7 @@ class GazeModelIndividual(SignalModel): reshaper = GazeReshaper() + name = "gaze_model_individual" def __init__(self, num_components=2): self.num_components = num_components # number of gaussians to fit @@ -80,6 +80,7 @@ def load(self, path: Path): class GazeModelCombined(SignalModel): '''Gaze model that uses all symbols to fit a single Gaussian ''' reshaper = GazeReshaper() + name = "gaze_model_combined" def __init__(self, num_components=1): self.num_components = num_components # number of gaussians to fit diff --git a/bcipy/signal/model/offline_analysis.py b/bcipy/signal/model/offline_analysis.py index 1c26f335a..bbbda64e4 100644 --- a/bcipy/signal/model/offline_analysis.py +++ b/bcipy/signal/model/offline_analysis.py @@ -1,42 +1,43 @@ # mypy: disable-error-code="attr-defined" -# needed for the ERPTransformParams import json import logging 
+import subprocess from pathlib import Path -from typing import Tuple +from typing import List import numpy as np -from matplotlib.figure import Figure + from sklearn.metrics import balanced_accuracy_score from sklearn.model_selection import train_test_split import bcipy.acquisition.devices as devices from bcipy.config import (BCIPY_ROOT, DEFAULT_DEVICE_SPEC_FILENAME, - DEFAULT_PARAMETERS_PATH, MATRIX_IMAGE_FILENAME, - STATIC_AUDIO_PATH, TRIGGER_FILENAME) + DEFAULT_PARAMETERS_PATH, MATRIX_IMAGE_FILENAME, DEFAULT_DEVICES_PATH, + TRIGGER_FILENAME, SESSION_LOG_FILENAME) from bcipy.helpers.acquisition import analysis_channels, raw_data_filename from bcipy.helpers.load import (load_experimental_data, load_json_parameters, load_raw_data) +from bcipy.gui.alert import confirm from bcipy.helpers.parameters import Parameters from bcipy.helpers.save import save_model -from bcipy.helpers.stimuli import play_sound, update_inquiry_timing +from bcipy.helpers.stimuli import update_inquiry_timing from bcipy.helpers.symbols import alphabet from bcipy.helpers.system_utils import report_execution_time from bcipy.helpers.triggers import TriggerType, trigger_decoder from bcipy.helpers.visualization import (visualize_centralized_data, - visualize_erp, visualize_gaze, + visualize_gaze, visualize_gaze_accuracies, visualize_gaze_inquiries, visualize_results_all_symbols) from bcipy.preferences import preferences from bcipy.signal.model.base_model import SignalModel, SignalModelMetadata -from bcipy.signal.model.gaussian_mixture import (GazeModelCombined, - GazeModelIndividual) +from bcipy.signal.model.gaussian_mixture.gaussian_mixture import (GazeModelCombined, + GazeModelIndividual) from bcipy.signal.model.pca_rda_kde import PcaRdaKdeModel from bcipy.signal.process import (ERPTransformParams, extract_eye_info, filter_inquiries, get_default_transform) -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) logging.basicConfig(level=logging.INFO, 
format="[%(threadName)-9s][%(asctime)s][%(name)s][%(levelname)s]: %(message)s") @@ -74,7 +75,7 @@ def subset_data(data: np.ndarray, labels: np.ndarray, test_size: float, random_s return train_data, test_data, train_labels, test_labels -def analyze_erp(erp_data, parameters, device_spec, data_folder, estimate_balanced_acc, +def analyze_erp(erp_data, parameters, device_spec, data_folder, estimate_balanced_acc: bool, save_figures=False, show_figures=False): """Analyze ERP data and return/save the ERP model. Extract relevant information from raw data object. @@ -182,39 +183,46 @@ def analyze_erp(erp_data, parameters, device_spec, data_folder, estimate_balance # train and save the model as a pkl file log.info("Training model. This will take some time...") model = PcaRdaKdeModel(k_folds=k_folds) - model.fit(data, labels) - model.metadata = SignalModelMetadata(device_spec=device_spec, - transform=default_transform) - log.info(f"Training complete [AUC={model.auc:0.4f}]. Saving data...") + try: + model.fit(data, labels) + model.metadata = SignalModelMetadata(device_spec=device_spec, + transform=default_transform, + evidence_type="ERP", + auc=model.auc) + log.info(f"Training complete [AUC={model.auc:0.4f}]. 
Saving data...") + except Exception as e: + log.error(f"Error training model: {e}") + + try: + # Using an 80/20 split, report on balanced accuracy + if estimate_balanced_acc: + train_data, test_data, train_labels, test_labels = subset_data(data, labels, test_size=0.2) + dummy_model = PcaRdaKdeModel(k_folds=k_folds) + dummy_model.fit(train_data, train_labels) + probs = dummy_model.predict_proba(test_data) + preds = probs.argmax(-1) + score = balanced_accuracy_score(test_labels, preds) + log.info(f"Balanced acc with 80/20 split: {score}") + model.metadata.balanced_accuracy = score + del dummy_model, train_data, test_data, train_labels, test_labels, probs, preds + + except Exception as e: + log.error(f"Error calculating balanced accuracy: {e}") save_model(model, Path(data_folder, f"model_{model.auc:0.4f}.pkl")) preferences.signal_model_directory = data_folder - # Using an 80/20 split, report on balanced accuracy - if estimate_balanced_acc: - train_data, test_data, train_labels, test_labels = subset_data(data, labels, test_size=0.2) - dummy_model = PcaRdaKdeModel(k_folds=k_folds) - dummy_model.fit(train_data, train_labels) - probs = dummy_model.predict_proba(test_data) - preds = probs.argmax(-1) - score = balanced_accuracy_score(test_labels, preds) - log.info(f"Balanced acc with 80/20 split: {score}") - del dummy_model, train_data, test_data, train_labels, test_labels, probs, preds - - # this should have uncorrected trigger timing for display purposes - figure_handles = visualize_erp( - erp_data, - channel_map, - trigger_timing, - labels, - trial_window, - transform=default_transform, - plot_average=True, - plot_topomaps=True, - save_path=data_folder if save_figures else None, - show=show_figures - ) - return model, figure_handles + if save_figures or show_figures: + cmd = f'bcipy-erp-viz --session_path "{data_folder}" --parameters "{parameters["parameter_location"]}"' + if save_figures: + cmd += ' --save' + if show_figures: + cmd += ' --show' + subprocess.run( + cmd, 
+ shell=True + ) + return model def analyze_gaze( @@ -248,15 +256,13 @@ def analyze_gaze( "Individual": Fits a separate Gaussian for each symbol. Default model "Centralized": Uses data from all symbols to fit a single centralized Gaussian """ - figures = [] - figure_handles = visualize_gaze( + visualize_gaze( gaze_data, save_path=save_figures, img_path=f'{data_folder}/{MATRIX_IMAGE_FILENAME}', show=show_figures, raw_plot=plot_points, ) - figures.extend(figure_handles) channels = gaze_data.channels type_amp = gaze_data.daq_type @@ -347,7 +353,7 @@ def analyze_gaze( means, covs = model.evaluate(test_re) # Visualize the results: - figure_handles = visualize_gaze_inquiries( + visualize_gaze_inquiries( le, re, means, covs, save_path=save_figures, @@ -355,7 +361,6 @@ def analyze_gaze( show=show_figures, raw_plot=plot_points, ) - figures.extend(figure_handles) left_eye_all.append(le) right_eye_all.append(re) means_all.append(means) @@ -399,22 +404,20 @@ def analyze_gaze( print(f"Overall accuracy: {accuracy:.2f}") # Plot all accuracies as bar plot: - figure_handles = visualize_gaze_accuracies(acc_all_symbols, accuracy, save_path=None, show=True) - figures.extend(figure_handles) + visualize_gaze_accuracies(acc_all_symbols, accuracy, save_path=None, show=True) if model_type == "Centralized": cent_left = np.concatenate(np.array(centralized_data_left, dtype=object)) cent_right = np.concatenate(np.array(centralized_data_right, dtype=object)) # Visualize the results: - figure_handles = visualize_centralized_data( + visualize_centralized_data( cent_left, cent_right, save_path=save_figures, img_path=f'{data_folder}/{MATRIX_IMAGE_FILENAME}', show=show_figures, raw_plot=plot_points, ) - figures.extend(figure_handles) # Fit the model: model.fit(cent_left) @@ -427,7 +430,7 @@ def analyze_gaze( le = preprocessed_data[sym][0] re = preprocessed_data[sym][1] # Visualize the results: - figure_handles = visualize_gaze_inquiries( + visualize_gaze_inquiries( le, re, means, covs, 
save_path=save_figures, @@ -435,13 +438,13 @@ def analyze_gaze( show=show_figures, raw_plot=plot_points, ) - figures.extend(figure_handles) left_eye_all.append(le) right_eye_all.append(re) means_all.append(means) covs_all.append(covs) - fig_handles = visualize_results_all_symbols( + # TODO: add visualizations to subprocess + visualize_results_all_symbols( left_eye_all, right_eye_all, means_all, covs_all, img_path=f'{data_folder}/{MATRIX_IMAGE_FILENAME}', @@ -449,7 +452,6 @@ def analyze_gaze( show=show_figures, raw_plot=plot_points, ) - figures.extend(fig_handles) model.metadata = SignalModelMetadata(device_spec=device_spec, transform=None) @@ -457,7 +459,7 @@ def analyze_gaze( save_model( model, Path(data_folder, f"model_{device_spec.content_type}_{model_type}.pkl")) - return model, figures + return model @report_execution_time @@ -468,7 +470,7 @@ def offline_analysis( estimate_balanced_acc: bool = False, show_figures: bool = False, save_figures: bool = False, -) -> Tuple[SignalModel, Figure]: +) -> List[SignalModel]: """Gets calibration data and trains the model in an offline fashion. pickle dumps the model into a .pkl folder @@ -499,45 +501,52 @@ def offline_analysis( Returns: -------- model (SignalModel): trained model - figure_handles (Figure): handles to the ERP figures """ assert parameters, "Parameters are required for offline analysis." if not data_folder: data_folder = load_experimental_data() + # Load default devices which are used for training the model with different channels, etc. 
devices_by_name = devices.load( - Path(data_folder, DEFAULT_DEVICE_SPEC_FILENAME), replace=True) + Path(DEFAULT_DEVICES_PATH, DEFAULT_DEVICE_SPEC_FILENAME), replace=True) - active_devices = (spec for spec in devices_by_name.values() + # Load the active devices used during a session; this will be used to exclude inactive devices + active_devices_by_name = devices.load( + Path(data_folder, DEFAULT_DEVICE_SPEC_FILENAME), replace=True) + active_devices = (spec for spec in active_devices_by_name.values() if spec.is_active) active_raw_data_paths = (Path(data_folder, raw_data_filename(device_spec)) for device_spec in active_devices) data_file_paths = [path for path in active_raw_data_paths if path.exists()] + assert len(data_file_paths) < 3, "BciPy only supports up to 2 devices for offline analysis." + assert len(data_file_paths) > 0, "No data files found for offline analysis." + models = [] - figure_handles = [] + log.info(f"Starting offline analysis for {data_file_paths}") for raw_data_path in data_file_paths: raw_data = load_raw_data(raw_data_path) device_spec = devices_by_name.get(raw_data.daq_type) # extract relevant information from raw data object eeg if device_spec.content_type == "EEG": - erp_model, erp_figure_handles = analyze_erp( + erp_model = analyze_erp( raw_data, parameters, device_spec, data_folder, estimate_balanced_acc, save_figures, show_figures) models.append(erp_model) - figure_handles.extend(erp_figure_handles) if device_spec.content_type == "Eyetracker": - et_model, et_figure_handles = analyze_gaze( + et_model = analyze_gaze( raw_data, parameters, device_spec, data_folder, save_figures, show_figures, model_type="Individual") models.append(et_model) - figure_handles.extend(et_figure_handles) if alert_finished: - play_sound(f"{STATIC_AUDIO_PATH}/{parameters['alert_sound_file']}") - return models, figure_handles + log.info("Alerting Offline Analysis Complete") + results = [f"{model.name}: {model.auc}" for model in models] + confirm(f"Offline analysis 
complete! \n Results={results}") + log.info("Offline analysis complete") + return models -if __name__ == "__main__": +def main(): import argparse parser = argparse.ArgumentParser() @@ -548,13 +557,17 @@ def offline_analysis( parser.add_argument("--alert", dest="alert", action="store_true") parser.add_argument("--balanced-acc", dest="balanced", action="store_true") parser.set_defaults(alert=False) - parser.set_defaults(balanced=True) + parser.set_defaults(balanced=False) parser.set_defaults(save_figures=False) - parser.set_defaults(show_figures=True) + parser.set_defaults(show_figures=False) args = parser.parse_args() log.info(f"Loading params from {args.parameters_file}") parameters = load_json_parameters(args.parameters_file, value_cast=True) + log.info( + f"Starting offline analysis client with the following: Data={args.data_folder} || " + f"Save Figures={args.save_figures} || Show Figures={args.show_figures} || " + f"Alert={args.alert} || Calculate Balanced Accuracy={args.balanced}") offline_analysis( args.data_folder, @@ -563,4 +576,7 @@ def offline_analysis( estimate_balanced_acc=args.balanced, save_figures=args.save_figures, show_figures=args.show_figures) - log.info("Offline Analysis complete.") + + +if __name__ == "__main__": + main() diff --git a/bcipy/signal/model/pca_rda_kde/pca_rda_kde.py b/bcipy/signal/model/pca_rda_kde/pca_rda_kde.py index 7187e5c2c..28280c1d3 100644 --- a/bcipy/signal/model/pca_rda_kde/pca_rda_kde.py +++ b/bcipy/signal/model/pca_rda_kde/pca_rda_kde.py @@ -4,7 +4,7 @@ import numpy as np -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import SignalException from bcipy.helpers.stimuli import InquiryReshaper from bcipy.signal.model import ModelEvaluationReport, SignalModel from bcipy.signal.model.classifier import RegularizedDiscriminantAnalysis @@ -18,6 +18,7 @@ class PcaRdaKdeModel(SignalModel): reshaper: InquiryReshaper = InquiryReshaper() + name = "pca_rda_kde" def __init__(self, k_folds: int = 10, 
prior_type="uniform", pca_n_components=0.9): self.k_folds = k_folds diff --git a/bcipy/signal/model/rda_kde/rda_kde.py b/bcipy/signal/model/rda_kde/rda_kde.py index 405250374..6c716d569 100644 --- a/bcipy/signal/model/rda_kde/rda_kde.py +++ b/bcipy/signal/model/rda_kde/rda_kde.py @@ -9,12 +9,13 @@ from bcipy.signal.model.density_estimation import KernelDensityEstimate from bcipy.signal.model.dimensionality_reduction import MockPCA from bcipy.signal.model.pipeline import Pipeline -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import SignalException from bcipy.helpers.stimuli import InquiryReshaper class RdaKdeModel(SignalModel): reshaper = InquiryReshaper() + name = "rda_kde" def __init__(self, k_folds: int, prior_type: str = "uniform"): self.k_folds = k_folds diff --git a/bcipy/signal/process/decomposition/psd.py b/bcipy/signal/process/decomposition/psd.py index 69869f2d4..ddeda93da 100644 --- a/bcipy/signal/process/decomposition/psd.py +++ b/bcipy/signal/process/decomposition/psd.py @@ -8,7 +8,7 @@ from typing import Tuple -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import SignalException class PSD_TYPE(Enum): diff --git a/bcipy/signal/process/extract_gaze.py b/bcipy/signal/process/extract_gaze.py index 6c707dad0..6aa95d6fb 100644 --- a/bcipy/signal/process/extract_gaze.py +++ b/bcipy/signal/process/extract_gaze.py @@ -1,6 +1,6 @@ import numpy as np -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import SignalException def extract_eye_info(data): diff --git a/bcipy/signal/tests/model/pca_rda_kde/test_pca_rda_kde.py b/bcipy/signal/tests/model/pca_rda_kde/test_pca_rda_kde.py index e4238d2b7..4d5439ecc 100644 --- a/bcipy/signal/tests/model/pca_rda_kde/test_pca_rda_kde.py +++ b/bcipy/signal/tests/model/pca_rda_kde/test_pca_rda_kde.py @@ -8,7 +8,7 @@ import pytest from scipy.stats import norm -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import 
SignalException from bcipy.helpers.load import load_signal_models from bcipy.helpers.save import save_model from bcipy.helpers.symbols import alphabet diff --git a/bcipy/signal/tests/model/rda_kde/test_rda_kde.py b/bcipy/signal/tests/model/rda_kde/test_rda_kde.py index 9ea8312ca..c09e53e0c 100644 --- a/bcipy/signal/tests/model/rda_kde/test_rda_kde.py +++ b/bcipy/signal/tests/model/rda_kde/test_rda_kde.py @@ -15,7 +15,7 @@ from bcipy.signal.model.density_estimation import KernelDensityEstimate from bcipy.signal.model.dimensionality_reduction import MockPCA from bcipy.signal.model.pipeline import Pipeline -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import SignalException expected_output_folder = Path(__file__).absolute().parent.parent / "unit_test_expected_output" diff --git a/bcipy/signal/tests/model/test_offline_analysis.py b/bcipy/signal/tests/model/test_offline_analysis.py index 3f8535e2e..54f84d8e3 100644 --- a/bcipy/signal/tests/model/test_offline_analysis.py +++ b/bcipy/signal/tests/model/test_offline_analysis.py @@ -19,7 +19,7 @@ @pytest.mark.slow -class TestOfflineAnalysis(unittest.TestCase): +class TestOfflineAnalysisEEG(unittest.TestCase): """Integration test of offline_analysis.py (slow) This test is slow because it runs the full offline analysis pipeline and compares its' output @@ -50,12 +50,14 @@ def setUpClass(cls): params_path = pwd.parent.parent.parent / "parameters" / DEFAULT_PARAMETER_FILENAME cls.parameters = load_json_parameters(params_path, value_cast=True) - models, fig_handles = offline_analysis( - str(cls.tmp_dir), cls.parameters, save_figures=True, show_figures=False, alert_finished=False) + models = offline_analysis( + str(cls.tmp_dir), + cls.parameters, + save_figures=False, + show_figures=False, + alert_finished=False) + # only one model is generated using the default parameters cls.model = models[0] - cls.mean_erp_fig_handle = fig_handles[0] - cls.mean_nontarget_topomap_handle = fig_handles[1] - 
cls.mean_target_topomap_handle = fig_handles[2] @classmethod def tearDownClass(cls): @@ -73,20 +75,6 @@ def test_model_AUC(self): found_auc = self.get_auc(list(self.tmp_dir.glob("model_*.pkl"))[0].name) self.assertAlmostEqual(expected_auc, found_auc, delta=0.005) - @pytest.mark.mpl_image_compare(baseline_dir=expected_output_folder, filename="test_mean_erp.png", remove_text=True) - def test_mean_erp(self): - return self.mean_erp_fig_handle - - @pytest.mark.mpl_image_compare(baseline_dir=expected_output_folder, - filename="test_target_topomap.png", remove_text=False) - def test_target_topomap(self): - return self.mean_target_topomap_handle - - @pytest.mark.mpl_image_compare(baseline_dir=expected_output_folder, - filename="test_nontarget_topomap.png", remove_text=False) - def test_nontarget_topomap(self): - return self.mean_nontarget_topomap_handle - if __name__ == "__main__": unittest.main() diff --git a/bcipy/signal/tests/process/decomposition/test_decomposition.py b/bcipy/signal/tests/process/decomposition/test_decomposition.py index 540685e5d..6f6adf887 100644 --- a/bcipy/signal/tests/process/decomposition/test_decomposition.py +++ b/bcipy/signal/tests/process/decomposition/test_decomposition.py @@ -1,7 +1,7 @@ import unittest from bcipy.config import BCIPY_ROOT -from bcipy.helpers.exceptions import SignalException +from bcipy.exceptions import SignalException from bcipy.signal.process.decomposition import continuous_wavelet_transform from bcipy.signal.process.decomposition.psd import power_spectral_density, PSD_TYPE import numpy as np diff --git a/bcipy/simulator/data/data_engine.py b/bcipy/simulator/data/data_engine.py index 0e10813ca..fe591db85 100644 --- a/bcipy/simulator/data/data_engine.py +++ b/bcipy/simulator/data/data_engine.py @@ -7,7 +7,7 @@ import numpy as np import pandas as pd -from bcipy.helpers.exceptions import TaskConfigurationException +from bcipy.exceptions import TaskConfigurationException from bcipy.helpers.parameters import Parameters from 
bcipy.simulator.data import data_process from bcipy.simulator.data.data_process import (ExtractedExperimentData, diff --git a/bcipy/simulator/data/sampler.py b/bcipy/simulator/data/sampler.py index fa1e3f8c4..5424c95e2 100644 --- a/bcipy/simulator/data/sampler.py +++ b/bcipy/simulator/data/sampler.py @@ -180,8 +180,8 @@ def sample(self, state: SimState) -> List[Trial]: inquiry_n = random.choice(source_inquiries[data_source]) # select all trials for the data_source and inquiry - inquiry_df = self.data.loc[(self.data['source'] == data_source) - & (self.data['inquiry_n'] == inquiry_n)] + inquiry_df = self.data.loc[(self.data['source'] == data_source) & + (self.data['inquiry_n'] == inquiry_n)] assert len(inquiry_df) == len( inquiry_letter_subset), f"Invalid data source {data_source}" diff --git a/bcipy/simulator/task/copy_phrase.py b/bcipy/simulator/task/copy_phrase.py index 1d78c44ec..1dee73b09 100644 --- a/bcipy/simulator/task/copy_phrase.py +++ b/bcipy/simulator/task/copy_phrase.py @@ -1,5 +1,7 @@ +# mypy: disable-error-code="union-attr" """Simulates the Copy Phrase task""" from typing import Dict, List, Optional, Tuple +import logging from bcipy.display.main import Display from bcipy.feedback.visual.visual_feedback import VisualFeedback @@ -39,6 +41,7 @@ def __init__(self, parameters: Parameters, file_save: str, fake=False) self.save_session_every_inquiry = False self.samplers = samplers + self.logger = logging.getLogger(__name__) def init_evidence_evaluators( self, signal_models: List[SignalModel]) -> List[EvidenceEvaluator]: diff --git a/bcipy/task/README.md b/bcipy/task/README.md index 3943d744c..7defbe7e4 100644 --- a/bcipy/task/README.md +++ b/bcipy/task/README.md @@ -5,8 +5,7 @@ These are the tasks that can be run to collect experimental data. ## Paradigms ------------ -Within `task/` there are folders for each of the supported paradigms, and within them, the supported modes. To add new paradigms, create a folder for it and place the tasks in files within it. 
Be sure to add it to the `start_task` file at the root to be able execute it! An entry must also be added to the task_registry TaskType -enum. This updates the GUI (BCInterface.py) and makes the task available to `start_task`. +Within `task/` there are folders for each of the supported paradigms, and within them, the supported modes. To add new paradigms, create a folder for it and place the tasks in files within it. An entry must also be added to the task_registry TaskType and/or TaskMode enum. This updates the GUI (BCInterface.py) and makes the task available to the BciPy Client. Currently, these are the supported paradigms and modes: @@ -33,25 +32,160 @@ Currently, these are the supported paradigms and modes: > Copy Phrase: Used to copy a phrase using the Matrix paradigm (e.g. P300 Speller) on data from a P300 calibration +### *Paradigm: VEP* -## Start Task -------------- +##### Mode: Calibration + +> Calibration: Used to calibrate the VEP paradigm for a user. Note this has not been extensively tested, use with caution. + + +## Running Tasks using the SessionOrchestrator + +The `SessionOrchestrator` is a class that can be used to run a Protocol (sequence of Tasks/Actions). The core BciPy client and GUI use this class and resulting data strucutures. It will run the tasks in the order defined, handle the transition between tasks, and persist data. There are several optional arguments that can be provided to the orchestrator: + +experiment_id: str + This is used to load any defined protocols or field collections. If no experiment_id is provided, a default will be used, and the orchestrator will run any tasks in the order they were added. +user: str + The user ID to associate with the session data. By default, this is DEFAULT_USER. +parameters_path: str + The path to the BciPy parameters file. By default, this is DEFAULT_PARAMETERS_PATH, located at bcipy/parameters/parameters.json. +parameters: Parameters + A Parameters object to use for the Tasks. 
If provided, this will override the parameters_path. +fake: bool + If True, the Tasks will run in fake mode. This is useful for testing or debugging paradigms with streaming data or models. By default, this is False. +alert: bool + If True, after a Task execution the orchestrator will alert experimenters when a task is complete. By default, this is False. +visualize: bool + If True, after a Task execution the orchestrator will visualize data. By default, this is False. This only works for EEG data with target/non-target labels. + + +The data will be saved in the following format in the specified data_save_loc (default is bcipy/data/): + +``` + +data_save_loc/ + user_id/ + experiment_id/ + run_id / + task_id/ + logs/ + task_log_data + task_data (e.g. acquisition data, parameters, visualizations) + task_id/ + logs/ + task_log_data + task_data (e.g. acquisition data, parameters, visualizations) + logs/ + protocol_log_data + protocol_data (system data, protocol/tasks executed) +``` + +### Usage manually + +```python +from bcipy.task import Task +from bcipy.task import SessionOrchestrator +from bcipy.task.action import OfflineAnalysisAction +from bcipy.task.task_registry import TaskType +from bcipy.task.paradigm.rsvp.calibration import RSVPCalibration +from bcipy.task.paradigm.rsvp.copy_phrase import RSVPCopyPhrase + +# Create a list of tasks to run, These should not be initialized. +tasks = [ + RSVPCalibration, + OfflineAnalysisAction, + RSVPCopyPhrase +] + +# Create a SessionOrchestrator +orchestrator = SessionOrchestrator() + +# add the tasks to the orchestrator +orchestrator.add_tasks(tasks) + +# run the tasks +orchestrator.execute() +``` + +### Usage with a Protocol -Start Task takes in Display [object], parameters [dict], file save [str-path] and task type [dict]. Using the -task type, start_task() will route to the correct paradigm (RSVP, SSVEP, MATRIX) and mode (Calibration, Copy Phrase, etc.) 
+```python +from bcipy.task import Task +from bcipy.task import SessionOrchestrator +from bcipy.task.action import OfflineAnalysisAction +from bcipy.task.task_registry import TaskType +from bcipy.task.paradigm.rsvp.calibration import RSVPCalibration +from bcipy.task.paradigm.rsvp.copy_phrase import RSVPCopyPhrase +from bcipy.task.protocol import parse_protocol -It is called in the following way: +# Create a protocol. This would be extracted from a registered experiment. +example_protocol = 'RSVPCalibration -> OfflineAnalysisAction -> RSVPCopyPhrase' +# Parse the protocol into a list of tasks. This will raise an error if the TaskType is not registered. +tasks = parse_protocol(example_protocol) +# Create a SessionOrchestrator +orchestrator = SessionOrchestrator() +# add the tasks to the orchestrator +orchestrator.add_tasks(tasks) + +# run the tasks +orchestrator.execute() ``` - from bcipy.task.start_task import start_task - start_task( - display_window, - data_acquisition_client, - parameters, - file_save) +### Usage from experiment loading + +Note: A new experiment must be registered in the `bcipy/parameters/experiments.json` file. The BCInterface may also be used to create a new named experiment. 
+ +```python +from bcipy.task import Task +from bcipy.task import SessionOrchestrator +from bcipy.task.action import OfflineAnalysisAction +from bcipy.task.task_registry import TaskType +from bcipy.task.paradigm.rsvp.calibration import RSVPCalibration +from bcipy.task.paradigm.rsvp.copy_phrase import RSVPCopyPhrase +from bcipy.task.protocol import parse_protocol +from bcipy.helpers.load import load_experiment + +# Load an experiment from the registered experiments +experiment_name = 'default' +experiment = load_experiment(experiment_name) +# grab the protocol from the experiment and parse it +protocol = experiment['protocol'] +tasks = parse_protocol(protocol) + +# Create a SessionOrchestrator +orchestrator = SessionOrchestrator() +# add the tasks to the orchestrator +orchestrator.add_tasks(tasks) + +# run the tasks +orchestrator.execute() ``` -It will throw an error if the task isn't implemented. +### Using orchestration to type using multiple copy phrases with different text and spelled letters + +There are experiments in which multiple copy phrases would be used to test the performance of a user and the system over a variety of phrases. This is especially useful for testing the language model performance over different contexts and starting data. Additionally, this can be used to test the performance of the system over different spelling lengths and complexity. + +If the `copy_phrase_location` parameter is set in the parameters.json file, the orchestrator will use the provided file to load the phrases to be copied in Tasks with the mode TaskMode.COPYPHRASE. The file should be a JSON file with the following format: + +```json +{ + "Phrases": [ + ["This is the first phrase", 1], + ["This is the second phrase", 2], + ["This is the third phrase", 3] + ] +} +``` + +Each phrase should be a list with the phrase as the first element (string) and the spelled letter count as the second element (integer). 
The orchestrator will iterate through the phrases in order, copying each one the specified number of times. If any phrases are remaining, the orchestrator will save the phrase list to the run directory for future use. + + + + + + + + diff --git a/bcipy/task/__init__.py b/bcipy/task/__init__.py index 79f953973..2a60353aa 100644 --- a/bcipy/task/__init__.py +++ b/bcipy/task/__init__.py @@ -1,10 +1,14 @@ """ This import statement allows users to import submodules from Task """ -from .main import Task -from .task_registry import TaskType +from .main import Task, TaskData, TaskMode + +# Makes the following classes available to the task registry +from .registry import TaskRegistry __all__ = [ 'Task', - 'TaskType', + 'TaskRegistry', + 'TaskData', + 'TaskMode' ] diff --git a/bcipy/task/actions.py b/bcipy/task/actions.py new file mode 100644 index 000000000..12b520624 --- /dev/null +++ b/bcipy/task/actions.py @@ -0,0 +1,380 @@ +# mypy: disable-error-code="assignment,arg-type" +import subprocess +from typing import Any, Optional, List, Callable, Tuple +import logging +from pathlib import Path +import glob + +from bcipy.gui.bciui import run_bciui +from matplotlib.figure import Figure + +from bcipy.gui.intertask_gui import IntertaskGUI +from bcipy.gui.experiments.ExperimentField import start_experiment_field_collection_gui +from bcipy.task import Task, TaskMode, TaskData +from bcipy.helpers.triggers import trigger_decoder, TriggerType + +from bcipy.acquisition import devices +from bcipy.helpers.acquisition import analysis_channels +from bcipy.helpers.parameters import Parameters +from bcipy.acquisition.devices import DeviceSpec +from bcipy.helpers.load import load_raw_data +from bcipy.helpers.raw_data import RawData +from bcipy.signal.process import get_default_transform +from bcipy.helpers.report import SignalReportSection, SessionReportSection, Report, ReportSection +from bcipy.config import SESSION_LOG_FILENAME, RAW_DATA_FILENAME, TRIGGER_FILENAME +from 
bcipy.helpers.visualization import visualize_erp +from bcipy.signal.evaluate.artifact import ArtifactDetection + + +logger = logging.getLogger(SESSION_LOG_FILENAME) + + +class CodeHookAction(Task): + """ + Action for running generic code hooks. + """ + + name = "CodeHookAction" + mode = TaskMode.ACTION + + def __init__( + self, + parameters: Parameters, + data_directory: str, + code_hook: Optional[str] = None, + subprocess: bool = True, + **kwargs) -> None: + super().__init__() + self.code_hook = code_hook + self.subprocess = subprocess + + def execute(self) -> TaskData: + if self.code_hook: + if self.subprocess: + subprocess.Popen(self.code_hook, shell=True) + + else: + subprocess.run(self.code_hook, shell=True) + return TaskData() + + +class OfflineAnalysisAction(Task): + """ + Action for running offline analysis. + """ + + name = "OfflineAnalysisAction" + mode = TaskMode.ACTION + + def __init__( + self, + parameters: Parameters, + data_directory: str, + parameters_path: str, + last_task_dir: Optional[str] = None, + alert_finished: bool = False, + **kwargs: Any) -> None: + super().__init__() + self.parameters = parameters + self.parameters_path = parameters_path + self.alert_finished = alert_finished + + # TODO: add a feature to orchestrator to permit the user to select the last task directory or have it loaded. + if last_task_dir: + self.data_directory = last_task_dir + else: + self.data_directory = data_directory + + def execute(self) -> TaskData: + """Execute the offline analysis. + + Note: This function is called by the orchestrator to execute the offline analysis task. Some of the + exceptions that can be raised by this function are not recoverable and will cause the orchestrator + to stop execution. For example, if Exception is thrown in cross_validation due to the # of folds being + inconsistent. 
+ + """ + logger.info("Running offline analysis action") + try: + cmd = f'bcipy-train -p "{self.parameters_path}"' + if self.alert_finished: + cmd += " --alert" + response = subprocess.run( + cmd, + shell=True, + check=True, + ) + except Exception as e: + logger.exception(f"Error running offline analysis: {e}") + raise e + return TaskData( + save_path=self.data_directory, + task_dict={"parameters": self.parameters_path, + "response": response}, + ) + + +class IntertaskAction(Task): + name = "IntertaskAction" + mode = TaskMode.ACTION + tasks: List[Task] + current_task_index: int + + def __init__( + self, + parameters: Parameters, + save_path: str, + progress: Optional[int] = None, + tasks: Optional[List[Task]] = None, + exit_callback: Optional[Callable] = None, + **kwargs: Any) -> None: + super().__init__() + self.save_folder = save_path + self.parameters = parameters + assert progress is not None and tasks is not None, "Either progress or tasks must be provided" + self.next_task_index = progress # progress is 1-indexed, tasks is 0-indexed so we can use the same index + assert self.next_task_index >= 0, "Progress must be greater than 1 " + self.tasks = tasks + self.task_name = self.tasks[self.next_task_index].name + self.task_names = [task.name for task in self.tasks] + self.exit_callback = exit_callback + + def execute(self) -> TaskData: + + run_bciui( + IntertaskGUI, + tasks=self.task_names, + next_task_index=self.next_task_index, + exit_callback=self.exit_callback), + + return TaskData( + save_path=self.save_folder, + task_dict={ + "next_task_index": self.next_task_index, + "tasks": self.task_names, + "task_name": self.task_name, + }, + ) + + def alert(self): + pass + + +class ExperimentFieldCollectionAction(Task): + """ + Action for collecting experiment field data. 
+ """ + + name = "ExperimentFieldCollectionAction" + mode = TaskMode.ACTION + + def __init__( + self, + parameters: Parameters, + data_directory: str, + experiment_id: str = 'default', + **kwargs: Any) -> None: + super().__init__() + self.experiment_id = experiment_id + self.save_folder = data_directory + self.parameters = parameters + + def execute(self) -> TaskData: + logger.info( + f"Collecting experiment field data for experiment {self.experiment_id} in save folder {self.save_folder}" + ) + start_experiment_field_collection_gui(self.experiment_id, self.save_folder) + return TaskData( + save_path=self.save_folder, + task_dict={ + "experiment_id": self.experiment_id, + }, + ) + + +class BciPyCalibrationReportAction(Task): + """ + Action for generating a report after calibration Tasks. + """ + + name = "BciPyReportAction" + mode = TaskMode.ACTION + + def __init__( + self, + parameters: Parameters, + save_path: str, + protocol_path: Optional[str] = None, + last_task_dir: Optional[str] = None, + trial_window: Optional[Tuple[float, float]] = None, + **kwargs: Any) -> None: + super().__init__() + self.save_folder = save_path + # Currently we assume all Tasks have the same parameters, this may change in the future. + self.parameters = parameters + self.protocol_path = protocol_path or '' + self.last_task_dir = last_task_dir + self.default_transform = None + self.trial_window = (-0.2, 1.0) + self.static_offset = None + self.report = Report(self.protocol_path) + self.report_sections: List[ReportSection] = [] + self.all_raw_data: List[RawData] = [] + self.type_amp = None + + def execute(self) -> TaskData: + """Excute the report generation action. + + This assumes all data were collected using the same protocol, device, and parameters. 
+ """ + logger.info(f"Generating report in save folder {self.save_folder}") + # loop through all the files in the last_task_dir + + data_directories = [] + # If a protocol is given, loop over and look for any calibration directories + try: + if self.protocol_path: + # Use glob to find all directories with Calibration in the name + calibration_directories = glob.glob( + f"{self.protocol_path}/**/*Calibration*", + recursive=True) + for data_dir in calibration_directories: + path_data_dir = Path(data_dir) + # pull out the last directory name + task_name = path_data_dir.parts[-1].split('_')[0] + data_directories.append(path_data_dir) + # For each calibration directory, attempt to load the raw data + signal_report_section = self.create_signal_report(path_data_dir) + session_report = self.create_session_report(path_data_dir, task_name) + self.report_sections.append(session_report) + self.report.add(session_report) + self.report_sections.append(signal_report_section) + self.report.add(signal_report_section) + if data_directories: + logger.info(f"Saving report generated from: {self.protocol_path}") + else: + logger.info(f"No data found in {self.protocol_path}") + + except Exception as e: + logger.exception(f"Error generating report: {e}") + + self.report.compile() + self.report.save() + return TaskData( + save_path=self.save_folder, + task_dict={}, + ) + + def create_signal_report(self, data_dir: Path) -> SignalReportSection: + raw_data = load_raw_data(Path(data_dir, f'{RAW_DATA_FILENAME}.csv')) + if not self.type_amp: + self.type_amp = raw_data.daq_type + channels = raw_data.channels + sample_rate = raw_data.sample_rate + device_spec = devices.preconfigured_device(raw_data.daq_type) + self.static_offset = device_spec.static_offset + channel_map = analysis_channels(channels, device_spec) + self.all_raw_data.append(raw_data) + + # Set the default transform if not already set + if not self.default_transform: + self.set_default_transform(sample_rate) + + triggers = 
self.get_triggers(data_dir) + # get figure handles + figure_handles = self.get_figure_handles(raw_data, channel_map, triggers) + artifact_detector = self.get_artifact_detector(raw_data, device_spec, triggers) + return SignalReportSection(figure_handles, artifact_detector) + + def create_session_report(self, data_dir, task_name) -> SessionReportSection: + # get task name + summary_dict = { + "task": task_name, + "data_location": data_dir, + "amplifier": self.type_amp + } + signal_model_metrics = self.get_signal_model_metrics(data_dir) + summary_dict.update(signal_model_metrics) + + return SessionReportSection(summary_dict) + + def get_signal_model_metrics(self, data_directory: Path) -> dict: + """Get the signal model metrics from the session folder. + + In the future, the model will save a ModelMetrics with the pkl file. + For now, we just look for the pkl file and extract the AUC from the filename. + """ + pkl_file = None + for file in data_directory.iterdir(): + if file.suffix == '.pkl': + pkl_file = file + break + + if pkl_file: + auc = pkl_file.stem.split('_')[-1] + else: + auc = 'No Signal Model found in session folder' + + return {'AUC': auc} + + def set_default_transform(self, sample_rate: int) -> None: + downsample_rate = self.parameters.get("down_sampling_rate") + notch_filter = self.parameters.get("notch_filter_frequency") + filter_high = self.parameters.get("filter_high") + filter_low = self.parameters.get("filter_low") + filter_order = self.parameters.get("filter_order") + self.default_transform = get_default_transform( + sample_rate_hz=sample_rate, + notch_freq_hz=notch_filter, + bandpass_low=filter_low, + bandpass_high=filter_high, + bandpass_order=filter_order, + downsample_factor=downsample_rate, + ) + + def find_eye_channels(self, device_spec: DeviceSpec) -> Optional[list]: + eye_channels = [] + for channel in device_spec.channels: + if 'F' in channel: + eye_channels.append(channel) + if len(eye_channels) == 0: + eye_channels = None + return 
eye_channels + + def get_triggers(self, session) -> tuple: + trigger_type, trigger_timing, trigger_label = trigger_decoder( + offset=self.static_offset, + trigger_path=f"{session}/{TRIGGER_FILENAME}", + exclusion=[ + TriggerType.PREVIEW, + TriggerType.EVENT, + TriggerType.FIXATION], + device_type='EEG' + ) + return trigger_type, trigger_timing, trigger_label + + def get_figure_handles(self, raw_data, channel_map, triggers) -> List[Figure]: + _, trigger_timing, trigger_label = triggers + figure_handles = visualize_erp( + raw_data, + channel_map, + trigger_timing, + trigger_label, + self.trial_window, + transform=self.default_transform, + plot_average=True, + plot_topomaps=True, + show=False, + ) + return figure_handles + + def get_artifact_detector(self, raw_data, device_spec, triggers) -> ArtifactDetection: + eye_channels = self.find_eye_channels(device_spec) + artifact_detector = ArtifactDetection( + raw_data, + self.parameters, + device_spec, + eye_channels=eye_channels, + session_triggers=triggers) + artifact_detector.detect_artifacts() + return artifact_detector diff --git a/bcipy/task/base_calibration.py b/bcipy/task/calibration.py similarity index 81% rename from bcipy/task/base_calibration.py rename to bcipy/task/calibration.py index 392de4fc4..839c85eda 100644 --- a/bcipy/task/base_calibration.py +++ b/bcipy/task/calibration.py @@ -1,14 +1,16 @@ """Base calibration task.""" - +from abc import abstractmethod from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple -from psychopy import core, visual +from psychopy import core +from psychopy.visual import Window import bcipy.task.data as session_data from bcipy.acquisition import ClientManager from bcipy.config import (SESSION_DATA_FILENAME, TRIGGER_FILENAME, - WAIT_SCREEN_MESSAGE) -from bcipy.display import Display + WAIT_SCREEN_MESSAGE, SESSION_LOG_FILENAME) +from bcipy.helpers.acquisition import init_acquisition, LslDataServer +from bcipy.display import init_display_window, Display from 
bcipy.helpers.clock import Clock from bcipy.helpers.parameters import Parameters from bcipy.helpers.save import _save_session_related_data @@ -21,7 +23,10 @@ from bcipy.helpers.triggers import (FlushFrequency, Trigger, TriggerHandler, TriggerType, convert_timing_triggers, offset_label) -from bcipy.task import Task +from bcipy.task import Task, TaskData, TaskMode + +import logging +logger = logging.getLogger(SESSION_LOG_FILENAME) class Inquiry(NamedTuple): @@ -50,10 +55,9 @@ class BaseCalibrationTask(Task): PARAMETERS: ---------- - win (PsychoPy Display) - daq (Data Acquisition Client) parameters (dict) file_save (str) + fake (bool) Subclasses should override the provided MODE and can specialize behavior by overriding the following methods: @@ -62,19 +66,33 @@ class BaseCalibrationTask(Task): - trigger_type ; used for assigning trigger types to the timing data - session_task_data ; provide task-specific session data - session_inquiry_data ; provide task-specific inquiry data to the session - - cleanup ; perform any necessary cleanup (closing connections, etc.) + - cleanup ; perform any necessary cleanup (closing connections, etc.). 
+ + Returns: + ------- + TaskData """ - MODE = 'Undefined' + mode = TaskMode.CALIBRATION + paradigm = 'Undefined' + initalized = False - def __init__(self, win: visual.Window, daq: ClientManager, - parameters: Parameters, file_save: str) -> None: + def __init__(self, + parameters: Parameters, + file_save: str, + fake: bool = False, + **kwargs: Any) -> None: super().__init__() + + self.fake = fake + self.validate() + daq, servers, win = self.setup(parameters, file_save, fake) self.window = win self.frame_rate = self.window.getActualFrameRate() self.parameters = parameters self.daq = daq + self.servers = servers self.static_clock = core.StaticPeriod(screenHz=self.frame_rate) self.experiment_clock = Clock() self.start_time = self.experiment_clock.getTime() @@ -103,11 +121,47 @@ def symbol_set(self) -> List[str]: """Symbols used in the calibration""" return self._symbol_set - def name(self) -> str: - """Task name""" - if self.MODE == 'Undefined': - raise NotImplementedError - return f"{self.MODE} Calibration Task" + def setup(self, parameters, data_save_location, fake=False) -> Tuple[ClientManager, List[LslDataServer], Window]: + # Initialize Acquisition + daq, servers = init_acquisition( + parameters, data_save_location, server=fake) + + # Initialize Display + display = init_display_window(parameters) + self.initalized = True + + return daq, servers, display + + def validate(self) -> None: + """Validate the task.""" + assert self.paradigm != 'Undefined', 'Paradigm must be defined in subclass.' 
+ + def cleanup(self) -> None: + """Any cleanup code to run after the last inquiry is complete.""" + logger.info('Cleaning up task acquisition and display.') + self.exit_display() + self.write_offset_trigger() + self.wait() + if self.initalized: + try: + # Stop Acquisition + self.daq.stop_acquisition() + self.daq.cleanup() + + # Stop Servers + if self.servers: + for server in self.servers: + server.stop() + + # Close the display window + # NOTE: There is currently a bug in psychopy when attempting to shutdown + # windows when using a USB-C monitor. Putting the display close last in + # the inquiry allows acquisition to properly shutdown. + self.window.close() + self.initalized = False + + except Exception as e: + logger.exception(str(e)) def wait(self, seconds: Optional[float] = None) -> None: """Pause for a time. @@ -120,9 +174,10 @@ def wait(self, seconds: Optional[float] = None) -> None: seconds = seconds or self.parameters['task_buffer_length'] core.wait(seconds) + @abstractmethod def init_display(self) -> Display: """Initialize the display""" - raise NotImplementedError + ... 
def init_inquiry_generator(self) -> Iterator[Inquiry]: """Initializes a generator that returns inquiries to be presented.""" @@ -148,8 +203,8 @@ def init_inquiry_generator(self) -> Iterator[Inquiry]: def init_session(self) -> session_data.Session: """Initialize the session data.""" return session_data.Session(save_location=self.file_save, - task='Calibration', - mode=self.MODE, + task=self.name, + mode=str(self.mode), symbol_set=self.symbol_set, task_data=self.session_task_data()) @@ -212,12 +267,12 @@ def user_wants_to_continue(self, first_inquiry: bool = False) -> bool: self.wait_screen_message_color, first_run=first_inquiry) if not should_continue: - self.logger.info('User wants to exit.') + logger.info('User wants to exit.') return should_continue - def execute(self) -> str: + def execute(self) -> TaskData: """Task run loop.""" - self.logger.info(f'Starting {self.name()}!') + logger.info(f'Starting {self.name}!') self.wait() inq_index = 0 @@ -241,11 +296,9 @@ def execute(self) -> str: self.wait() inq_index += 1 - self.exit_display() - self.write_offset_trigger() self.cleanup() - return self.file_save + return TaskData(save_path=self.file_save, task_dict=self.session.as_dict()) def exit_display(self) -> None: """Close the UI and cleanup.""" @@ -258,9 +311,6 @@ def exit_display(self) -> None: # Allow for some additional data to be collected for later processing self.wait() - def cleanup(self) -> None: - """Any cleanup code to run after the last inquiry is complete.""" - def write_trigger_data(self, timing: List[Tuple[str, float]], first_run) -> None: """Write Trigger Data. @@ -331,4 +381,4 @@ def add_session_data(self, inquiry: Inquiry) -> None: def session_inquiry_data(self, inquiry: Inquiry) -> Optional[Dict[str, Any]]: """Defines task-specific session data for each inquiry.""" - return None + ... 
diff --git a/bcipy/task/control/criteria.py b/bcipy/task/control/criteria.py index 29249dbd4..701fb544d 100644 --- a/bcipy/task/control/criteria.py +++ b/bcipy/task/control/criteria.py @@ -3,7 +3,8 @@ from typing import Dict, List from copy import copy -log = logging.getLogger(__name__) +from bcipy.config import SESSION_LOG_FILENAME +log = logging.getLogger(SESSION_LOG_FILENAME) class DecisionCriteria: diff --git a/bcipy/task/control/evidence.py b/bcipy/task/control/evidence.py index 929f38eaf..3cb7c1480 100644 --- a/bcipy/task/control/evidence.py +++ b/bcipy/task/control/evidence.py @@ -6,13 +6,14 @@ import numpy as np from bcipy.acquisition.multimodal import ContentType +from bcipy.config import SESSION_LOG_FILENAME from bcipy.helpers.acquisition import analysis_channels from bcipy.helpers.stimuli import TrialReshaper from bcipy.signal.model import SignalModel from bcipy.task.data import EvidenceType from bcipy.task.exceptions import MissingEvidenceEvaluator -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) class EvidenceEvaluator: diff --git a/bcipy/task/control/handler.py b/bcipy/task/control/handler.py index 56ec283dd..9ca1a6262 100644 --- a/bcipy/task/control/handler.py +++ b/bcipy/task/control/handler.py @@ -4,13 +4,14 @@ import numpy as np +from bcipy.config import SESSION_LOG_FILENAME from bcipy.helpers.stimuli import InquirySchedule, inq_generator, StimuliOrder from bcipy.helpers.symbols import SPACE_CHAR, BACKSPACE_CHAR from bcipy.task.control.query import RandomStimuliAgent, StimuliAgent from bcipy.task.control.criteria import CriteriaEvaluator from bcipy.task.data import EvidenceType -log = logging.getLogger(__name__) +log = logging.getLogger(SESSION_LOG_FILENAME) class EvidenceFusion(): diff --git a/bcipy/task/demo/demo_orchestrator.py b/bcipy/task/demo/demo_orchestrator.py new file mode 100644 index 000000000..d57bd0013 --- /dev/null +++ b/bcipy/task/demo/demo_orchestrator.py @@ -0,0 +1,54 @@ +from bcipy.config import 
DEFAULT_PARAMETERS_PATH +from bcipy.task.orchestrator import SessionOrchestrator +from bcipy.task.actions import (OfflineAnalysisAction, IntertaskAction, BciPyCalibrationReportAction) +from bcipy.task.paradigm.rsvp import RSVPCalibrationTask +# from bcipy.task.paradigm.rsvp import RSVPCopyPhraseTask, RSVPTimingVerificationCalibration +from bcipy.task.paradigm.matrix import MatrixCalibrationTask +# from bcipy.task.paradigm.matrix.timing_verification import MatrixTimingVerificationCalibration + + +def demo_orchestrator(parameters_path: str) -> None: + """Demo the SessionOrchestrator. + + This function demonstrates how to use the SessionOrchestrator to execute actions. + + The action in this case is an OfflineAnalysisAction, which will analyze the data in a given directory. + """ + fake_data = True + alert_finished = True + tasks = [ + RSVPCalibrationTask, + IntertaskAction, + # OfflineAnalysisAction, + # IntertaskAction, + MatrixCalibrationTask, + IntertaskAction, + OfflineAnalysisAction, + IntertaskAction, + BciPyCalibrationReportAction + ] + orchestrator = SessionOrchestrator( + user='offline_testing', + parameters_path=parameters_path, + alert=alert_finished, + visualize=True, + fake=fake_data) + orchestrator.add_tasks(tasks) + orchestrator.execute() + + +if __name__ == '__main__': + + import argparse + + parser = argparse.ArgumentParser(description="Demo the SessionOrchestrator") + parser.add_argument( + '-p', + '--parameters_path', + help='Path to the parameters file to use for training. 
If none provided, data path will be used.', + default=DEFAULT_PARAMETERS_PATH) + args = parser.parse_args() + + parameters_path = f'{args.parameters_path}' + + demo_orchestrator(parameters_path) diff --git a/bcipy/task/main.py b/bcipy/task/main.py index 9e6b92f06..65083f8d1 100644 --- a/bcipy/task/main.py +++ b/bcipy/task/main.py @@ -1,6 +1,35 @@ -import logging - +from dataclasses import dataclass +from typing import Optional +from bcipy.helpers.parameters import Parameters from abc import ABC, abstractmethod +from enum import Enum + +from bcipy.helpers.stimuli import play_sound +from bcipy.config import STATIC_AUDIO_PATH + + +@dataclass +class TaskData(): + """TaskData. + + Data structure for storing task return data. + """ + save_path: Optional[str] = None + task_dict: Optional[dict] = None + + +class TaskMode(Enum): + CALIBRATION = "calibration" + COPYPHRASE = "copy phrase" + TIMING_VERIFICATION = "timing verification" + ACTION = "action" + TRAINING = "training" + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return self.value class Task(ABC): @@ -8,15 +37,25 @@ class Task(ABC): Base class for BciPy tasks. """ + name: str + mode: TaskMode + parameters: Parameters + data_save_location: str - def __init__(self) -> None: + def __init__(self, *args, **kwargs) -> None: super(Task, self).__init__() - self.logger = logging.getLogger(__name__) + assert getattr(self, 'name', None) is not None, "Task must have a `name` attribute defined" + assert getattr(self, 'mode', None) is not None, "Task must have a `mode` attribute defined" @abstractmethod - def execute(self) -> str: + def execute(self) -> TaskData: ... - @abstractmethod - def name(self) -> str: + def setup(self, *args, **kwargs): ... + + def cleanup(self, *args, **kwargs): + ... 
+ + def alert(self): + play_sound(f"{STATIC_AUDIO_PATH}/{self.parameters['alert_sound_file']}") diff --git a/bcipy/task/orchestrator/__init__.py b/bcipy/task/orchestrator/__init__.py new file mode 100644 index 000000000..df0973e4c --- /dev/null +++ b/bcipy/task/orchestrator/__init__.py @@ -0,0 +1,3 @@ +from bcipy.task.orchestrator.orchestrator import SessionOrchestrator + +__all__ = ['SessionOrchestrator'] diff --git a/bcipy/task/orchestrator/orchestrator.py b/bcipy/task/orchestrator/orchestrator.py new file mode 100644 index 000000000..b3b493242 --- /dev/null +++ b/bcipy/task/orchestrator/orchestrator.py @@ -0,0 +1,295 @@ +# mypy: disable-error-code="arg-type, assignment" +import errno +import os +import json +import subprocess +from datetime import datetime +import random +import logging +import time +from logging import Logger +from typing import List, Type, Optional + +from bcipy.helpers.parameters import Parameters +from bcipy.helpers.system_utils import get_system_info, configure_logger +from bcipy.task import Task, TaskData, TaskMode +from bcipy.config import ( + DEFAULT_EXPERIMENT_ID, + DEFAULT_PARAMETERS_PATH, + DEFAULT_USER_ID, + MULTIPHRASE_FILENAME, + PROTOCOL_FILENAME, + PROTOCOL_LOG_FILENAME, + SESSION_LOG_FILENAME, +) +from bcipy.helpers.load import load_json_parameters + + +class SessionOrchestrator: + """ + Session Orchestrator + -------------------- + + The Session Orchestrator is responsible for managing the execution of a protocol of tasks. It is initialized with an + experiment ID, user ID, and parameters file. Tasks are added to the orchestrator, which are then executed in order. 
+ """ + tasks: List[Type[Task]] + task_names: List[str] + parameters: Parameters + sys_info: dict + log: Logger + save_folder: str + session_data: List[TaskData] + ready_to_execute: bool = False + last_task_dir: Optional[str] = None + + def __init__( + self, + experiment_id: str = DEFAULT_EXPERIMENT_ID, + user: str = DEFAULT_USER_ID, + parameters_path: str = DEFAULT_PARAMETERS_PATH, + parameters: Parameters = None, + fake: bool = False, + alert: bool = False, + visualize: bool = False + ) -> None: + self.parameters_path = parameters_path + if not parameters: + self.parameters = load_json_parameters(parameters_path, value_cast=True) + else: + # This allows for the parameters to be passed in directly and modified before executions + self.parameters = parameters + + self.copyphrases = None + self.next_phrase = None + self.starting_index = 0 + + self.initialize_copy_phrases() + + self.user = user + self.fake = fake + self.experiment_id = experiment_id + self.sys_info = self.get_system_info() + self.tasks = [] + self.task_names = [] + self.session_data = [] + self.save_folder = self._init_orchestrator_save_folder(self.parameters["data_save_loc"]) + self.logger = self._init_orchestrator_logger(self.save_folder) + + self.alert = alert + self.logger.info("Alerts are on") if self.alert else self.logger.info("Alerts are off") + self.visualize = visualize + self.progress = 0 + + self.ready_to_execute = False + self.user_exit = False + self.logger.info("Session Orchestrator initialized successfully") + + def add_task(self, task: Type[Task]) -> None: + """Add a task to the orchestrator""" + self.tasks.append(task) + self.task_names.append(task.name) + self.ready_to_execute = True + + def add_tasks(self, tasks: List[Type[Task]]) -> None: + """Add a list of tasks to the orchestrator""" + for task in tasks: + self.add_task(task) + self.ready_to_execute = True + + def set_next_phrase(self) -> None: + """Set the next phrase to be copied from the list of copy phrases loaded or the 
parameters directly. + + If there are no more phrases to copy, the task text and spelled letters from parameters will be used. + """ + if self.copyphrases: + if len(self.copyphrases) > 0: + text, index = self.copyphrases.pop(0) + self.next_phrase = text + self.starting_index = index + else: + self.next_phrase = self.parameters['task_text'] + self.parameters['task_text'] = self.next_phrase + self.parameters['spelled_letters_count'] = self.starting_index + + def initialize_copy_phrases(self) -> None: + """Load copy phrases from a json file or take the task text if no file is provided. + + Expects a json file structured as follows: + { + "Phrases": [ + [string, int], + [string, int], + ... + ] + } + """ + # load copy phrases from json file or take the task text if no file is provided + if self.parameters.get('copy_phrases_location'): + with open(self.parameters['copy_phrases_location'], 'r') as f: + copy_phrases = json.load(f) + self.copyphrases = copy_phrases['Phrases'] + # randomize the order of the phrases + random.shuffle(self.copyphrases) + else: + self.copyphrases = None + self.next_phrase = self.parameters['task_text'] + self.starting_index = self.parameters['spelled_letters_count'] + + def execute(self) -> None: + """Executes queued tasks in order""" + + if not self.ready_to_execute: + msg = "Orchestrator not ready to execute. No tasks have been added." 
+            self.logger.error(msg)
+            raise Exception(msg)
+
+        self.logger.info(f"Session Orchestrator executing tasks in order: {self.task_names}")
+        for task in self.tasks:
+            self.progress += 1
+            if task.mode == TaskMode.COPYPHRASE:
+                self.set_next_phrase()
+            try:
+                # initialize the task save folder and logger
+                self.logger.info(f"Initializing task {self.progress}/{len(self.tasks)} {task.name}")
+                data_save_location = self._init_task_save_folder(task)
+                self._init_task_logger(data_save_location)
+
+                # initialize the task and execute it
+                initialized_task: Task = task(
+                    self.parameters,
+                    data_save_location,
+                    fake=self.fake,
+                    alert_finished=self.alert,
+                    experiment_id=self.experiment_id,
+                    parameters_path=self.parameters_path,
+                    protocol_path=self.save_folder,
+                    last_task_dir=self.last_task_dir,
+                    progress=self.progress,
+                    tasks=self.tasks,
+                    exit_callback=self.close_experiment_callback)
+                task_data = initialized_task.execute()
+                self.session_data.append(task_data)
+                self.logger.info(f"Task {task.name} completed successfully")
+                # some tasks may need access to the previous task's data
+                self.last_task_dir = data_save_location
+
+                if self.user_exit:
+                    break
+
+                if initialized_task.mode != TaskMode.ACTION:
+                    if self.alert:
+                        initialized_task.alert()
+
+                    if self.visualize:
+                        # Visualize session data and fail silently if it errors
+                        try:
+                            self.logger.info(f"Visualizing session data. 
Saving to {data_save_location}")
+                            subprocess.run(
+                                f'bcipy-erp-viz -s "{data_save_location}" '
+                                f'--parameters "{self.parameters_path}" --show --save',
+                                shell=True)
+                        except Exception as e:
+                            self.logger.info(f'Error visualizing session data: {e}')
+
+                initialized_task = None
+
+            except Exception as e:
+                self.logger.error(f"Task {task.name} failed to execute")
+                self.logger.exception(e)
+                try:
+                    initialized_task.cleanup()
+                except BaseException:
+                    pass
+
+        # give the orchestrator time to save data before exiting
+        time.sleep(1)
+
+        # Save the protocol data and reset the orchestrator
+        self._save_data()
+        self.ready_to_execute = False
+        self.tasks = []
+        self.task_names = []
+        self.progress = 0
+
+    def _init_orchestrator_logger(self, save_folder: str) -> Logger:
+        return configure_logger(
+            save_folder,
+            PROTOCOL_LOG_FILENAME,
+            logging.DEBUG)
+
+    def _init_orchestrator_save_folder(self, save_path: str) -> str:
+        timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+        # * No '/' after `save_folder` since it is included in
+        # * `data_save_location` in parameters
+        path = f'{save_path}{self.user}/{self.experiment_id}/{timestamp}/'
+        os.makedirs(path)
+        os.makedirs(os.path.join(path, 'logs'), exist_ok=True)
+        return path
+
+    def _init_task_save_folder(self, task: Type[Task]) -> str:
+        timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+        save_directory = self.save_folder + f'{task.name.replace(" ", "_")}_{timestamp}/'
+        try:
+            # make a directory to save task data to
+            os.makedirs(save_directory)
+            os.makedirs(os.path.join(save_directory, 'logs'), exist_ok=True)
+            # save parameters to save directory with task name
+            self.parameters.add_entry(
+                "task",
+                {
+                    "value": task.name,
+                    "section": "task_config",
+                    "name": "BciPy Task",
+                    "helpTip": "A string representing the task that was executed",
+                    "recommended": "",
+                    "editable": "false",
+                    "type": "str",
+                }
+            )
+            self.parameters.save(save_directory)
+
+        except OSError as error:
+            # If the error is anything 
other than file existing, raise an error
+            if error.errno != errno.EEXIST:
+                raise error
+        return save_directory
+
+    def _init_task_logger(self, save_folder: str) -> None:
+        configure_logger(
+            save_folder,
+            SESSION_LOG_FILENAME,
+            logging.DEBUG)
+
+    def _save_data(self) -> None:
+
+        self._save_protocol_data()
+        # Save the remaining phrase data to a json file to be used in the next session
+        if self.copyphrases and len(self.copyphrases) > 0:
+            self._save_copy_phrases()
+
+    def _save_protocol_data(self) -> None:
+        # Save the protocol data to a json file
+        with open(f'{self.save_folder}/{PROTOCOL_FILENAME}', 'w') as f:
+            f.write(json.dumps({
+                'tasks': self.task_names,
+                'parameters': self.parameters_path,
+                'system_info': self.sys_info,
+            }))
+        self.logger.info("Protocol data successfully saved")
+
+    def _save_copy_phrases(self) -> None:
+        # Save the copy phrases data to a json file
+        with open(f'{self.save_folder}/{MULTIPHRASE_FILENAME}', 'w') as f:
+            f.write(json.dumps({
+                'Phrases': self.copyphrases
+            }))
+        self.logger.info("Copy phrases data successfully saved")
+
+    def get_system_info(self) -> dict:
+        return get_system_info()
+
+    def close_experiment_callback(self):
+        """Callback to close the experiment."""
+        self.logger.info("User has exited the experiment.")
+        self.user_exit = True
diff --git a/bcipy/task/orchestrator/protocol.py b/bcipy/task/orchestrator/protocol.py
new file mode 100644
index 000000000..af950162c
--- /dev/null
+++ b/bcipy/task/orchestrator/protocol.py
@@ -0,0 +1,76 @@
+"""This file can define actions that can happen in a session orchestrator visit.
+To start these will be 1:1 with tasks, but later this can be extended to represent training sequences, GUI popups etc"""
+
+from typing import List, Type
+from bcipy.task import Task
+from bcipy.config import TASK_SEPERATOR
+from bcipy.task.registry import TaskRegistry
+
+
+def parse_protocol(protocol: str) -> List[Type[Task]]:
+    """
+    Parses a string of actions into a list of Task objects. 
+
+    Converts a string of actions into a list of Task objects. The string is expected
+    to be in the format of 'Action1 -> Action2 -> ... -> ActionN'.
+    Parameters
+    ----------
+        protocol : str
+            A string of actions in the format of 'Action1 -> Action2 -> ... -> ActionN'.
+
+    Returns
+    -------
+        List[TaskType]
+            A list of TaskType objects that represent the actions in the input string.
+    """
+    task_registry = TaskRegistry()
+    return [task_registry.get(item.strip()) for item in protocol.split(TASK_SEPERATOR)]
+
+
+def validate_protocol_string(protocol: str) -> None:
+    """
+    Validates a string of actions.
+
+    Validates a string of actions. The string is expected to be in the format of 'Action1 -> Action2 -> ... -> ActionN'.
+
+    Parameters
+    ----------
+        protocol : str
+            A string of actions in the format of 'Action1 -> Action2 -> ... -> ActionN'.
+
+    Raises
+    ------
+        ValueError
+            If the string of actions is invalid.
+    """
+    for protocol_item in protocol.split(TASK_SEPERATOR):
+        if protocol_item.strip() not in TaskRegistry().list():
+            raise ValueError(f"Invalid task '{protocol_item}' name in protocol string.")
+
+
+def serialize_protocol(protocol: List[Type[Task]]) -> str:
+    """
+    Converts a list of TaskType objects into a string of actions.
+
+    Converts a list of TaskType objects into a string of actions. The string is in the format of
+    'Action1 -> Action2 -> ... -> ActionN'.
+
+    Parameters
+    ----------
+        protocol : List[Type[Task]]
+            A list of Task classes to serialize.
+
+    Returns
+    -------
+        str
+            A string of task names in the format of 'Action1 -> Action2 -> ... -> ActionN'. 
+ """ + + return f" {TASK_SEPERATOR} ".join([item.name for item in protocol]) + + +if __name__ == '__main__': + actions = parse_protocol("Matrix Calibration -> Matrix Copy Phrase") + string = serialize_protocol(actions) + print(actions) + print(string) diff --git a/bcipy/task/paradigm/matrix/__init__.py b/bcipy/task/paradigm/matrix/__init__.py index e69de29bb..f7dd6bc2c 100644 --- a/bcipy/task/paradigm/matrix/__init__.py +++ b/bcipy/task/paradigm/matrix/__init__.py @@ -0,0 +1,4 @@ +# Import all matrix tasks to make them available to the task registry +from .calibration import MatrixCalibrationTask # noqa +from .copy_phrase import MatrixCopyPhraseTask # noqa +from .timing_verification import MatrixTimingVerificationCalibration # noqa diff --git a/bcipy/task/paradigm/matrix/calibration.py b/bcipy/task/paradigm/matrix/calibration.py index a4ab773aa..a16cc6305 100644 --- a/bcipy/task/paradigm/matrix/calibration.py +++ b/bcipy/task/paradigm/matrix/calibration.py @@ -1,6 +1,6 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, List -from psychopy import core, visual +from psychopy import visual from bcipy.display import InformationProperties, StimuliProperties from bcipy.display.components.task_bar import CalibrationTaskBar @@ -10,7 +10,7 @@ from bcipy.helpers.parameters import Parameters from bcipy.helpers.save import save_stimuli_position_info from bcipy.helpers.system_utils import get_screen_info -from bcipy.task.base_calibration import BaseCalibrationTask +from bcipy.task.calibration import BaseCalibrationTask class MatrixCalibrationTask(BaseCalibrationTask): @@ -28,12 +28,13 @@ class MatrixCalibrationTask(BaseCalibrationTask): PARAMETERS: ---------- - win (PsychoPy Display Object) - daq (Data Acquisition Object [ClientManager]]) - parameters (Parameters Object) - file_save (String) + parameters (dict) + file_save (str) + fake (bool) + """ - MODE = 'Matrix' + name = 'Matrix Calibration' + paradigm = 'Matrix' @property def 
screen_info(self) -> Dict[str, Any]: @@ -56,7 +57,6 @@ def exit_display(self) -> None: def cleanup(self) -> None: assert isinstance(self.display, MatrixDisplay) - # TODO: refactor offline_analysis to use session data and and remove this. save_stimuli_position_info(self.display.stim_positions, self.file_save, self.screen_info) return super().cleanup() @@ -68,7 +68,7 @@ def session_task_data(self) -> Optional[Dict[str, Any]]: def init_matrix_display(parameters: Parameters, window: visual.Window, experiment_clock: Clock, - symbol_set: core.StaticPeriod) -> MatrixDisplay: + symbol_set: List[str]) -> MatrixDisplay: """Initialize the matrix display""" info = InformationProperties( info_color=[parameters['info_color']], @@ -78,22 +78,23 @@ def init_matrix_display(parameters: Parameters, window: visual.Window, info_text=[parameters['info_text']], ) stimuli = StimuliProperties(stim_font=parameters['font'], - stim_pos=(-0.6, 0.4), - stim_height=0.1, + stim_pos=(parameters['matrix_stim_pos_x'], parameters['matrix_stim_pos_y']), + stim_height=parameters['matrix_stim_height'], stim_inquiry=[''] * parameters['stim_length'], stim_colors=[parameters['stim_color']] * parameters['stim_length'], stim_timing=[10] * parameters['stim_length'], is_txt_stim=parameters['is_txt_stim'], - prompt_time=parameters["time_prompt"]) + prompt_time=parameters['time_prompt'], + layout=parameters['matrix_keyboard_layout']) task_bar = CalibrationTaskBar(window, inquiry_count=parameters['stim_number'], current_index=0, colors=[parameters['task_color']], font=parameters['font'], - height=parameters['task_height'], - padding=parameters['task_padding']) + height=parameters['matrix_task_height'], + padding=parameters['matrix_task_padding']) return MatrixDisplay(window, experiment_clock, diff --git a/bcipy/task/paradigm/matrix/copy_phrase.py b/bcipy/task/paradigm/matrix/copy_phrase.py index a605688b8..79748e7b7 100644 --- a/bcipy/task/paradigm/matrix/copy_phrase.py +++ 
b/bcipy/task/paradigm/matrix/copy_phrase.py @@ -1,10 +1,14 @@ """Defines the Copy Phrase Task which uses a Matrix display""" +from psychopy import visual from bcipy.display import InformationProperties, StimuliProperties from bcipy.display.components.task_bar import CopyPhraseTaskBar from bcipy.display.main import PreviewParams from bcipy.display.paradigm.matrix.display import MatrixDisplay +from bcipy.task import TaskMode from bcipy.task.paradigm.rsvp.copy_phrase import RSVPCopyPhraseTask +from bcipy.helpers.parameters import Parameters +from bcipy.helpers.clock import Clock class MatrixCopyPhraseTask(RSVPCopyPhraseTask): @@ -16,26 +20,83 @@ class MatrixCopyPhraseTask(RSVPCopyPhraseTask): Parameters ---------- - win : object, - display window to present visual stimuli. - daq : object, - data acquisition object initialized for the desired protocol parameters : dict, configuration details regarding the experiment. See parameters.json file_save : str, path location of where to save data from the session - signal_models : list of trained signal models. - language_model: object, - trained language model. fake : boolean, optional boolean to indicate whether this is a fake session or not. 
Returns ------- - file_save : str, - path location of where to save data from the session + TaskData """ - TASK_NAME = 'Matrix Copy Phrase Task' - MODE = 'Matrix' + name = 'Matrix Copy Phrase' + paradigm = 'Matrix' + mode = TaskMode.COPYPHRASE + + PARAMETERS_USED = [ + "time_fixation", + "time_flash", + "time_prompt", + "trial_window", + "font", + "fixation_color", + "trigger_type", + "filter_high", + "filter_low", + "filter_order", + "notch_filter_frequency", + "down_sampling_rate", + "prestim_length", + "is_txt_stim", + "lm_backspace_prob", + "backspace_always_shown", + "decision_threshold", + "max_inq_len", + "max_inq_per_series", + "max_minutes", + "max_selections", + "max_incorrect", + "min_inq_len", + "show_feedback", + "feedback_duration", + "show_preview_inquiry", + "preview_inquiry_isi", + "preview_inquiry_key_input", + "preview_inquiry_error_prob", + "preview_inquiry_length", + "preview_inquiry_progress_method", + "spelled_letters_count", + "stim_color", + "matrix_stim_height", + "stim_jitter", + "stim_length", + "stim_number", + "stim_order", + "stim_pos_x", + "stim_pos_y", + "stim_space_char", + "target_color", + "task_buffer_length", + "task_color", + "matrix_task_height", + "matrix_task_padding", + "matrix_keyboard_layout", + "matrix_rows", + "matrix_columns", + "matrix_width", + "matrix_stim_pos_x", + "matrix_stim_pos_y", + "task_text", + "info_pos_x", + "info_pos_y", + "info_color", + "info_height", + "info_text", + "info_color", + "info_height", + "info_text", + ] def init_display(self) -> MatrixDisplay: """Initialize the Matrix display""" @@ -44,7 +105,10 @@ def init_display(self) -> MatrixDisplay: def init_display( - parameters, win, experiment_clock, starting_spelled_text) -> MatrixDisplay: + parameters: Parameters, + win: visual.Window, + experiment_clock: Clock, + starting_spelled_text: str) -> MatrixDisplay: """Constructs a new Matrix display""" info = InformationProperties( @@ -56,19 +120,20 @@ def init_display( ) stimuli = 
StimuliProperties(stim_font=parameters['font'], - stim_pos=(parameters['stim_pos_x'], - parameters['stim_pos_y']), - stim_height=parameters['stim_height'], + stim_pos=(parameters['matrix_stim_pos_x'], + parameters['matrix_stim_pos_y']), + stim_height=parameters['matrix_stim_height'], is_txt_stim=parameters['is_txt_stim'], - prompt_time=parameters['time_prompt']) + prompt_time=parameters['time_prompt'], + layout=parameters['matrix_keyboard_layout']) task_bar = CopyPhraseTaskBar(win, task_text=parameters['task_text'], spelled_text=starting_spelled_text, colors=[parameters['task_color']], font=parameters['font'], - height=parameters['task_height'], - padding=parameters['task_padding']) + height=parameters['matrix_task_height'], + padding=parameters['matrix_task_padding']) return MatrixDisplay( win, diff --git a/bcipy/task/paradigm/matrix/timing_verification.py b/bcipy/task/paradigm/matrix/timing_verification.py index 2638f52a8..a26c20876 100644 --- a/bcipy/task/paradigm/matrix/timing_verification.py +++ b/bcipy/task/paradigm/matrix/timing_verification.py @@ -3,7 +3,8 @@ from bcipy.helpers.stimuli import (PhotoDiodeStimuli, get_fixation, jittered_timing) -from bcipy.task.base_calibration import Inquiry +from bcipy.task.calibration import Inquiry +from bcipy.task import TaskMode from bcipy.task.paradigm.matrix.calibration import (MatrixCalibrationTask, MatrixDisplay) @@ -15,15 +16,15 @@ class MatrixTimingVerificationCalibration(MatrixCalibrationTask): stimuli can be used with a photodiode to ensure accurate presentations. 
Input: - win (PsychoPy Display Object) - daq (Data Acquisition Object) parameters (Dictionary) file_save (String) + fake (Boolean) Output: - file_save (String) + TaskData """ - TASK_NAME = 'Matrix Timing Verification Task' + name = 'Matrix Timing Verification' + mode = TaskMode.TIMING_VERIFICATION def init_display(self) -> MatrixDisplay: """Initialize the display""" diff --git a/bcipy/task/paradigm/rsvp/__init__.py b/bcipy/task/paradigm/rsvp/__init__.py index e69de29bb..111883a46 100644 --- a/bcipy/task/paradigm/rsvp/__init__.py +++ b/bcipy/task/paradigm/rsvp/__init__.py @@ -0,0 +1,4 @@ +# Import all RSVP tasks to make them available to the task registry +from .calibration.calibration import RSVPCalibrationTask # noqa +from .calibration.timing_verification import RSVPTimingVerificationCalibration # noqa +from .copy_phrase import RSVPCopyPhraseTask # noqa diff --git a/bcipy/task/paradigm/rsvp/calibration/calibration.py b/bcipy/task/paradigm/rsvp/calibration/calibration.py index 44b6a4cce..b381349f6 100644 --- a/bcipy/task/paradigm/rsvp/calibration/calibration.py +++ b/bcipy/task/paradigm/rsvp/calibration/calibration.py @@ -7,7 +7,7 @@ from bcipy.helpers.clock import Clock from bcipy.helpers.parameters import Parameters from bcipy.helpers.triggers import TriggerType -from bcipy.task.base_calibration import BaseCalibrationTask +from bcipy.task.calibration import BaseCalibrationTask class RSVPCalibrationTask(BaseCalibrationTask): @@ -25,12 +25,12 @@ class RSVPCalibrationTask(BaseCalibrationTask): PARAMETERS: ---------- - win (PsychoPy Display) - daq (Data Acquisition Client) parameters (dict) file_save (str) + fake (bool) """ - MODE = 'RSVP' + name = 'RSVP Calibration' + paradigm = 'RSVP' def trigger_type(self, symbol: str, target: str, index: int) -> TriggerType: @@ -64,8 +64,8 @@ def init_calibration_display_task( ) stimuli = StimuliProperties( stim_font=parameters['font'], - stim_pos=(parameters['stim_pos_x'], parameters['stim_pos_y']), - 
stim_height=parameters['stim_height'], + stim_pos=(parameters['rsvp_stim_pos_x'], parameters['rsvp_stim_pos_y']), + stim_height=parameters['rsvp_stim_height'], stim_inquiry=[''] * parameters['stim_length'], stim_colors=[parameters['stim_color']] * parameters['stim_length'], stim_timing=[10] * parameters['stim_length'], @@ -76,8 +76,8 @@ def init_calibration_display_task( current_index=0, colors=[parameters['task_color']], font=parameters['font'], - height=parameters['task_height'], - padding=parameters['task_padding']) + height=parameters['rsvp_task_height'], + padding=parameters['rsvp_task_padding']) return CalibrationDisplay(window, static_clock, diff --git a/bcipy/task/paradigm/rsvp/calibration/timing_verification.py b/bcipy/task/paradigm/rsvp/calibration/timing_verification.py index be94ca30c..07bcbba74 100644 --- a/bcipy/task/paradigm/rsvp/calibration/timing_verification.py +++ b/bcipy/task/paradigm/rsvp/calibration/timing_verification.py @@ -1,14 +1,12 @@ # mypy: disable-error-code="assignment" from itertools import cycle, islice, repeat -from typing import Iterator, List +from typing import Any, Iterator, List -from psychopy import visual - -from bcipy.acquisition import ClientManager from bcipy.helpers.parameters import Parameters from bcipy.helpers.stimuli import (PhotoDiodeStimuli, get_fixation, jittered_timing) -from bcipy.task.base_calibration import Inquiry +from bcipy.task.calibration import Inquiry +from bcipy.task import TaskMode from bcipy.task.paradigm.rsvp.calibration.calibration import \ RSVPCalibrationTask @@ -20,22 +18,25 @@ class RSVPTimingVerificationCalibration(RSVPCalibrationTask): stimuli can be used with a photodiode to ensure accurate presentations. 
Input: - win (PsychoPy Window) - daq (ClientManager) parameters (Parameters) file_save (str) + fake (bool) Output: - file_save (str) + TaskData """ - TASK_NAME = 'RSVP Timing Verification Task' + name = 'RSVP Timing Verification' + mode = TaskMode.TIMING_VERIFICATION - def __init__(self, win: visual.Window, daq: ClientManager, - parameters: Parameters, file_save: str) -> None: - parameters['stim_height'] = 0.8 - parameters['stim_pos_y'] = 0.0 + def __init__(self, + parameters: Parameters, + file_save: str, + fake: bool = False, + **kwargs: Any) -> None: + parameters['rsvp_stim_height'] = 0.8 + parameters['rsvp_stim_pos_y'] = 0.0 super(RSVPTimingVerificationCalibration, - self).__init__(win, daq, parameters, file_save) + self).__init__(parameters, file_save, fake=fake) @property def symbol_set(self) -> List[str]: @@ -63,10 +64,3 @@ def init_inquiry_generator(self) -> Iterator[Inquiry]: return repeat(Inquiry(stimuli, durations, colors), params['stim_number']) - - @classmethod - def label(cls) -> str: - return RSVPTimingVerificationCalibration.TASK_NAME - - def name(self) -> str: - return RSVPTimingVerificationCalibration.TASK_NAME diff --git a/bcipy/task/paradigm/rsvp/copy_phrase.py b/bcipy/task/paradigm/rsvp/copy_phrase.py index 772d9c103..6380f029d 100644 --- a/bcipy/task/paradigm/rsvp/copy_phrase.py +++ b/bcipy/task/paradigm/rsvp/copy_phrase.py @@ -1,22 +1,34 @@ # mypy: disable-error-code="arg-type" import logging -from typing import List, NamedTuple, Optional, Tuple +from typing import Any, List, NamedTuple, Optional, Tuple from psychopy import core, visual +from psychopy.visual import Window from bcipy.acquisition import ClientManager -from bcipy.config import (DEFAULT_EVIDENCE_PRECISION, SESSION_DATA_FILENAME, - SESSION_SUMMARY_FILENAME, TRIGGER_FILENAME, - WAIT_SCREEN_MESSAGE) -from bcipy.display import (InformationProperties, PreviewInquiryProperties, - StimuliProperties) +from bcipy.config import ( + DEFAULT_EVIDENCE_PRECISION, + SESSION_DATA_FILENAME, + 
SESSION_SUMMARY_FILENAME, + TRIGGER_FILENAME, + WAIT_SCREEN_MESSAGE, + SESSION_LOG_FILENAME +) +from bcipy.display import ( + InformationProperties, + StimuliProperties, +) from bcipy.display.components.task_bar import CopyPhraseTaskBar from bcipy.display.main import PreviewParams from bcipy.display.paradigm.rsvp.mode.copy_phrase import CopyPhraseDisplay from bcipy.feedback.visual.visual_feedback import VisualFeedback +from bcipy.helpers.acquisition import init_acquisition, LslDataServer from bcipy.helpers.clock import Clock from bcipy.helpers.copy_phrase_wrapper import CopyPhraseWrapper -from bcipy.helpers.exceptions import TaskConfigurationException +from bcipy.display import init_display_window +from bcipy.exceptions import TaskConfigurationException +from bcipy.helpers.language_model import init_language_model +from bcipy.helpers.load import load_signal_models from bcipy.helpers.list import destutter from bcipy.helpers.parameters import Parameters from bcipy.helpers.save import _save_session_related_data @@ -34,12 +46,13 @@ from bcipy.language.main import LanguageModel from bcipy.signal.model import SignalModel from bcipy.signal.model.inquiry_preview import compute_probs_after_preview -from bcipy.task import Task -from bcipy.task.control.evidence import (EvidenceEvaluator, - init_evidence_evaluator) +from bcipy.task import Task, TaskData, TaskMode +from bcipy.task.control.evidence import EvidenceEvaluator, init_evidence_evaluator from bcipy.task.data import EvidenceType, Inquiry, Session from bcipy.task.exceptions import DuplicateModelEvidence +logger = logging.getLogger(SESSION_LOG_FILENAME) + class Decision(NamedTuple): """Represents the result of evaluating evidence. @@ -53,6 +66,7 @@ class Decision(NamedTuple): - new_inq_schedule : the next inquiry to present if there was not a decision. 
""" + decision_made: bool selection: str spelled_text: str @@ -69,27 +83,21 @@ class RSVPCopyPhraseTask(Task): Parameters ---------- - win : object, - display window to present visual stimuli. - daq : object, - data acquisition object initialized for the desired protocol parameters : dict, configuration details regarding the experiment. See parameters.json file_save : str, path location of where to save data from the session - signal_models : list of trained signal models. - language_model: object, - trained language model. fake : boolean, optional boolean to indicate whether this is a fake session or not. Returns ------- - file_save : str, - path location of where to save data from the session + TaskData """ - TASK_NAME = 'RSVP Copy Phrase Task' - MODE = 'RSVP' + name = "RSVP Copy Phrase" + paradigm = "RSVP" + mode = TaskMode.COPYPHRASE + initalized = False PARAMETERS_USED = [ 'time_fixation', 'time_flash', 'time_prompt', 'trial_window', @@ -102,29 +110,28 @@ class RSVPCopyPhraseTask(Task): 'show_preview_inquiry', 'preview_inquiry_isi', 'preview_inquiry_error_prob', 'preview_inquiry_key_input', 'preview_inquiry_length', 'preview_inquiry_progress_method', 'spelled_letters_count', - 'stim_color', 'stim_height', 'stim_jitter', 'stim_length', 'stim_number', - 'stim_order', 'stim_pos_x', 'stim_pos_y', 'stim_space_char', 'target_color', - 'task_buffer_length', 'task_color', 'task_height', 'task_text', + 'stim_color', 'rsvp_stim_height', 'stim_jitter', 'stim_length', 'stim_number', + 'stim_order', 'rsvp_stim_pos_x', 'rsvp_stim_pos_y', 'stim_space_char', 'target_color', + 'task_buffer_length', 'task_color', 'rsvp_task_height', 'task_text', 'rsvp_task_padding', 'info_pos_x', 'info_pos_y', 'info_color', 'info_height', 'info_text', 'info_color', 'info_height', 'info_text', ] def __init__( - self, - win: visual.Window, - daq: ClientManager, - parameters: Parameters, - file_save: str, - signal_models: List[SignalModel], - language_model: LanguageModel, - fake: bool) -> None: + 
self, + parameters: Parameters, + file_save: str, + fake: bool = False, + **kwargs: Any + ) -> None: super(RSVPCopyPhraseTask, self).__init__() - self.logger = logging.getLogger(__name__) + self.fake = fake + self.parameters = parameters + self.language_model = self.get_language_model() + self.signal_models = self.get_signal_models() + daq, servers, win = self.setup(parameters, file_save, fake) + self.servers = servers self.window = win self.daq = daq - self.parameters = parameters - self.signal_models = signal_models - self.language_model = language_model - self.fake = fake self.validate_parameters() @@ -136,7 +143,8 @@ def __init__( self.button_press_error_prob = parameters['preview_inquiry_error_prob'] - self.evidence_evaluators = self.init_evidence_evaluators(signal_models) + self.signal_model = self.signal_models[0] if self.signal_models else None + self.evidence_evaluators = self.init_evidence_evaluators(self.signal_models) self.evidence_types = self.init_evidence_types(self.signal_models, self.evidence_evaluators) self.file_save = file_save @@ -144,32 +152,108 @@ def __init__( self.trigger_handler = self.default_trigger_handler() self.session_save_location = f"{self.file_save}/{SESSION_DATA_FILENAME}" - self.copy_phrase = parameters['task_text'] + self.copy_phrase = parameters["task_text"] - self.signal_model = signal_models[0] if signal_models else None self.evidence_precision = DEFAULT_EVIDENCE_PRECISION self.feedback = self.init_feedback() - self.setup() + self.set() # set a preview_only parameter self.parameters.add_entry( - 'preview_only', + "preview_only", { - 'value': 'true' if self.parameters['preview_inquiry_progress_method'] == 0 else 'false', - 'section': '', - 'readableName': '', - 'helpTip': '', - 'recommended_values': '', - 'type': 'bool' - } + "value": ( + "true" + if self.parameters["preview_inquiry_progress_method"] == 0 + else "false" + ), + "section": "", + "name": "", + "helpTip": "", + "recommended": "", + "editable": "false", + "type": 
"bool", + }, ) self.rsvp = self.init_display() - def init_evidence_evaluators(self, - signal_models: List[SignalModel]) -> List[EvidenceEvaluator]: + def setup(self, parameters, data_save_location, fake=False) -> Tuple[ClientManager, List[LslDataServer], Window]: + # Initialize Acquisition + daq, servers = init_acquisition( + parameters, data_save_location, server=fake) + + # Initialize Display + display = init_display_window(parameters) + self.initalized = True + + return daq, servers, display + + def get_language_model(self) -> LanguageModel: + return init_language_model(self.parameters) + + def get_signal_models(self) -> Optional[List[SignalModel]]: + if not self.fake: + try: + model_dir = self.parameters.get('signal_model_path', None) + signal_models = load_signal_models(directory=model_dir) + assert signal_models, f"No signal models found in {model_dir}" + except Exception as error: + logger.exception(f'Cannot load signal model. Exiting. {error}') + raise error + return signal_models + return [] + + def cleanup(self): + self.exit_display() + self.write_offset_trigger() + self.save_session_data() + # Wait some time before exiting so there is trailing eeg data saved + self.wait() + + if self.initalized: + + try: + # Stop Acquisition + self.daq.stop_acquisition() + self.daq.cleanup() + + # Stop Servers + if self.servers: + for server in self.servers: + server.stop() + + # Close the display window + # NOTE: There is currently a bug in psychopy when attempting to shutdown + # windows when using a USB-C monitor. Putting the display close last in + # the inquiry allows acquisition to properly shutdown. 
+ self.window.close() + self.initalized = False + + except Exception as e: + logger.exception(str(e)) + + def save_session_data(self) -> None: + self.session.task_summary = TaskSummary( + self.session, + self.parameters["show_preview_inquiry"], + self.parameters["preview_inquiry_progress_method"], + self.trigger_handler.file_path, + ).as_dict() + self.write_session_data() + + # Evidence is not recorded in the session when using fake decisions. + if self.parameters["summarize_session"] and self.session.has_evidence(): + session_excel( + session=self.session, + excel_file=f"{self.file_save}/{SESSION_SUMMARY_FILENAME}", + ) + + def init_evidence_evaluators( + self, signal_models: List[SignalModel] + ) -> List[EvidenceEvaluator]: """Initializes the evidence evaluators from the provided signal models. Returns a list of evaluators for active devices. Raises an exception if @@ -189,7 +273,7 @@ def init_evidence_evaluators(self, ) evidence_types.append(evidence_type) else: - self.logger.info( + logger.info( f"SignalModel not used: there is no active device of type: {content_type}" ) return evaluators @@ -210,19 +294,19 @@ def default_trigger_handler(self) -> TriggerHandler: return TriggerHandler(self.file_save, TRIGGER_FILENAME, FlushFrequency.EVERY) - def setup(self) -> None: + def set(self) -> None: """Initialize/reset parameters used in the execute run loop.""" - self.spelled_text = str( - self.copy_phrase[0:self.starting_spelled_letters()]) - self.last_selection = '' + self.spelled_text = str(self.copy_phrase[0: self.starting_spelled_letters()]) + self.last_selection = "" self.inq_counter = 0 self.session = Session( save_location=self.file_save, - task='Copy Phrase', - mode=self.MODE, + task="Copy Phrase", + mode=str(self.mode), symbol_set=self.alp, - decision_threshold=self.parameters['decision_threshold']) + decision_threshold=self.parameters["decision_threshold"], + ) self.write_session_data() self.init_copy_phrase_task() @@ -230,10 +314,13 @@ def setup(self) -> 
None: def init_display(self) -> CopyPhraseDisplay: """Initialize the display""" - return _init_copy_phrase_display(self.parameters, self.window, - self.static_clock, - self.experiment_clock, - self.spelled_text) + return _init_copy_phrase_display( + self.parameters, + self.window, + self.static_clock, + self.experiment_clock, + self.spelled_text, + ) def init_feedback(self) -> Optional[VisualFeedback]: """Initialize visual feedback""" @@ -259,22 +346,26 @@ def validate_parameters(self) -> None: raise TaskConfigurationException(f"parameter '{param}' is required") # ensure data / query parameters are set correctly - buffer_len = self.parameters['task_buffer_length'] - prestim = self.parameters['prestim_length'] - poststim = self.parameters['trial_window'][1] - self.parameters['trial_window'][0] + buffer_len = self.parameters["task_buffer_length"] + prestim = self.parameters["prestim_length"] + poststim = ( + self.parameters["trial_window"][1] - self.parameters["trial_window"][0] + ) if buffer_len < prestim: raise TaskConfigurationException( - f'task_buffer_length=[{buffer_len}] must be greater than prestim_length=[{prestim}]') + f"task_buffer_length=[{buffer_len}] must be greater than prestim_length=[{prestim}]" + ) if buffer_len < poststim: raise TaskConfigurationException( - f'task_buffer_length=[{buffer_len}] must be greater than trial_length=[{poststim}]') + f"task_buffer_length=[{buffer_len}] must be greater than trial_length=[{poststim}]" + ) def starting_spelled_letters(self) -> int: """Number of letters already spelled at the start of the task.""" - spelled_letters_count = self.parameters['spelled_letters_count'] + spelled_letters_count = self.parameters["spelled_letters_count"] if spelled_letters_count > len(self.copy_phrase): - self.logger.info('Already spelled letters exceeds phrase length.') + logger.info("Already spelled letters exceeds phrase length.") spelled_letters_count = 0 return spelled_letters_count @@ -294,22 +385,24 @@ def 
init_copy_phrase_task(self) -> None: """ self.copy_phrase_task = CopyPhraseWrapper( - self.parameters['min_inq_len'], - self.parameters['max_inq_per_series'], + self.parameters["min_inq_len"], + self.parameters["max_inq_per_series"], lmodel=self.language_model, alp=self.alp, evidence_names=self.evidence_types, task_list=[(str(self.copy_phrase), self.spelled_text)], - is_txt_stim=self.parameters['is_txt_stim'], + is_txt_stim=self.parameters["is_txt_stim"], stim_timing=[ - self.parameters['time_fixation'], self.parameters['time_flash'] + self.parameters["time_fixation"], + self.parameters["time_flash"], ], - decision_threshold=self.parameters['decision_threshold'], - backspace_prob=self.parameters['lm_backspace_prob'], - backspace_always_shown=self.parameters['backspace_always_shown'], - stim_length=self.parameters['stim_length'], - stim_jitter=self.parameters['stim_jitter'], - stim_order=StimuliOrder(self.parameters['stim_order'])) + decision_threshold=self.parameters["decision_threshold"], + backspace_prob=self.parameters["lm_backspace_prob"], + backspace_always_shown=self.parameters["backspace_always_shown"], + stim_length=self.parameters["stim_length"], + stim_jitter=self.parameters["stim_jitter"], + stim_order=StimuliOrder(self.parameters["stim_order"]), + ) def user_wants_to_continue(self) -> bool: """Check if user wants to continue or terminate. 
@@ -322,10 +415,11 @@ def user_wants_to_continue(self) -> bool: should_continue = get_user_input( self.rsvp, WAIT_SCREEN_MESSAGE, - self.parameters['stim_color'], - first_run=self.first_run) + self.parameters["stim_color"], + first_run=self.first_run, + ) if not should_continue: - self.logger.info('User wants to exit.') + logger.info("User wants to exit.") return should_continue def wait(self, seconds: Optional[float] = None) -> None: @@ -336,11 +430,12 @@ def wait(self, seconds: Optional[float] = None) -> None: - seconds : duration of time to wait; if missing, defaults to the value of the parameter `'task_buffer_length'` """ - seconds = seconds or self.parameters['task_buffer_length'] + seconds = seconds or self.parameters["task_buffer_length"] core.wait(seconds) - def present_inquiry(self, inquiry_schedule: InquirySchedule - ) -> Tuple[List[Tuple[str, float]], bool]: + def present_inquiry( + self, inquiry_schedule: InquirySchedule + ) -> Tuple[List[Tuple[str, float]], bool]: """Present the given inquiry and return the trigger timing info. Parameters @@ -367,10 +462,13 @@ def present_inquiry(self, inquiry_schedule: InquirySchedule self.wait() # Setup the new stimuli - self.rsvp.schedule_to(stimuli=inquiry_schedule.stimuli[0], - timing=inquiry_schedule.durations[0], - colors=inquiry_schedule.colors[0] - if self.parameters['is_txt_stim'] else None) + self.rsvp.schedule_to( + stimuli=inquiry_schedule.stimuli[0], + timing=inquiry_schedule.durations[0], + colors=( + inquiry_schedule.colors[0] if self.parameters["is_txt_stim"] else None + ), + ) stim_times = self.rsvp.do_inquiry() proceed = not self.rsvp.preview_enabled or self.rsvp.preview_accepted @@ -393,31 +491,36 @@ def check_stop_criteria(self) -> bool: should continue. 
""" if self.copy_phrase == self.spelled_text: - self.logger.info('Spelling complete') + logger.info("Spelling complete") return False - if (self.inq_counter + 1) >= self.parameters['max_inq_len']: - self.logger.info('Max tries exceeded: to allow for more tries' - ' adjust the Maximum inquiry Length ' - '(max_inq_len) parameter.') + if (self.inq_counter + 1) >= self.parameters["max_inq_len"]: + logger.info( + "Max tries exceeded: to allow for more tries" + " adjust the Maximum inquiry Length " + "(max_inq_len) parameter." + ) return False - if self.session.total_time_spent >= (self.parameters['max_minutes'] * - 60): - self.logger.info('Max time exceeded. To allow for more time ' - 'adjust the max_minutes parameter.') + if self.session.total_time_spent >= (self.parameters["max_minutes"] * 60): + logger.info( + "Max time exceeded. To allow for more time " + "adjust the max_minutes parameter." + ) return False - if self.session.total_number_decisions >= self.parameters['max_selections']: - self.logger.info('Max number of selections reached ' - '(configured with the max_selections parameter)') + if self.session.total_number_decisions >= self.parameters["max_selections"]: + logger.info( + "Max number of selections reached " + "(configured with the max_selections parameter)" + ) return False if consecutive_incorrect( target_text=self.copy_phrase, spelled_text=self.spelled_text) >= self.parameters.get( - 'max_incorrect', 3): - self.logger.info( + 'max_incorrect'): + logger.info( 'Max number of consecutive incorrect selections reached ' '(configured with the max_incorrect parameter)') return False @@ -425,28 +528,26 @@ def check_stop_criteria(self) -> bool: return True def next_target(self) -> str: - """Computes the next target letter based on the currently spelled_text. 
- """ - if self.copy_phrase[0:len(self.spelled_text)] == self.spelled_text: + """Computes the next target letter based on the currently spelled_text.""" + if self.copy_phrase[0: len(self.spelled_text)] == self.spelled_text: # if correctly spelled so far, get the next letter. return self.copy_phrase[len(self.spelled_text)] return BACKSPACE_CHAR - def execute(self) -> str: + def execute(self) -> TaskData: """Executes the task. Returns ------- data save location (triggers.txt, session.json) """ - self.logger.info('Starting Copy Phrase Task!') + logger.info("Starting Copy Phrase Task!") run = True self.wait() # buffer for data processing while run and self.user_wants_to_continue() and self.current_inquiry: target_stimuli = self.next_target() - stim_times, proceed = self.present_inquiry( - self.current_inquiry) + stim_times, proceed = self.present_inquiry(self.current_inquiry) self.write_trigger_data(stim_times, target_stimuli) self.wait() @@ -464,8 +565,9 @@ def execute(self) -> str: decision_made=decision.decision_made) if decision.decision_made: - self.show_feedback(decision.selection, - (decision.selection == target_stimuli)) + self.show_feedback( + decision.selection, (decision.selection == target_stimuli) + ) self.spelled_text = decision.spelled_text self.current_inquiry = self.next_inquiry() @@ -475,24 +577,9 @@ def execute(self) -> str: run = self.check_stop_criteria() self.inq_counter += 1 - self.exit_display() - self.write_offset_trigger() - - self.session.task_summary = TaskSummary( - self.session, self.parameters['show_preview_inquiry'], - self.parameters['preview_inquiry_progress_method'], - self.trigger_handler.file_path).as_dict() - self.write_session_data() - - # Evidence is not recorded in the session when using fake decisions. 
- if self.parameters['summarize_session'] and self.session.has_evidence(): - session_excel(session=self.session, - excel_file=f"{self.file_save}/{SESSION_SUMMARY_FILENAME}") - - # Wait some time before exiting so there is trailing eeg data saved - self.wait() + self.cleanup() - return self.file_save + return TaskData(save_path=self.file_save, task_dict=self.session.as_dict()) def evaluate_evidence(self) -> Decision: """Uses the `copy_phrase_task` parameter to evaluate the provided @@ -503,9 +590,9 @@ def evaluate_evidence(self) -> Decision: - self.copy_phrase_task """ if self.fake: - _, spelled, _ = fake_copy_phrase_decision(self.copy_phrase, - self.next_target(), - self.spelled_text) + _, spelled, _ = fake_copy_phrase_decision( + self.copy_phrase, self.next_target(), self.spelled_text + ) # Reset the stoppage criteria by forcing the commit to a decision. self.copy_phrase_task.decision_maker.do_series() # In fake mode, only the LM is providing evidence, so the decision @@ -513,21 +600,24 @@ def evaluate_evidence(self) -> Decision: self.copy_phrase_task.decision_maker.update(spelled) # In fake mode, all inquiries result in a selection. - return Decision(decision_made=True, - selection=spelled[-1], - spelled_text=spelled, - new_inq_schedule=None) + return Decision( + decision_made=True, + selection=spelled[-1], + spelled_text=spelled, + new_inq_schedule=None, + ) decision_made, new_sti = self.copy_phrase_task.decide() spelled_text = self.copy_phrase_task.decision_maker.displayed_state - selection = '' + selection = "" if decision_made: selection = self.copy_phrase_task.decision_maker.last_selection return Decision(decision_made, selection, spelled_text, new_sti) - def add_evidence(self, stim_times: List[List], - proceed: bool = True) -> List[EvidenceType]: + def add_evidence( + self, stim_times: List[List], proceed: bool = True + ) -> List[EvidenceType]: """Add all evidence used to make a decision. 
Evaluates evidence from various sources (button press, devices, @@ -546,9 +636,7 @@ def add_evidence(self, stim_times: List[List], -------- - self.copy_phrase_task """ - evidences = [ - self.compute_button_press_evidence(proceed) - ] + evidences = [self.compute_button_press_evidence(proceed)] # evidence from one or more devices evidences.extend(self.compute_device_evidence(stim_times, proceed)) @@ -563,7 +651,8 @@ def add_evidence(self, stim_times: List[List], return evidence_types def compute_button_press_evidence( - self, proceed: bool) -> Optional[Tuple[EvidenceType, List[float]]]: + self, proceed: bool + ) -> Optional[Tuple[EvidenceType, List[float]]]: """If 'show_preview_inquiry' feature is enabled, compute the button press evidence and add it to the copy phrase task. @@ -576,20 +665,23 @@ def compute_button_press_evidence( tuple of (evidence type, evidence) or None if inquiry preview is not enabled. """ - if not self.parameters['show_preview_inquiry'] \ - or not self.current_inquiry \ - or self.parameters['preview_only']: + if ( + not self.parameters["show_preview_inquiry"] or + not self.current_inquiry or + self.parameters["preview_only"] + ): return None - probs = compute_probs_after_preview(self.current_inquiry.stimuli[0], - self.alp, - self.button_press_error_prob, - proceed) + probs = compute_probs_after_preview( + self.current_inquiry.stimuli[0], + self.alp, + self.button_press_error_prob, + proceed, + ) return (EvidenceType.BTN, probs) def compute_device_evidence( - self, - stim_times: List[List], - proceed: bool = True) -> List[Tuple[EvidenceType, List[float]]]: + self, stim_times: List[List], proceed: bool = True + ) -> List[Tuple[EvidenceType, List[float]]]: """Get inquiry data from all devices and evaluate the evidence, but don't yet attempt a decision. 
@@ -608,13 +700,15 @@ def compute_device_evidence( # currently prestim_length is used as a buffer for filter application post_stim_buffer = int(self.parameters.get("task_buffer_length") / 2) - prestim_buffer: float = self.parameters['prestim_length'] - trial_window: Tuple[float, float] = self.parameters['trial_window'] + prestim_buffer: float = self.parameters["prestim_length"] + trial_window: Tuple[float, float] = self.parameters["trial_window"] window_length = trial_window[1] - trial_window[0] inquiry_timing = self.stims_for_decision(stim_times) # update the inquiry timing list (stim, time) based on the trial window first time value - inquiry_timing = [(stim, time + trial_window[0]) for stim, time in inquiry_timing] + inquiry_timing = [ + (stim, time + trial_window[0]) for stim, time in inquiry_timing + ] # Get all data at once so we don't redundantly query devices which are # used in more than one signal model. @@ -622,13 +716,15 @@ def compute_device_evidence( inquiry_timing=inquiry_timing, daq=self.daq, prestim=prestim_buffer, - poststim=post_stim_buffer + window_length) + poststim=post_stim_buffer + window_length, + ) triggers = relative_triggers(inquiry_timing, prestim_buffer) # we assume all are nontargets at this point - labels = ['nontarget'] * len(triggers) + labels = ["nontarget"] * len(triggers) letters, times, filtered_labels = self.copy_phrase_task.letter_info( - triggers, labels) + triggers, labels + ) evidences = [] for evidence_evaluator in self.evidence_evaluators: @@ -637,12 +733,15 @@ def compute_device_evidence( symbols=letters, times=times, target_info=filtered_labels, - window_length=window_length) + window_length=window_length, + ) evidences.append((evidence_evaluator.produces, probs)) return evidences - def stims_for_decision(self, stim_times: List[Tuple[str, float]]) -> List[Tuple[str, float]]: + def stims_for_decision( + self, stim_times: List[Tuple[str, float]] + ) -> List[Tuple[str, float]]: """The stim_timings from the display may 
include non-letter stimuli such as calibration and inquiry_preview timings. This method extracts only the letter data used to process the data for a decision. @@ -656,16 +755,16 @@ def stims_for_decision(self, stim_times: List[Tuple[str, float]]) -> List[Tuple[ stim times where the stim is in the current alphabet; filters out 'calibration', 'inquiry_preview', etc. """ - return [ - timing for timing in stim_times if timing[0] in (self.alp + ['+']) - ] - - def new_data_record(self, - stim_times: List[Tuple[str, float]], - target_stimuli: str, - current_text: str, - decision: Decision, - evidence_types: Optional[List[EvidenceType]] = None) -> Inquiry: + return [timing for timing in stim_times if timing[0] in (self.alp + ["+"])] + + def new_data_record( + self, + stim_times: List[Tuple[str, float]], + target_stimuli: str, + current_text: str, + decision: Decision, + evidence_types: Optional[List[EvidenceType]] = None, + ) -> Inquiry: """Construct a new inquiry data record. Parameters @@ -685,16 +784,19 @@ def new_data_record(self, assert self.current_inquiry, "Current inquiry is required" evidence_types = evidence_types or [] triggers = construct_triggers(self.stims_for_decision(stim_times)) - data = Inquiry(stimuli=self.current_inquiry.stimuli, - timing=self.current_inquiry.durations, - triggers=triggers, - target_info=target_info(triggers, target_stimuli, - self.parameters['is_txt_stim']), - target_letter=target_stimuli, - current_text=current_text, - target_text=self.copy_phrase, - selection=decision.selection, - next_display_state=decision.spelled_text) + data = Inquiry( + stimuli=self.current_inquiry.stimuli, + timing=self.current_inquiry.durations, + triggers=triggers, + target_info=target_info( + triggers, target_stimuli, self.parameters["is_txt_stim"] + ), + target_letter=target_stimuli, + current_text=current_text, + target_text=self.copy_phrase, + selection=decision.selection, + next_display_state=decision.spelled_text, + ) data.precision = 
self.evidence_precision if not self.fake: @@ -719,10 +821,9 @@ def exit_display(self) -> None: # Give the system time to process self.wait() - def update_session_data(self, - data: Inquiry, - save: bool = True, - decision_made: bool = False) -> None: + def update_session_data( + self, data: Inquiry, save: bool = True, decision_made: bool = False + ) -> None: """Update the current session with the latest inquiry data Parameters @@ -749,25 +850,26 @@ def write_session_data(self) -> None: """Save session data to disk.""" if self.session: session_file = _save_session_related_data( - self.session_save_location, - self.session.as_dict()) + self.session_save_location, self.session.as_dict() + ) session_file.close() def write_offset_trigger(self) -> None: - """Append the offset to the end of the triggers file. - """ + """Append the offset to the end of the triggers file.""" # To help support future refactoring or use of lsl timestamps only # we write only the sample offset here. triggers = [] for content_type, client in self.daq.clients_by_type.items(): - label = offset_label(content_type.name, prefix='daq_sample_offset') + label = offset_label(content_type.name, prefix="daq_sample_offset") time = client.offset(self.rsvp.first_stim_time) triggers.append(Trigger(label, TriggerType.SYSTEM, time)) self.trigger_handler.add_triggers(triggers) self.trigger_handler.close() - def write_trigger_data(self, stim_times: List[Tuple[str, float]], target_stimuli: str) -> None: + def write_trigger_data( + self, stim_times: List[Tuple[str, float]], target_stimuli: str + ) -> None: """Save trigger data to disk. 
Parameters @@ -781,13 +883,15 @@ def write_trigger_data(self, stim_times: List[Tuple[str, float]], target_stimuli offset_triggers = [] for content_type, client in self.daq.clients_by_type.items(): label = offset_label(content_type.name) - time = client.offset( - self.rsvp.first_stim_time) - self.rsvp.first_stim_time - offset_triggers.append(Trigger(label, TriggerType.OFFSET, - time)) + time = ( + client.offset(self.rsvp.first_stim_time) - self.rsvp.first_stim_time + ) + offset_triggers.append(Trigger(label, TriggerType.OFFSET, time)) self.trigger_handler.add_triggers(offset_triggers) - triggers = convert_timing_triggers(stim_times, target_stimuli, self.trigger_type) + triggers = convert_timing_triggers( + stim_times, target_stimuli, self.trigger_type + ) self.trigger_handler.add_triggers(triggers) def trigger_type(self, symbol: str, target: str, index: int) -> TriggerType: @@ -795,19 +899,16 @@ def trigger_type(self, symbol: str, target: str, index: int) -> TriggerType: Cast a given symbol to a TriggerType. """ - if symbol == 'inquiry_preview': + if symbol == "inquiry_preview": return TriggerType.PREVIEW - if 'bcipy_key_press' in symbol: + if "bcipy_key_press" in symbol: return TriggerType.EVENT - if symbol == '+': + if symbol == "+": return TriggerType.FIXATION if target == symbol: return TriggerType.TARGET return TriggerType.NONTARGET - def name(self) -> str: - return self.TASK_NAME - @property def first_run(self) -> bool: """First run. 
@@ -830,50 +931,47 @@ class TaskSummary: 2 = press to skip to another inquiry """ - def __init__(self, - session: Session, - show_preview: bool = False, - preview_mode: int = 0, - trigger_path: Optional[str] = None) -> None: - assert preview_mode in range(3), 'Preview mode out of range' + def __init__( + self, + session: Session, + show_preview: bool = False, + preview_mode: int = 0, + trigger_path: Optional[str] = None, + ) -> None: + assert preview_mode in range(3), "Preview mode out of range" self.session = session self.show_preview = show_preview self.preview_mode = preview_mode self.trigger_path = trigger_path - self.logger = logging.getLogger(__name__) + logger = logging.getLogger(__name__) def as_dict(self) -> dict: """Computes the task summary data to append to the session.""" - selections = [ - inq for inq in self.session.all_inquiries if inq.selection - ] + selections = [inq for inq in self.session.all_inquiries if inq.selection] correct = [inq for inq in selections if inq.is_correct_decision] incorrect = [inq for inq in selections if not inq.is_correct_decision] # Note that SPACE is considered a symbol - correct_symbols = [ - inq for inq in correct if inq.selection != BACKSPACE_CHAR - ] + correct_symbols = [inq for inq in correct if inq.selection != BACKSPACE_CHAR] btn_presses = self.btn_press_count() sel_count = len(selections) - switch_per_selection = (btn_presses / - sel_count) if sel_count > 0 else 0 + switch_per_selection = (btn_presses / sel_count) if sel_count > 0 else 0 accuracy = (len(correct) / sel_count) if sel_count > 0 else 0 # Note that minutes includes startup time and any breaks. 
minutes = self.session.total_time_spent / 60 return { - 'selections_correct': len(correct), - 'selections_incorrect': len(incorrect), - 'selections_correct_symbols': len(correct_symbols), - 'switch_total': btn_presses, - 'switch_per_selection': switch_per_selection, - 'switch_response_time': self.switch_response_time(), - 'typing_accuracy': accuracy, - 'correct_rate': len(correct) / minutes if minutes else 0, - 'copy_rate': len(correct_symbols) / minutes if minutes else 0 + "selections_correct": len(correct), + "selections_incorrect": len(incorrect), + "selections_correct_symbols": len(correct_symbols), + "switch_total": btn_presses, + "switch_per_selection": switch_per_selection, + "switch_response_time": self.switch_response_time(), + "typing_accuracy": accuracy, + "correct_rate": len(correct) / minutes if minutes else 0, + "copy_rate": len(correct_symbols) / minutes if minutes else 0, } def btn_press_count(self) -> int: @@ -903,13 +1001,12 @@ def switch_response_time(self) -> Optional[float]: # Confirm that the data is structured as expected. 
for preview, keypress in pairs: if (preview.type != TriggerType.PREVIEW) or ( - keypress.type != TriggerType.EVENT): - self.logger.info('Could not compute switch_response_time') + keypress.type != TriggerType.EVENT + ): + logger.info("Could not compute switch_response_time") return None - response_times = [ - keypress.time - preview.time for preview, keypress in pairs - ] + response_times = [keypress.time - preview.time for preview, keypress in pairs] count = len(response_times) return sum(response_times) / count if count > 0 else None @@ -919,48 +1016,45 @@ def switch_triggers(self) -> List[Trigger]: return [] triggers, _offset = TriggerHandler.read_text_file(self.trigger_path) return [ - trg for trg in triggers + trg + for trg in triggers if trg.type in [TriggerType.PREVIEW, TriggerType.EVENT] ] def _init_copy_phrase_display( - parameters: Parameters, - win: visual.Window, - static_clock: core.StaticPeriod, - experiment_clock: Clock, - starting_spelled_text) -> CopyPhraseDisplay: - preview_inquiry = PreviewInquiryProperties( - preview_on=parameters['show_preview_inquiry'], - preview_only=parameters['preview_only'], - preview_inquiry_length=parameters['preview_inquiry_length'], - preview_inquiry_key_input=parameters['preview_inquiry_key_input'], - preview_inquiry_progress_method=parameters[ - 'preview_inquiry_progress_method'], - preview_inquiry_isi=parameters['preview_inquiry_isi']) + parameters: Parameters, + win: visual.Window, + static_clock: core.StaticPeriod, + experiment_clock: Clock, + starting_spelled_text, +) -> CopyPhraseDisplay: info = InformationProperties( - info_color=[parameters['info_color']], - info_pos=[(parameters['info_pos_x'], parameters['info_pos_y'])], - info_height=[parameters['info_height']], - info_font=[parameters['font']], - info_text=[parameters['info_text']], + info_color=[parameters["info_color"]], + info_pos=[(parameters["info_pos_x"], parameters["info_pos_y"])], + info_height=[parameters["info_height"]], + 
info_font=[parameters["font"]], + info_text=[parameters["info_text"]], + ) + stimuli = StimuliProperties( + stim_font=parameters["font"], + stim_pos=(parameters["rsvp_stim_pos_x"], parameters["rsvp_stim_pos_y"]), + stim_height=parameters["rsvp_stim_height"], + stim_inquiry=["A"] * parameters["stim_length"], + stim_colors=[parameters["stim_color"]] * parameters["stim_length"], + stim_timing=[10] * parameters["stim_length"], + is_txt_stim=parameters["is_txt_stim"], + ) + + task_bar = CopyPhraseTaskBar( + win, + task_text=parameters["task_text"], + spelled_text=starting_spelled_text, + colors=[parameters["task_color"]], + font=parameters["font"], + height=parameters["rsvp_task_height"], + padding=parameters["rsvp_task_padding"], ) - stimuli = StimuliProperties(stim_font=parameters['font'], - stim_pos=(parameters['stim_pos_x'], - parameters['stim_pos_y']), - stim_height=parameters['stim_height'], - stim_inquiry=['A'] * parameters['stim_length'], - stim_colors=[parameters['stim_color']] * parameters['stim_length'], - stim_timing=[10] * parameters['stim_length'], - is_txt_stim=parameters['is_txt_stim']) - - task_bar = CopyPhraseTaskBar(win, - task_text=parameters['task_text'], - spelled_text=starting_spelled_text, - colors=[parameters['task_color']], - font=parameters['font'], - height=parameters['task_height'], - padding=parameters['task_padding']) return CopyPhraseDisplay( win, diff --git a/bcipy/task/paradigm/vep/__init__.py b/bcipy/task/paradigm/vep/__init__.py index e69de29bb..bbcb8bd6c 100644 --- a/bcipy/task/paradigm/vep/__init__.py +++ b/bcipy/task/paradigm/vep/__init__.py @@ -0,0 +1,2 @@ +# Import all VEP tasks to make them available to the task registry +from .calibration import VEPCalibrationTask # noqa diff --git a/bcipy/task/paradigm/vep/calibration.py b/bcipy/task/paradigm/vep/calibration.py index e291a6121..34cf9909e 100644 --- a/bcipy/task/paradigm/vep/calibration.py +++ b/bcipy/task/paradigm/vep/calibration.py @@ -1,9 +1,10 @@ """VEP Calibration 
task-related code""" +import logging from typing import Any, Dict, Iterator, List, Optional from psychopy import visual # type: ignore -from bcipy.acquisition.multimodal import ClientManager +from bcipy.config import DEFAULT_FRAME_RATE, SESSION_LOG_FILENAME from bcipy.display import InformationProperties, VEPStimuliProperties from bcipy.display.components.layout import centered from bcipy.display.components.task_bar import CalibrationTaskBar @@ -13,10 +14,12 @@ from bcipy.helpers.clock import Clock from bcipy.helpers.parameters import Parameters from bcipy.helpers.triggers import TriggerType -from bcipy.task.base_calibration import BaseCalibrationTask, Inquiry +from bcipy.task.calibration import BaseCalibrationTask, Inquiry from bcipy.task.paradigm.vep.stim_generation import \ generate_vep_calibration_inquiries +logger = logging.getLogger(SESSION_LOG_FILENAME) + class VEPCalibrationTask(BaseCalibrationTask): """VEP Calibration Task. @@ -28,26 +31,30 @@ class VEPCalibrationTask(BaseCalibrationTask): PARAMETERS: ---------- - win (PsychoPy Display Object) - daq (Data Acquisition Object) - parameters (Dictionary) - file_save (String) + parameters (dict) + file_save (str) + fake (bool) """ - MODE = 'VEP' - - def __init__(self, win: visual.Window, daq: ClientManager, - parameters: Parameters, file_save: str): + name = 'VEP Calibration' + paradigm = 'VEP' + + def __init__(self, + parameters: Parameters, + file_save: str, + fake: bool = False, + **kwargs: Any) -> None: self.box_colors = [ '#00FF80', '#FFFFB3', '#CB99FF', '#FB8072', '#80B1D3', '#FF8232' ] self.num_boxes = 6 - super().__init__(win, daq, parameters, file_save) + super().__init__(parameters, file_save, fake=fake, **kwargs) def init_display(self) -> VEPDisplay: """Initialize the display""" return init_vep_display(self.parameters, self.window, self.experiment_clock, self.symbol_set, - self.box_colors) + self.box_colors, + fake=self.fake) def init_inquiry_generator(self) -> Iterator[Inquiry]: """Initializes a 
generator that returns inquiries to be presented.""" @@ -118,7 +125,7 @@ def target_box_index(inquiry: Inquiry) -> Optional[int]: def init_vep_display(parameters: Parameters, window: visual.Window, experiment_clock: Clock, symbol_set: List[str], - box_colors: List[str]) -> VEPDisplay: + box_colors: List[str], fake: bool = False) -> VEPDisplay: """Initialize the display""" info = InformationProperties( info_color=[parameters['info_color']], @@ -141,7 +148,7 @@ def init_vep_display(parameters: Parameters, window: visual.Window, stim_props = VEPStimuliProperties( stim_font=parameters['font'], stim_pos=box_config.positions, - stim_height=0.1, + stim_height=parameters['vep_stim_height'], timing=timing, stim_color=colors, inquiry=[], @@ -153,9 +160,15 @@ def init_vep_display(parameters: Parameters, window: visual.Window, current_index=0, colors=[parameters['task_color']], font=parameters['font'], - height=parameters['task_height']) + height=parameters['vep_task_height']) # issue #186641183 ; determine a better configuration strategy for flicker + if fake: + frame_rate = window.getActualFrameRate() + if frame_rate is None: + frame_rate = DEFAULT_FRAME_RATE + + logger.info(f"Frame rate set to: {frame_rate}") return VEPDisplay(window, experiment_clock, @@ -165,4 +178,5 @@ def init_vep_display(parameters: Parameters, window: visual.Window, symbol_set=symbol_set, box_config=box_config, flicker_rates=DEFAULT_FLICKER_RATES, - should_prompt_target=True) + should_prompt_target=True, + frame_rate=frame_rate if fake else None) diff --git a/bcipy/task/registry.py b/bcipy/task/registry.py new file mode 100644 index 000000000..36b1bacf2 --- /dev/null +++ b/bcipy/task/registry.py @@ -0,0 +1,49 @@ +"""Task Registry ; used to provide task options to the GUI and command line +tools. 
User defined tasks can be added to the Registry.""" +from typing import Dict, List, Type +from bcipy.task import Task + + +class TaskRegistry: + registry_dict: Dict[str, Type[Task]] + + def __init__(self): + # Collects all non-abstract subclasses of Task. type ignore is used to work around a mypy bug + # https://github.com/python/mypy/issues/3115 + from bcipy.task.paradigm import vep, rsvp, matrix # noqa + from bcipy.task import actions # noqa + + self.registry_dict = {} + self.collect_subclasses(Task) # type: ignore[type-abstract] + + def collect_subclasses(self, cls: Type[Task]): + """Recursively collects all non-abstract subclasses of the given class and adds them to the registry.""" + for sub_class in cls.__subclasses__(): + if not getattr(sub_class, '__abstractmethods__', False): + self.registry_dict[sub_class.name] = sub_class + self.collect_subclasses(sub_class) + + def get(self, task_name: str) -> Type[Task]: + """Returns a task type based on its name property.""" + if task_name in self.registry_dict: + return self.registry_dict[task_name] + raise ValueError(f'{task_name} not a registered task') + + def get_all_types(self) -> List[Type[Task]]: + """Returns a list of all registered tasks.""" + return list(self.registry_dict.values()) + + def list(self) -> List[str]: + """Returns a list of all registered task names.""" + return list(self.registry_dict.keys()) + + def calibration_tasks(self) -> List[Type[Task]]: + """Returns a list of all registered calibration tasks.""" + from bcipy.task.calibration import BaseCalibrationTask + return [task for task in self.get_all_types() if issubclass(task, BaseCalibrationTask)] + + def register_task(self, task: Type[Task]) -> None: + """Registers a task with the TaskRegistry.""" + # Note that all imported tasks are automatically registered when the TaskRegistry is initialized. This + # method allows for the registration of additional tasks after initialization. 
+ self.registry_dict[task.name] = task diff --git a/bcipy/task/start_task.py b/bcipy/task/start_task.py deleted file mode 100644 index a42a04d67..000000000 --- a/bcipy/task/start_task.py +++ /dev/null @@ -1,104 +0,0 @@ -"""Code for constructing and executing registered tasks""" -# mypy: disable-error-code="arg-type, misc" -from typing import List, Optional -from psychopy import visual - -from bcipy.task import Task -from bcipy.task.exceptions import TaskRegistryException -from bcipy.task.paradigm.matrix.calibration import MatrixCalibrationTask -from bcipy.task.paradigm.matrix.copy_phrase import MatrixCopyPhraseTask -from bcipy.task.paradigm.matrix.timing_verification import \ - MatrixTimingVerificationCalibration -from bcipy.task.paradigm.rsvp.calibration.calibration import \ - RSVPCalibrationTask -from bcipy.task.paradigm.rsvp.calibration.timing_verification import \ - RSVPTimingVerificationCalibration -from bcipy.task.paradigm.rsvp.copy_phrase import RSVPCopyPhraseTask -from bcipy.task.paradigm.vep.calibration import VEPCalibrationTask -from bcipy.task.task_registry import TaskType - -from bcipy.acquisition import ClientManager -from bcipy.helpers.parameters import Parameters -from bcipy.signal.model import SignalModel -from bcipy.language import LanguageModel - - -def make_task( - display_window: visual.Window, - daq: ClientManager, - task: TaskType, - parameters: Parameters, - file_save: str, - signal_models: Optional[List[SignalModel]] = None, - language_model: Optional[LanguageModel] = None, - fake: bool = True) -> Task: - """Creates a Task based on the provided parameters. 
- - Parameters: - ----------- - display_window: psychopy Window - daq: ClientManager - manages one or more acquisition clients - task: TaskType - parameters: dict - file_save: str - path to file in which to save data - signal_models - list of trained models - language_model - language model - fake: boolean - true if eeg stream is randomly generated - Returns: - -------- - Task instance - """ - - # NORMAL RSVP MODES - if task is TaskType.RSVP_CALIBRATION: - return RSVPCalibrationTask( - display_window, daq, parameters, file_save) - - if task is TaskType.RSVP_COPY_PHRASE: - return RSVPCopyPhraseTask( - display_window, daq, parameters, file_save, signal_models, - language_model, fake=fake) - - if task is TaskType.RSVP_TIMING_VERIFICATION_CALIBRATION: - return RSVPTimingVerificationCalibration(display_window, daq, parameters, file_save) - - if task is TaskType.MATRIX_CALIBRATION: - return MatrixCalibrationTask( - display_window, daq, parameters, file_save - ) - - if task is TaskType.MATRIX_TIMING_VERIFICATION_CALIBRATION: - return MatrixTimingVerificationCalibration(display_window, daq, parameters, file_save) - - if task is TaskType.MATRIX_COPY_PHRASE: - return MatrixCopyPhraseTask( - display_window, daq, parameters, file_save, signal_models, - language_model, fake=fake) - - if task is TaskType.VEP_CALIBRATION: - return VEPCalibrationTask(display_window, daq, parameters, file_save) - - raise TaskRegistryException( - 'The provided experiment type is not registered.') - - -def start_task( - display_window: visual.Window, - daq: ClientManager, - task: TaskType, - parameters: Parameters, - file_save: str, - signal_models: Optional[List[SignalModel]] = None, - language_model: Optional[LanguageModel] = None, - fake: bool = True) -> str: - """Creates a Task and starts execution.""" - bcipy_task = make_task( - display_window, - daq, - task, - parameters, - file_save, - signal_models, - language_model, - fake) - return bcipy_task.execute() diff --git 
a/bcipy/task/task_registry.py b/bcipy/task/task_registry.py deleted file mode 100644 index 661b1d339..000000000 --- a/bcipy/task/task_registry.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Task Registry ; used to provide task options to the GUI and command line -tools. User defined tasks can be added to the Registry.""" - -# NOTE: -# In the future we may want to consider dynamically retrieving all subclasses -# of Task and use these to populate a registry. We could also provide -# functionality for bcipy users to define their own tasks and register them so -# they would appear as options in the GUI. -# -# However, this approach is currently problematic for the GUI interface. Due -# to the tight coupling of the display code with the Tasks, importing any of -# the Task subclasses causes pygame (a psychopy dependency) to create a GUI, -# which seems to prevent our other GUI code from working. - -from typing import List - -from bcipy.helpers.exceptions import BciPyCoreException -from bcipy.helpers.system_utils import AutoNumberEnum - - -class TaskType(AutoNumberEnum): - """Enum of the registered experiment types (Tasks), along with the label - used for display in the GUI and command line tools. Values are looked up - by their (1-based) index. 
- - Examples: - >>> TaskType(1) - - - >>> TaskType(1).label - 'RSVP Calibration' - """ - - RSVP_CALIBRATION = 'RSVP Calibration' - RSVP_COPY_PHRASE = 'RSVP Copy Phrase' - RSVP_TIMING_VERIFICATION_CALIBRATION = 'RSVP Time Test Calibration' - MATRIX_CALIBRATION = 'Matrix Calibration' - MATRIX_TIMING_VERIFICATION_CALIBRATION = 'Matrix Time Test Calibration' - MATRIX_COPY_PHRASE = 'Matrix Copy Phrase' - VEP_CALIBRATION = 'VEP Calibration' - - def __init__(self, label): - self.label = label - - @classmethod - def by_value(cls, item): - tasks = cls.list() - # The cls.list method returns a sorted list of enum tasks - # check if item present and return the index + 1 (which is the ENUM value for the task) - if item in tasks: - return cls(tasks.index(item) + 1) - raise BciPyCoreException(f'{item} not a registered TaskType={tasks}') - - @classmethod - def calibration_tasks(cls) -> List['TaskType']: - return [ - task for task in cls if task.name.endswith('CALIBRATION') and - 'COPY_PHRASE' not in task.name - ] - - @classmethod - def list(cls) -> List[str]: - return list(map(lambda c: c.label, cls)) diff --git a/bcipy/task/tests/core/test_actions.py b/bcipy/task/tests/core/test_actions.py new file mode 100644 index 000000000..185ba8cf3 --- /dev/null +++ b/bcipy/task/tests/core/test_actions.py @@ -0,0 +1,73 @@ +import unittest +import subprocess + +from mockito import mock, when, verify, unstub +from bcipy.task import actions, TaskData +from bcipy.task.actions import CodeHookAction, OfflineAnalysisAction, ExperimentFieldCollectionAction + + +class TestActions(unittest.TestCase): + + def setUp(self) -> None: + self.parameters = mock() + self.parameters_path = 'parameters_path' + self.data_directory = 'data/' + + def tearDown(self) -> None: + unstub() + + def test_code_hook_action_subprocess(self) -> None: + code_hook = 'code_hook' + when(subprocess).Popen(code_hook, shell=True).thenReturn(None) + action = CodeHookAction( + parameters=self.parameters, + 
data_directory=self.data_directory, + code_hook=code_hook, + subprocess=True + ) + response = action.execute() + self.assertIsInstance(response, TaskData) + verify(subprocess, times=1).Popen(code_hook, shell=True) + + def test_code_hook_action_no_subprocess(self) -> None: + code_hook = 'code_hook' + when(subprocess).run(code_hook, shell=True).thenReturn(None) + action = CodeHookAction( + parameters=self.parameters, + data_directory=self.data_directory, + code_hook=code_hook, + subprocess=False + ) + response = action.execute() + self.assertIsInstance(response, TaskData) + verify(subprocess, times=1).run(code_hook, shell=True) + + def test_offline_analysis_action(self) -> None: + cmd_expected = f'bcipy-train -p "{self.parameters_path}"' + + when(subprocess).run(cmd_expected, shell=True, check=True).thenReturn(None) + action = OfflineAnalysisAction( + parameters=self.parameters, + data_directory=self.data_directory, + parameters_path=self.parameters_path, + ) + response = action.execute() + self.assertIsInstance(response, TaskData) + verify(subprocess, times=1).run(cmd_expected, shell=True, check=True) + + def test_experiment_field_collection_action(self) -> None: + experiment_id = 'experiment_id' + when(actions).start_experiment_field_collection_gui(experiment_id, self.data_directory).thenReturn(None) + action = ExperimentFieldCollectionAction( + parameters=self.parameters, + data_directory=self.data_directory, + experiment_id=experiment_id + ) + task_data = action.execute() + self.assertIsNotNone(task_data) + self.assertIsInstance(task_data, TaskData) + verify(actions, times=1).start_experiment_field_collection_gui(experiment_id, self.data_directory) + + +if __name__ == '__main__': + unittest.main() diff --git a/bcipy/task/tests/core/test_task_main.py b/bcipy/task/tests/core/test_task_main.py new file mode 100644 index 000000000..dd3ce22f1 --- /dev/null +++ b/bcipy/task/tests/core/test_task_main.py @@ -0,0 +1,57 @@ +import unittest +from bcipy.task import Task, 
TaskData, TaskMode + + +class TestTask(unittest.TestCase): + + def test_task_fails_without_name(self): + mode = TaskMode.CALIBRATION + + class TestTask(Task): + + def execute(self) -> TaskData: + ... + + with self.assertRaises(AssertionError): + TestTask(mode=mode) + + def test_task_fails_without_mode(self): + name = "test task" + + class TestTask(Task): + + def execute(self) -> TaskData: + ... + + with self.assertRaises(AssertionError): + TestTask(name=name) + + def test_task_fails_without_execute(self): + name = "test task" + mode = TaskMode.CALIBRATION + + class TestTask(Task): + pass + + with self.assertRaises(TypeError): + TestTask(name=name, mode=mode) + + def test_task_initializes(self): + name = "test task" + mode = TaskMode.CALIBRATION + + class TestTask(Task): + + def __init__(self, name: str, mode: TaskMode): + self.name = name + self.mode = mode + + def execute(self) -> TaskData: + ... + task = TestTask(name=name, mode=mode) + self.assertEqual(task.name, name) + self.assertEqual(task.mode, mode) + + +if __name__ == '__main__': + unittest.main() diff --git a/bcipy/task/tests/orchestrator/test_orchestrator.py b/bcipy/task/tests/orchestrator/test_orchestrator.py new file mode 100644 index 000000000..f2805a849 --- /dev/null +++ b/bcipy/task/tests/orchestrator/test_orchestrator.py @@ -0,0 +1,157 @@ +import unittest +import logging +import json +from mock import mock_open +from mockito import any, mock, when, unstub, verify +from bcipy.task.orchestrator import SessionOrchestrator +from bcipy.task import Task, TaskData +from bcipy.config import DEFAULT_PARAMETERS_PATH +from bcipy.helpers.load import load_json_parameters + + +class TestSessionOrchestrator(unittest.TestCase): + parameter_location = DEFAULT_PARAMETERS_PATH + + def setUp(self) -> None: + self.logger = mock(spec=logging.Logger) + self.logger.info = lambda x: x + self.logger.error = lambda x: x + self.logger.exception = lambda x: x + + def tearDown(self) -> None: + unstub() + + def 
test_orchestrator_add_task(self) -> None: + task = mock(spec=Task) + task.name = "test task" + task.mode = "test mode" + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + orchestrator = SessionOrchestrator() + self.assertTrue(orchestrator.tasks == []) + orchestrator.add_task(task) + self.assertTrue(len(orchestrator.tasks) == 1) + + verify(SessionOrchestrator, times=1)._init_orchestrator_save_folder(any()) + verify(SessionOrchestrator, times=1)._init_orchestrator_logger(any()) + + def test_orchestrator_add_tasks(self) -> None: + task1 = mock(spec=Task) + task1.name = "test task" + task1.mode = "test mode" + task2 = mock(spec=Task) + task2.name = "test task" + task2.mode = "test mode" + tasks = [task1, task2] + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + orchestrator = SessionOrchestrator() + self.assertTrue(orchestrator.tasks == []) + orchestrator.add_tasks(tasks) + self.assertTrue(len(orchestrator.tasks) == 2) + + self.assertEqual(orchestrator.tasks[0], task1) + self.assertEqual(orchestrator.tasks[1], task2) + + verify(SessionOrchestrator, times=1)._init_orchestrator_save_folder(any()) + verify(SessionOrchestrator, times=1)._init_orchestrator_logger(any()) + + def test_orchestrator_execute(self) -> None: + task = mock(spec=Task) + task.name = "test task" + task.mode = "test mode" + task.execute = lambda: TaskData() + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator)._init_task_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_task_logger(any()).thenReturn() + when(SessionOrchestrator)._save_data().thenReturn() + when(task).__call__( + any(), + any(), + fake=False, + 
experiment_id=any(), + alert_finished=any(), + parameters_path=any(), + last_task_dir=None, + protocol_path=any(), + progress=any(), + tasks=any(), + exit_callback=any(), + ).thenReturn(task) + orchestrator = SessionOrchestrator() + orchestrator.add_task(task) + orchestrator.execute() + + verify(task, times=1).__call__( + any(), + any(), + fake=False, + experiment_id=any(), + alert_finished=any(), + parameters_path=any(), + last_task_dir=None, + protocol_path=any(), + progress=any(), + tasks=any(), + exit_callback=any()) + verify(SessionOrchestrator, times=1)._init_orchestrator_save_folder(any()) + verify(SessionOrchestrator, times=1)._init_orchestrator_logger(any()) + verify(SessionOrchestrator, times=1)._init_task_save_folder(any()) + verify(SessionOrchestrator, times=1)._init_task_logger(any()) + verify(SessionOrchestrator, times=1)._save_data() + + @mock_open(read_data='{"Phrases": []}') + def test_orchestrator_multiple_copyphrases_loads_from_parameters_when_set(self, mock_file): + parameters = load_json_parameters(self.parameter_location, value_cast=True) + copy_phrase_location = "bcipy/parameters/experiments/phrases.json" + parameters['copy_phrases_location'] = copy_phrase_location + mock_copy_phrases = {"Phrases": [["test", 0], ["test2", 1]]} + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator)._init_task_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_task_logger(any()).thenReturn() + when(SessionOrchestrator)._save_data().thenReturn() + when(json).load(mock_file).thenReturn(mock_copy_phrases) + + orchestrator = SessionOrchestrator(parameters=parameters) + + self.assertEqual(orchestrator.copyphrases, mock_copy_phrases['Phrases']) + verify(json, times=1).load(mock_file) + + def test_orchestrator_save_data_multiple_copyphrases_saves_remaining_phrases(self): + 
when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator)._init_task_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_task_logger(any()).thenReturn() + when(SessionOrchestrator)._save_procotol_data().thenReturn() + when(SessionOrchestrator)._save_copy_phrases().thenReturn() + + orchestrator = SessionOrchestrator() + orchestrator.copyphrases = [["test", 0], ["test2", 1]] + + orchestrator._save_data() + verify(SessionOrchestrator, times=1)._save_procotol_data() + verify(SessionOrchestrator, times=1)._save_copy_phrases() + + def test_orchestrator_next_phrase(self): + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator)._init_task_save_folder(any()).thenReturn() + when(SessionOrchestrator)._init_task_logger(any()).thenReturn() + when(SessionOrchestrator)._save_procotol_data().thenReturn() + when(SessionOrchestrator).initialize_copy_phrases().thenReturn() + + orchestrator = SessionOrchestrator() + orchestrator.copyphrases = [["test", 5], ["test2", 1]] + + self.assertEqual(orchestrator.next_phrase, None) + self.assertEqual(orchestrator.starting_index, 0) + orchestrator.set_next_phrase() + self.assertEqual(orchestrator.next_phrase, "test") + self.assertEqual(orchestrator.starting_index, 5) + self.assertTrue(len(orchestrator.copyphrases) == 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/bcipy/task/tests/orchestrator/test_protocol.py b/bcipy/task/tests/orchestrator/test_protocol.py new file mode 100644 index 000000000..1ea8750fe --- /dev/null +++ b/bcipy/task/tests/orchestrator/test_protocol.py @@ -0,0 +1,72 @@ +import unittest +from bcipy.task.orchestrator.protocol import parse_protocol, serialize_protocol, validate_protocol_string +from bcipy.task.actions import 
OfflineAnalysisAction +from bcipy.task.paradigm.rsvp.calibration.calibration import RSVPCalibrationTask +from bcipy.task.paradigm.rsvp.copy_phrase import RSVPCopyPhraseTask + + +class TestTaskProtocolProcessing(unittest.TestCase): + + def test_parses_one_task(self) -> None: + sequence = 'RSVP Calibration' + parsed = parse_protocol(sequence) + assert len(parsed) == 1 + assert parsed[0] is RSVPCalibrationTask + + def test_parses_with_task_name(self) -> None: + actions = OfflineAnalysisAction.name + parsed = parse_protocol(actions) + assert len(parsed) == 1 + assert parsed[0] is OfflineAnalysisAction + + def test_parses_multiple_tasks(self) -> None: + actions = 'RSVP Calibration -> RSVP Copy Phrase' + parsed = parse_protocol(actions) + assert len(parsed) == 2 + assert parsed[0] is RSVPCalibrationTask + assert parsed[1] is RSVPCopyPhraseTask + + def test_parses_actions_and_tasks(self) -> None: + sequence = 'RSVP Calibration -> OfflineAnalysisAction -> RSVP Copy Phrase' + parsed = parse_protocol(sequence) + assert len(parsed) == 3 + assert parsed[0] is RSVPCalibrationTask + assert parsed[1] is OfflineAnalysisAction + assert parsed[2] is RSVPCopyPhraseTask + + def test_parses_sequence_with_extra_spaces(self) -> None: + actions = ' RSVP Calibration -> OfflineAnalysisAction -> RSVP Copy Phrase ' + parsed = parse_protocol(actions) + assert len(parsed) == 3 + assert parsed[0] is RSVPCalibrationTask + assert parsed[1] is OfflineAnalysisAction + assert parsed[2] is RSVPCopyPhraseTask + + def test_throws_exception_on_invalid_task(self) -> None: + actions = 'RSVP Calibration -> does not exist' + with self.assertRaises(ValueError): + parse_protocol(actions) + + def test_throws_exception_on_invalid_string(self) -> None: + actions = 'thisstringisbad' + with self.assertRaises(ValueError): + parse_protocol(actions) + + def test_validates_valid_action_string(self) -> None: + actions = 'RSVP Calibration -> RSVP Copy Phrase' + validate_protocol_string(actions) + + def 
test_throws_exception_on_invalid_action_string(self) -> None: + actions = 'RSVP Calibration -> RSVP Copy Phrase -> does not exist' + with self.assertRaises(ValueError): + validate_protocol_string(actions) + + def test_serializes_one_task(self) -> None: + actions = [RSVPCalibrationTask] + serialized = serialize_protocol(actions) + assert serialized == RSVPCalibrationTask.name + + def test_serializes_multiple_tasks(self) -> None: + sequence = [RSVPCalibrationTask, OfflineAnalysisAction, RSVPCopyPhraseTask] + serialized = serialize_protocol(sequence) + assert serialized == 'RSVP Calibration -> OfflineAnalysisAction -> RSVP Copy Phrase' diff --git a/bcipy/task/tests/paradigm/matrix/test_matrix_calibration.py b/bcipy/task/tests/paradigm/matrix/test_matrix_calibration.py index da0651efe..d6ae98e4b 100644 --- a/bcipy/task/tests/paradigm/matrix/test_matrix_calibration.py +++ b/bcipy/task/tests/paradigm/matrix/test_matrix_calibration.py @@ -5,7 +5,6 @@ import psychopy from mockito import any, mock, unstub, verify, when -import bcipy.task.base_calibration import bcipy.task.paradigm.matrix.calibration from bcipy.acquisition import LslAcquisitionClient from bcipy.acquisition.devices import DeviceSpec @@ -48,31 +47,37 @@ def setUp(self): 'show_feedback': False, 'show_preview_inquiry': False, 'stim_color': 'white', - 'stim_height': 0.6, + 'matrix_stim_height': 0.6, 'stim_jitter': 0.0, 'stim_length': 3, 'stim_number': 10, 'stim_order': 'random', - 'stim_pos_x': 0.0, - 'stim_pos_y': 0.0, + 'matrix_stim_pos_x': 0.0, + 'matrix_stim_pos_y': 0.0, 'stim_space_char': '_', 'target_color': 'white', 'target_positions': 'distributed', 'task_buffer_length': 2, 'task_color': 'white', - 'task_height': 0.1, + 'matrix_task_height': 0.1, + 'matrix_task_padding': 0.1, 'task_text': 'HELLO_WORLD', 'time_fixation': 0.1, 'time_flash': 0.1, 'time_prompt': 0.1, 'trial_window': (0.0, 0.5), 'trials_before_break': 100, + "preview_inquiry_error_prob": 0.05, 'break_message': 'Take a break!', 'trigger_type': 
'image', + 'matrix_keyboard_layout': 'QWERTY', + 'matrix_rows': 3, + 'matrix_columns': 3, + 'matrix_width': 0.6, } self.parameters = Parameters.from_cast_values(**parameters) - self.win = mock({'size': np.array([500, 500]), 'units': 'height'}) + self.servers = [mock()] device_spec = DeviceSpec(name='Testing', channels=['a', 'b', 'c'], @@ -99,6 +104,7 @@ def setUp(self): 'transform': mock(), 'evidence_type': 'ERP' }) + self.fake = False self.display = mock(spec=MatrixDisplay) self.display.first_stim_time = 0.0 @@ -132,33 +138,34 @@ def tearDown(self): """Override""" unstub() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_initialize(self, save_session_mock, trigger_handler_mock): """Test initialization""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) - MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) verify(bcipy.task.paradigm.matrix.calibration, times=1).init_matrix_display(self.parameters, self.win, any(), any()) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_execute(self, save_session_mock, trigger_handler_mock): """Test task execute""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + 
when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) when(task).write_offset_trigger().thenReturn(None) when(task).write_trigger_data(any(), any()).thenReturn(None) @@ -172,33 +179,35 @@ def test_execute(self, save_session_mock, trigger_handler_mock): any(), any()) verify(task, times=1).write_offset_trigger() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_validate_parameters_throws_task_exception_empty_parameter( self, save_session_mock, trigger_handler_mock): """Test validate parameters throws task exception when parameters is empty.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() parameters = {} + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) with self.assertRaises(Exception): - MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=parameters, - file_save=self.temp_dir) + MatrixCalibrationTask(parameters=parameters, + file_save=self.temp_dir, + fake=self.fake) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_execute_save_stimuli_positions(self, save_session_mock, trigger_handler_mock): """Test execute save stimuli positions method is called as expected.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) - task = 
MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) when(task).write_offset_trigger().thenReturn(None) when(task).write_trigger_data(any(), any()).thenReturn(None) @@ -215,18 +224,19 @@ def test_execute_save_stimuli_positions(self, save_session_mock, any(), any()) verify(task, times=1).write_offset_trigger() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_targetness(self, save_session_mock, trigger_handler_mock): """Test trigger type targetness.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # non-target symbol = 'N' @@ -244,17 +254,18 @@ def test_trigger_type_targetness(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.TARGET) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_fixation(self, save_session_mock, trigger_handler_mock): """Test trigger type fixation.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + 
when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # fixation symbol = '+' @@ -264,18 +275,19 @@ def test_trigger_type_fixation(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.FIXATION) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_prompt(self, save_session_mock, trigger_handler_mock): """Test trigger type prompt.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # prompt, index = 0, otherwise it would be a target symbol = 'P' @@ -285,19 +297,20 @@ def test_trigger_type_prompt(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.PROMPT) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_write_trigger_data_first_run(self, save_session_mock, trigger_handler_mock): """Test write trigger data when it is the first run of the task.""" handler_mock = Mock() save_session_mock.return_value = mock() trigger_handler_mock.return_value = handler_mock + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + 
(self.daq, self.servers, self.win)) - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) client_by_type_resp = {ContentType.EEG: self.eeg_client_mock} timing_mock = mock() @@ -305,66 +318,68 @@ def test_write_trigger_data_first_run(self, save_session_mock, first_run = True when(self.daq).client_by_type( ContentType.EEG).thenReturn(client_by_type_resp) - when(bcipy.task.base_calibration).offset_label('EEG').thenReturn( + when(bcipy.task.calibration).offset_label('EEG').thenReturn( 'starting_offset') - when(bcipy.task.base_calibration).convert_timing_triggers( + when(bcipy.task.calibration).convert_timing_triggers( timing, timing[0][0], any()).thenReturn(timing_mock) task.write_trigger_data(timing, first_run) self.assertEqual(2, handler_mock.add_triggers.call_count) verify(self.eeg_client_mock, times=1).offset(0.0) - verify(bcipy.task.base_calibration, times=1).offset_label('EEG') - verify(bcipy.task.base_calibration, + verify(bcipy.task.calibration, times=1).offset_label('EEG') + verify(bcipy.task.calibration, times=1).convert_timing_triggers(timing, timing[0][0], any()) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_write_trigger_data_not_first_run(self, save_session_mock, trigger_handler_mock): """Test write trigger data when it is not the first run of the task.""" handler_mock = Mock() save_session_mock.return_value = mock() trigger_handler_mock.return_value = handler_mock + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - 
file_save=self.temp_dir) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) timing_mock = mock() timing = [('a', 0.0)] first_run = False - when(bcipy.task.base_calibration).convert_timing_triggers( + when(bcipy.task.calibration).convert_timing_triggers( timing, timing[0][0], any()).thenReturn(timing_mock) task.write_trigger_data(timing, first_run) handler_mock.add_triggers.assert_called_once() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_write_offset_trigger(self, save_session_mock, trigger_handler_mock): """Test write offset trigger""" save_session_mock.return_value = mock() handler_mock = Mock() trigger_handler_mock.return_value = handler_mock + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) - task = MatrixCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + task = MatrixCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) client_by_type_resp = {ContentType.EEG: self.eeg_client_mock} when(self.daq).client_by_type( ContentType.EEG).thenReturn(client_by_type_resp) - when(bcipy.task.base_calibration).offset_label( + when(bcipy.task.calibration).offset_label( 'EEG', prefix='daq_sample_offset').thenReturn('daq_sample_offset') task.write_offset_trigger() handler_mock.close.assert_called_once() handler_mock.add_triggers.assert_called_once() verify(self.eeg_client_mock, times=1).offset(0.0) - verify(bcipy.task.base_calibration, + verify(bcipy.task.calibration, times=1).offset_label('EEG', prefix='daq_sample_offset') diff --git a/bcipy/task/tests/paradigm/rsvp/calibration/test_rsvp_calibration.py 
b/bcipy/task/tests/paradigm/rsvp/calibration/test_rsvp_calibration.py index 8011205ce..2ab8fc9dd 100644 --- a/bcipy/task/tests/paradigm/rsvp/calibration/test_rsvp_calibration.py +++ b/bcipy/task/tests/paradigm/rsvp/calibration/test_rsvp_calibration.py @@ -4,6 +4,7 @@ import psychopy from mockito import any, mock, unstub, verify, when +import bcipy.task.calibration import bcipy.task.paradigm.rsvp.calibration.calibration from bcipy.acquisition import LslAcquisitionClient from bcipy.acquisition.devices import DeviceSpec @@ -46,19 +47,20 @@ def setUp(self): 'show_feedback': False, 'show_preview_inquiry': False, 'stim_color': 'white', - 'stim_height': 0.6, + 'rsvp_stim_height': 0.6, 'stim_jitter': 0.0, 'stim_length': 3, 'stim_number': 10, 'stim_order': 'random', - 'stim_pos_x': 0.0, - 'stim_pos_y': 0.0, + 'rsvp_stim_pos_x': 0.0, + 'rsvp_stim_pos_y': 0.0, 'stim_space_char': '_', 'target_color': 'white', 'target_positions': 'distributed', 'task_buffer_length': 2, 'task_color': 'white', - 'task_height': 0.1, + 'rsvp_task_height': 0.1, + 'rsvp_task_padding': 0.1, 'task_text': 'HELLO_WORLD', 'time_fixation': 0.1, 'time_flash': 0.1, @@ -91,6 +93,8 @@ def setUp(self): } }) self.temp_dir = '' + self.fake = False + self.servers = [mock()] self.model_metadata = mock({ 'device_spec': device_spec, 'transform': mock(), @@ -108,10 +112,10 @@ def setUp(self): when(bcipy.task.paradigm.rsvp.calibration.calibration ).init_calibration_display_task(self.parameters, self.win, any(), any()).thenReturn(self.display) - when(bcipy.task.base_calibration).trial_complete_message( + when(bcipy.task.calibration).trial_complete_message( any(), any()).thenReturn([]) - when(bcipy.task.base_calibration.TriggerHandler).write().thenReturn() - when(bcipy.task.base_calibration.TriggerHandler).add_triggers( + when(bcipy.task.calibration.TriggerHandler).write().thenReturn() + when(bcipy.task.calibration.TriggerHandler).add_triggers( any()).thenReturn() when(psychopy.event).getKeys(keyList=['space', 'escape'], 
@@ -125,33 +129,37 @@ def tearDown(self): """Override""" unstub() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') - def test_initialize(self, save_session_mock, trigger_handler_mock): + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') + @patch('bcipy.task.calibration.BaseCalibrationTask.cleanup') + def test_initialize(self, save_session_mock, trigger_handler_mock, cleanup_mock): """Test initialization""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + cleanup_mock.return_value = None - RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) verify(bcipy.task.paradigm.rsvp.calibration.calibration, times=1).init_calibration_display_task(self.parameters, self.win, any(), any()) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_execute(self, save_session_mock, trigger_handler_mock): """Test task execute""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) when(task).write_offset_trigger().thenReturn(None) when(task).write_trigger_data(any(), any()).thenReturn(None) @@ 
-165,31 +173,33 @@ def test_execute(self, save_session_mock, trigger_handler_mock): any(), any()) verify(task, times=1).write_offset_trigger() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_validate_parameters_throws_task_exception_empty_parameter( self, save_session_mock, trigger_handler_mock): """Test validate parameters throws task exception when parameters is empty.""" parameters = {} save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) with self.assertRaises(Exception): - RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=parameters, - file_save=self.temp_dir) + RSVPCalibrationTask(parameters=parameters, + file_save=self.temp_dir, + fake=self.fake) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_targetness(self, save_session_mock, trigger_handler_mock): """Test trigger type targetness.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # non-target symbol = 'N' @@ -207,17 +217,18 @@ def test_trigger_type_targetness(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.TARGET) - 
@patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_fixation(self, save_session_mock, trigger_handler_mock): """Test trigger type fixation.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # fixation symbol = '+' @@ -227,17 +238,18 @@ def test_trigger_type_fixation(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.FIXATION) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_prompt(self, save_session_mock, trigger_handler_mock): """Test trigger type prompt.""" + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # prompt, index = 0, otherwise it would be a target symbol = 'P' @@ -247,17 +259,18 @@ def test_trigger_type_prompt(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.PROMPT) - 
@patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_trigger_type_preview(self, save_session_mock, trigger_handler_mock): """Test trigger type preview.""" save_session_mock.return_value = mock() trigger_handler_mock.return_value = mock() - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) # preview, index > 0, otherwise it would be a prompt symbol = 'inquiry_preview' @@ -267,18 +280,19 @@ def test_trigger_type_preview(self, save_session_mock, self.assertEqual(task.trigger_type(symbol, target, index), TriggerType.PREVIEW) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_write_trigger_data_first_run(self, save_session_mock, trigger_handler_mock): """Test write trigger data when it is the first run of the task.""" handler_mock = Mock() save_session_mock.return_value = mock() + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) trigger_handler_mock.return_value = handler_mock - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) client_by_type_resp = {ContentType.EEG: self.eeg_client_mock} timing_mock = mock() @@ -286,57 +300,59 @@ def 
test_write_trigger_data_first_run(self, save_session_mock, first_run = True when(self.daq).client_by_type( ContentType.EEG).thenReturn(client_by_type_resp) - when(bcipy.task.base_calibration).offset_label('EEG').thenReturn( + when(bcipy.task.calibration).offset_label('EEG').thenReturn( 'starting_offset') - when(bcipy.task.base_calibration).convert_timing_triggers( + when(bcipy.task.calibration).convert_timing_triggers( timing, timing[0][0], any()).thenReturn(timing_mock) task.write_trigger_data(timing, first_run) self.assertEqual(2, handler_mock.add_triggers.call_count) verify(self.eeg_client_mock, times=1).offset(0.0) - verify(bcipy.task.base_calibration, times=1).offset_label('EEG') - verify(bcipy.task.base_calibration, + verify(bcipy.task.calibration, times=1).offset_label('EEG') + verify(bcipy.task.calibration, times=1).convert_timing_triggers(timing, timing[0][0], any()) - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_write_trigger_data_not_first_run(self, save_session_mock, trigger_handler_mock): """Test write trigger data when it is not the first run of the task.""" handler_mock = Mock() save_session_mock.return_value = mock() trigger_handler_mock.return_value = handler_mock - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) timing_mock = mock() timing = [('a', 0.0)] first_run = False - when(bcipy.task.base_calibration).convert_timing_triggers( + when(bcipy.task.calibration).convert_timing_triggers( timing, timing[0][0], any()).thenReturn(timing_mock) 
task.write_trigger_data(timing, first_run) handler_mock.add_triggers.assert_called_once() - @patch('bcipy.task.base_calibration.TriggerHandler') - @patch('bcipy.task.base_calibration._save_session_related_data') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') def test_write_offset_trigger(self, save_session_mock, trigger_handler_mock): """Test write offset trigger""" save_session_mock.return_value = mock() handler_mock = Mock() trigger_handler_mock.return_value = handler_mock - task = RSVPCalibrationTask(win=self.win, - daq=self.daq, - parameters=self.parameters, - file_save=self.temp_dir) + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) client_by_type_resp = {ContentType.EEG: self.eeg_client_mock} when(self.daq).client_by_type( ContentType.EEG).thenReturn(client_by_type_resp) - when(bcipy.task.base_calibration).offset_label( + when(bcipy.task.calibration).offset_label( 'EEG', prefix='daq_sample_offset').thenReturn('daq_sample_offset') when(TriggerHandler).close().thenReturn() @@ -345,9 +361,71 @@ def test_write_offset_trigger(self, save_session_mock, handler_mock.close.assert_called_once() handler_mock.add_triggers.assert_called_once() verify(self.eeg_client_mock, times=1).offset(0.0) - verify(bcipy.task.base_calibration, + verify(bcipy.task.calibration, times=1).offset_label('EEG', prefix='daq_sample_offset') + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') + def test_setup(self, save_session_mock, trigger_handler_mock): + """Test setup""" + save_session_mock.return_value = mock() + handler_mock = Mock() + trigger_handler_mock.return_value = handler_mock + when(bcipy.task.calibration).init_acquisition(any(), any(), server=self.fake).thenReturn( + (self.daq, 
self.servers)) + when(bcipy.task.calibration).init_display_window(self.parameters).thenReturn( + self.win) + + self.assertFalse(RSVPCalibrationTask.initalized) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) + + self.assertTrue(task.initalized) + verify(bcipy.task.calibration, times=1).init_acquisition( + self.parameters, self.temp_dir, server=self.fake) + verify(bcipy.task.calibration, times=1).init_display_window( + self.parameters) + self.assertEqual((self.daq, self.servers, self.win), + task.setup(self.parameters, self.temp_dir, self.fake)) + + @patch('bcipy.task.calibration.TriggerHandler') + @patch('bcipy.task.calibration._save_session_related_data') + def test_cleanup(self, save_session_mock, trigger_handler_mock): + """Test cleanup""" + save_session_mock.return_value = mock() + handler_mock = Mock() + trigger_handler_mock.return_value = handler_mock + when(bcipy.task.calibration.BaseCalibrationTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + + # Mock the default cleanup + when(bcipy.task.calibration.BaseCalibrationTask).write_offset_trigger().thenReturn(None) + when(bcipy.task.calibration.BaseCalibrationTask).exit_display().thenReturn(None) + when(bcipy.task.calibration.BaseCalibrationTask).wait().thenReturn(None) + + # Mock the initialized cleanup + when(self.daq).stop_acquisition().thenReturn(None) + when(self.daq).cleanup().thenReturn(None) + when(self.servers[0]).stop().thenReturn(None) + when(self.win).close().thenReturn(None) + task = RSVPCalibrationTask(parameters=self.parameters, + file_save=self.temp_dir, + fake=self.fake) + # because the task is not initialized via setup, we need to set it to True here + task.initalized = True + + task.cleanup() + + verify(self.daq, times=1).stop_acquisition() + verify(self.daq, times=1).cleanup() + verify(self.servers[0], times=1).stop() + verify(self.win, times=1).close() + verify(bcipy.task.calibration.BaseCalibrationTask, 
times=1).setup(any(), any(), any()) + verify(bcipy.task.calibration.BaseCalibrationTask, times=1).write_offset_trigger() + verify(bcipy.task.calibration.BaseCalibrationTask, times=1).exit_display() + verify(bcipy.task.calibration.BaseCalibrationTask, times=1).wait() + if __name__ == '__main__': unittest.main() diff --git a/bcipy/task/tests/paradigm/rsvp/test_copy_phrase.py b/bcipy/task/tests/paradigm/rsvp/test_copy_phrase.py index 1ac1980ee..59706bfc1 100644 --- a/bcipy/task/tests/paradigm/rsvp/test_copy_phrase.py +++ b/bcipy/task/tests/paradigm/rsvp/test_copy_phrase.py @@ -14,7 +14,7 @@ from bcipy.acquisition.multimodal import ContentType from bcipy.config import DEFAULT_ENCODING from bcipy.helpers.copy_phrase_wrapper import CopyPhraseWrapper -from bcipy.helpers.exceptions import TaskConfigurationException +from bcipy.exceptions import TaskConfigurationException from bcipy.helpers.parameters import Parameters from bcipy.helpers.stimuli import InquirySchedule from bcipy.helpers.triggers import TriggerHandler @@ -48,7 +48,7 @@ def setUp(self): 'max_inq_len': 50, 'max_inq_per_series': 10, 'max_minutes': 20, - 'min_inq_len': 1, + 'min_inq_len': 5, 'max_selections': 50, 'max_incorrect': 10, 'notch_filter_frequency': 60.0, @@ -60,19 +60,20 @@ def setUp(self): 'show_preview_inquiry': False, 'spelled_letters_count': 0, 'stim_color': 'white', - 'stim_height': 0.6, + 'rsvp_stim_height': 0.6, 'stim_length': 10, 'stim_number': 100, 'stim_jitter': 0.0, 'stim_order': 'random', - 'stim_pos_x': 0.0, - 'stim_pos_y': 0.0, + 'rsvp_stim_pos_x': 0.0, + 'rsvp_stim_pos_y': 0.0, 'stim_space_char': '–', 'summarize_session': False, 'target_color': 'white', 'task_buffer_length': 2, 'task_color': 'white', - 'task_height': 0.1, + 'rsvp_task_height': 0.1, + 'rsvp_task_padding': 0.1, 'task_text': 'HELLO_WORLD', "preview_inquiry_error_prob": 0.05, 'info_pos_x': 0.0, @@ -84,7 +85,7 @@ def setUp(self): 'trigger_type': 'image', } self.parameters = Parameters.from_cast_values(**parameters) - + 
self.fake = True self.win = mock({'size': [500, 500], 'units': 'height'}) device_spec = DeviceSpec(name='Testing', @@ -108,6 +109,7 @@ def setUp(self): ContentType.EEG: self.eeg_client_mock } }) + self.servers = [mock()] when(self.daq).get_client(ContentType.EEG).thenReturn(self.eeg_client_mock) self.temp_dir = tempfile.mkdtemp() self.model_metadata = mock({ @@ -115,7 +117,7 @@ def setUp(self): 'transform': mock(), 'evidence_type': 'ERP' }) - self.signal_model = mock({'metadata': self.model_metadata}) + self.signal_models = [mock({'metadata': self.model_metadata})] self.language_model = mock() decision_maker = mock() @@ -131,12 +133,17 @@ def setUp(self): when(bcipy.task.paradigm.rsvp.copy_phrase).CopyPhraseWrapper( ...).thenReturn(self.copy_phrase_wrapper) + # mock data for initial series series_gen = mock_inquiry_data() when(self.copy_phrase_wrapper).initialize_series().thenReturn( next(series_gen)) when(TriggerHandler).write().thenReturn() when(TriggerHandler).add_triggers(any()).thenReturn() + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).get_language_model().thenReturn( + self.language_model) + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).get_signal_models().thenReturn( + self.signal_models) def tearDown(self): """Override""" @@ -145,79 +152,70 @@ def tearDown(self): def test_initialize(self): """Test initialization""" + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) RSVPCopyPhraseTask( - win=self.win, - daq=self.daq, parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + fake=self.fake) def test_validate_parameters(self): + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) task = RSVPCopyPhraseTask( - win=self.win, - daq=self.daq, parameters=self.parameters, 
file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + fake=self.fake) task.validate_parameters() def test_validate_parameters_throws_task_exception_missing_parameter(self): parameters = {} + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) with self.assertRaises(TaskConfigurationException): RSVPCopyPhraseTask( - win=self.win, - daq=self.daq, parameters=parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) def test_validate_parameters_throws_task_exception_excess_prestim_length(self): self.parameters['prestim_length'] = 1000 + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) with self.assertRaises(TaskConfigurationException): RSVPCopyPhraseTask( - win=self.win, - daq=self.daq, parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) def test_validate_parameters_throws_task_exception_excess_trial_window(self): self.parameters['trial_window'] = "0.0:1000.0" + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) with self.assertRaises(TaskConfigurationException): RSVPCopyPhraseTask( - win=self.win, - daq=self.daq, parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) @patch('bcipy.task.paradigm.rsvp.copy_phrase.get_user_input') @patch('bcipy.task.paradigm.rsvp.copy_phrase.trial_complete_message') def test_execute_without_inquiry(self, message_mock, user_input_mock): """User should be able to exit the task without viewing any inquiries""" - - task = 
RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) user_input_mock.return_value = False @@ -227,7 +225,7 @@ def test_execute_without_inquiry(self, message_mock, verify(self.copy_phrase_wrapper, times=1).initialize_series() verify(self.display, times=0).preview_inquiry() verify(self.display, times=0).do_inquiry() - self.assertEqual(self.temp_dir, result) + self.assertEqual(self.temp_dir, result.save_path) self.assertTrue( Path(task.session_save_location).is_file(), @@ -242,14 +240,12 @@ def test_execute_without_inquiry(self, message_mock, def test_execute_fake_data_single_inquiry(self, process_data_mock, message_mock, user_input_mock): """Test that fake data does not use the decision maker""" - - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) # Execute a single inquiry then `escape` to stop user_input_mock.side_effect = [True, False] @@ -265,7 +261,7 @@ def test_execute_fake_data_single_inquiry(self, process_data_mock, message_mock, # Assertions verify(self.copy_phrase_wrapper, times=2).initialize_series() verify(self.display, times=1).do_inquiry() - self.assertEqual(self.temp_dir, result) + self.assertEqual(self.temp_dir, result.save_path) self.assertTrue( Path(task.session_save_location).is_file(), @@ -281,13 +277,12 @@ def test_max_inq_len(self, 
process_data_mock, message_mock, user_input_mock): """Test stoppage criteria for the max inquiry length""" self.parameters['max_inq_len'] = 2 - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) # Don't provide any `escape` input from the user user_input_mock.return_value = True @@ -302,7 +297,7 @@ def test_max_inq_len(self, process_data_mock, message_mock, # Assertions verify(self.display, times=2).do_inquiry() - self.assertEqual(self.temp_dir, result) + self.assertEqual(self.temp_dir, result.save_path) self.assertTrue( Path(task.session_save_location).is_file(), @@ -321,14 +316,12 @@ def test_spelling_complete(self, process_data_mock, """Test that the task stops when the copy_phrase has been correctly spelled.""" self.parameters['task_text'] = 'Hello' self.parameters['spelled_letters_count'] = 4 - - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) # Don't provide any `escape` input from the user user_input_mock.return_value = True @@ -343,7 +336,7 @@ def test_spelling_complete(self, process_data_mock, # Assertions verify(self.display, times=1).do_inquiry() - self.assertEqual(self.temp_dir, result) + self.assertEqual(self.temp_dir, result.save_path) self.assertTrue( Path(task.session_save_location).is_file(), @@ -359,25 +352,23 @@ def 
test_spelled_letters(self): """Spelled letters should reset if count is larger than copy phrase.""" self.parameters['task_text'] = 'Hi' self.parameters['spelled_letters_count'] = 3 - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) self.assertEqual(task.starting_spelled_letters(), 0) def test_stims_for_eeg(self): """The correct stims should be sent to get_device_data_for_decision""" - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) timings1 = [['calibration_trigger', 2.0539278959913645], ['+', 3.7769652379938634], ['Y', 4.247819707990857], ['S', 4.46274590199755], ['W', 4.679621118993964], @@ -419,15 +410,14 @@ def test_stims_for_eeg(self): @patch('bcipy.task.paradigm.rsvp.copy_phrase.get_device_data_for_decision') def test_next_letter(self, process_data_mock, message_mock, user_input_mock): + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) """Test that the task stops when the copy_phrase has been correctly spelled.""" self.parameters['task_text'] = 'Hello' - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - 
signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) task.spelled_text = 'H' self.assertEqual(task.next_target(), 'e') @@ -450,13 +440,12 @@ def test_execute_fake_data_with_preview(self, process_data_mock, message_mock, user_input_mock): """Test that preview is displayed""" self.parameters['show_preview_inquiry'] = True - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, - fake=True) + + fake=self.fake) # Execute a single inquiry then `escape` to stop user_input_mock.side_effect = [True, False] @@ -475,7 +464,7 @@ def test_execute_fake_data_with_preview(self, process_data_mock, message_mock, verify(self.copy_phrase_wrapper, times=2).initialize_series() verify(self.display, times=1).do_inquiry() verify(self.copy_phrase_wrapper, times=1).add_evidence(EvidenceType.BTN, ...) 
- self.assertEqual(self.temp_dir, result) + self.assertEqual(self.temp_dir, result.save_path) @patch('bcipy.task.paradigm.rsvp.copy_phrase.init_evidence_evaluator') @patch('bcipy.task.paradigm.rsvp.copy_phrase.get_user_input') @@ -531,7 +520,8 @@ def test_execute_real_data_single_inquiry(self, process_data_mock, message_mock, when(bcipy.task.paradigm.rsvp.copy_phrase).CopyPhraseWrapper( ...).thenReturn(copy_phrase_wrapper_mock) - + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) # mock data for initial series when(copy_phrase_wrapper_mock).initialize_series().thenReturn( (False, @@ -576,12 +566,9 @@ def test_execute_real_data_single_inquiry(self, process_data_mock, message_mock, 'nontarget', 'nontarget' ])) - task = RSVPCopyPhraseTask(win=self.win, - daq=self.daq, - parameters=self.parameters, + task = RSVPCopyPhraseTask(parameters=self.parameters, file_save=self.temp_dir, - signal_models=[self.signal_model], - language_model=self.language_model, + fake=False) # Execute a single inquiry then `escape` to stop @@ -597,7 +584,7 @@ def test_execute_real_data_single_inquiry(self, process_data_mock, message_mock, verify(copy_phrase_wrapper_mock, times=1).decide(...) 
verify(self.display, times=0).preview_inquiry() verify(self.display, times=1).do_inquiry() - self.assertEqual(self.temp_dir, result) + self.assertEqual(self.temp_dir, result.save_path) self.assertTrue( Path(task.session_save_location).is_file(), @@ -606,6 +593,62 @@ def test_execute_real_data_single_inquiry(self, process_data_mock, message_mock, session = Session.from_dict(json.load(json_file)) self.assertEqual(1, session.total_number_series) + def test_setup(self): + """Test setup""" + + when(bcipy.task.paradigm.rsvp.copy_phrase).init_acquisition(any(), any(), server=self.fake).thenReturn( + (self.daq, self.servers)) + when(bcipy.task.paradigm.rsvp.copy_phrase).init_display_window(self.parameters).thenReturn( + self.win) + + self.assertFalse(RSVPCopyPhraseTask.initalized) + task = RSVPCopyPhraseTask(parameters=self.parameters, + file_save=self.temp_dir, + + fake=self.fake) + + self.assertTrue(task.initalized) + verify(bcipy.task.paradigm.rsvp.copy_phrase, times=1).init_acquisition( + self.parameters, self.temp_dir, server=self.fake) + verify(bcipy.task.paradigm.rsvp.copy_phrase, times=1).init_display_window( + self.parameters) + self.assertEqual((self.daq, self.servers, self.win), + task.setup(self.parameters, self.temp_dir, self.fake)) + + def test_cleanup(self): + """Test cleanup""" + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).setup(any(), any(), any()).thenReturn( + (self.daq, self.servers, self.win)) + + # Mock the default cleanup + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).write_offset_trigger().thenReturn(None) + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).exit_display().thenReturn(None) + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).save_session_data().thenReturn(None) + when(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask).wait().thenReturn(None) + + # Mock the initialized cleanup + when(self.daq).stop_acquisition().thenReturn(None) + when(self.daq).cleanup().thenReturn(None) 
+ when(self.servers[0]).stop().thenReturn(None) + when(self.win).close().thenReturn(None) + task = RSVPCopyPhraseTask(parameters=self.parameters, + file_save=self.temp_dir, + + fake=self.fake) + # because the task is not initialized via setup, we need to set it to True here + task.initalized = True + + task.cleanup() + + verify(self.daq, times=1).stop_acquisition() + verify(self.daq, times=1).cleanup() + verify(self.servers[0], times=1).stop() + verify(self.win, times=1).close() + verify(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask, times=1).setup(any(), any(), any()) + verify(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask, times=1).write_offset_trigger() + verify(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask, times=1).exit_display() + verify(bcipy.task.paradigm.rsvp.copy_phrase.RSVPCopyPhraseTask, times=1).wait() + def mock_inquiry_data(): """Generator that yields data mocking the copy_phrase_wrapper initialize_series method""" diff --git a/bcipy/tests/test_bci_main.py b/bcipy/tests/test_bci_main.py index edc7b1a2f..76edfa019 100644 --- a/bcipy/tests/test_bci_main.py +++ b/bcipy/tests/test_bci_main.py @@ -1,560 +1,133 @@ -import logging import unittest -from mockito import (any, mock, unstub, verify, verifyNoUnwantedInteractions, - verifyStubbedInvocationsAreUsed, when) +from mockito import any, mock, unstub, verify, when from bcipy import main -from bcipy.config import (DEFAULT_EXPERIMENT_ID, DEFAULT_PARAMETERS_PATH, - STATIC_AUDIO_PATH) -from bcipy.helpers.exceptions import UnregisteredExperimentException -from bcipy.main import _clean_up_session, bci_main, execute_task -from bcipy.task import TaskType +from bcipy.config import DEFAULT_PARAMETERS_PATH +from bcipy.exceptions import (BciPyCoreException, + UnregisteredExperimentException) +from bcipy.main import bci_main +from bcipy.task.orchestrator import SessionOrchestrator -logging.disable(logging.CRITICAL) - -class TestBciMain(unittest.TestCase): - - parameter_location = 
DEFAULT_PARAMETERS_PATH - data_save_location = '/' - save_location = '/' - parameters = { - 'acq_mode': 'EEG', - 'data_save_loc': data_save_location, - 'log_name': 'test_log', - 'fake_data': False, - 'signal_model_path': '', - 'lm_path': '', - 'alert_sound_file': 'test.wav', - } - system_info = { - 'bcipy_version': 'test_version' - } - user = 'test_user' - task = mock() - task.label = 'RSVP Calibration' - experiment = DEFAULT_EXPERIMENT_ID - alert = False - fake = parameters['fake_data'] - - def tearDown(self) -> None: - verifyStubbedInvocationsAreUsed() - verifyNoUnwantedInteractions() - unstub() - - def test_bci_main_default_experiment(self) -> None: - when(main).validate_experiment(self.experiment).thenReturn(True) - when(main).validate_bcipy_session(self.parameters, self.fake).thenReturn(True) - when(main).load_json_parameters(self.parameter_location, value_cast=True).thenReturn( - self.parameters - ) - when(main).visualize_session_data(self.save_location, self.parameters).thenReturn(None) - when(main).get_system_info().thenReturn(self.system_info) - when(main).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment, - ).thenReturn(self.save_location) - when(main).configure_logger( - self.save_location, - version=self.system_info['bcipy_version'] - ) - when(main).collect_experiment_field_data(self.experiment, self.save_location) - when(main).execute_task( - self.task, - self.parameters, - self.save_location, - self.alert, - self.fake).thenReturn(True) - - response = bci_main(self.parameter_location, self.user, self.task) - self.assertEqual(response, True) - - # validate all the calls happen as expected and the correct # of times - verify(main, times=1).validate_experiment(self.experiment) - verify(main, times=1).validate_bcipy_session(self.parameters, self.fake) - verify(main, times=1).load_json_parameters(self.parameter_location, value_cast=True) - verify(main, 
times=1).get_system_info() - verify(main, times=1).visualize_session_data(self.save_location, self.parameters) - verify(main, times=1).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment) - verify(main, times=1).configure_logger( - self.save_location, - version=self.system_info['bcipy_version']) - verify(main, times=1).collect_experiment_field_data(self.experiment, self.save_location) - verify(main, times=1).execute_task(self.task, self.parameters, self.save_location, self.alert, self.fake) - - def test_bci_main_invalid_experiment(self) -> None: - experiment = 'does_not_exist' - with self.assertRaises(UnregisteredExperimentException): - bci_main(self.parameter_location, self.user, self.task, experiment) - - def test_invalid_parameter_location(self) -> None: - invalid_parameter_location = 'does/not/exist.json' - when(main).validate_experiment(self.experiment).thenReturn(True) - - with self.assertRaises(FileNotFoundError): - bci_main(invalid_parameter_location, self.user, self.task) - - verify(main, times=1).validate_experiment(self.experiment) - - def test_bci_main_visualize(self) -> None: - """Test bci_main with visualization enabled.""" - when(main).validate_experiment(self.experiment).thenReturn(True) - when(main).validate_bcipy_session(self.parameters, self.fake).thenReturn(True) - when(main).load_json_parameters(self.parameter_location, value_cast=True).thenReturn( - self.parameters - ) - when(main).visualize_session_data(self.save_location, self.parameters).thenReturn(None) - when(main).get_system_info().thenReturn(self.system_info) - when(main).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment, - ).thenReturn(self.save_location) - when(main).configure_logger( - self.save_location, - version=self.system_info['bcipy_version'] - ) - 
when(main).collect_experiment_field_data(self.experiment, self.save_location) - when(main).execute_task( - self.task, - self.parameters, - self.save_location, - self.alert, - self.fake).thenReturn(True) - - response = bci_main(self.parameter_location, self.user, self.task, visualize=True) - self.assertEqual(response, True) - - # validate all the calls happen as expected and the correct # of times - verify(main, times=1).validate_experiment(self.experiment) - verify(main, times=1).validate_bcipy_session(self.parameters, self.fake) - verify(main, times=1).load_json_parameters(self.parameter_location, value_cast=True) - verify(main, times=1).get_system_info() - verify(main, times=1).visualize_session_data(self.save_location, self.parameters) - verify(main, times=1).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment) - verify(main, times=1).configure_logger( - self.save_location, - version=self.system_info['bcipy_version']) - verify(main, times=1).collect_experiment_field_data(self.experiment, self.save_location) - verify(main, times=1).execute_task(self.task, self.parameters, self.save_location, self.alert, self.fake) - - def test_bci_main_visualize_disabled(self) -> None: - """Test bci_main with visualization disabled.""" - when(main).validate_experiment(self.experiment).thenReturn(True) - when(main).validate_bcipy_session(self.parameters, self.fake).thenReturn(True) - when(main).load_json_parameters(self.parameter_location, value_cast=True).thenReturn( - self.parameters - ) - when(main).get_system_info().thenReturn(self.system_info) - when(main).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment, - ).thenReturn(self.save_location) - when(main).configure_logger( - self.save_location, - version=self.system_info['bcipy_version'] - ) - 
when(main).collect_experiment_field_data(self.experiment, self.save_location) - when(main).execute_task( - self.task, - self.parameters, - self.save_location, - self.alert, - self.fake).thenReturn(True) - - response = bci_main(self.parameter_location, self.user, self.task, visualize=False) - self.assertEqual(response, True) - - # validate all the calls happen as expected and the correct # of times - verify(main, times=1).validate_experiment(self.experiment) - verify(main, times=1).validate_bcipy_session(self.parameters, self.fake) - verify(main, times=1).load_json_parameters(self.parameter_location, value_cast=True) - verify(main, times=1).get_system_info() - verify(main, times=1).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment) - verify(main, times=1).configure_logger( - self.save_location, - version=self.system_info['bcipy_version']) - verify(main, times=1).collect_experiment_field_data(self.experiment, self.save_location) - verify(main, times=1).execute_task(self.task, self.parameters, self.save_location, self.alert, self.fake) - - def test_bci_main_fake(self) -> None: - """Test bci_main with fake data override.""" - fake = True - when(main).validate_experiment(self.experiment).thenReturn(True) - when(main).validate_bcipy_session(self.parameters, fake).thenReturn(True) - when(main).load_json_parameters(self.parameter_location, value_cast=True).thenReturn( - self.parameters - ) - when(main).visualize_session_data(self.save_location, self.parameters).thenReturn(None) - when(main).get_system_info().thenReturn(self.system_info) - when(main).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment, - ).thenReturn(self.save_location) - when(main).configure_logger( - self.save_location, - version=self.system_info['bcipy_version'] - ) - 
when(main).collect_experiment_field_data(self.experiment, self.save_location) - when(main).execute_task( - self.task, - self.parameters, - self.save_location, - self.alert, - fake).thenReturn(True) - - response = bci_main(self.parameter_location, self.user, self.task, fake=fake) - self.assertEqual(response, True) - - # validate all the calls happen as expected and the correct # of times - verify(main, times=1).validate_experiment(self.experiment) - verify(main, times=1).validate_bcipy_session(self.parameters, fake) - verify(main, times=1).load_json_parameters(self.parameter_location, value_cast=True) - verify(main, times=1).get_system_info() - verify(main, times=1).visualize_session_data(self.save_location, self.parameters) - verify(main, times=1).init_save_data_structure( - self.data_save_location, - self.user, - self.parameter_location, - task=self.task.label, - experiment_id=self.experiment) - verify(main, times=1).configure_logger( - self.save_location, - version=self.system_info['bcipy_version']) - verify(main, times=1).collect_experiment_field_data(self.experiment, self.save_location) - verify(main, times=1).execute_task(self.task, self.parameters, self.save_location, self.alert, fake) - - -class TestCleanUpSession(unittest.TestCase): - - def tearDown(self) -> None: - unstub() - - def test_clean_up_no_server(self) -> None: - daq = mock() - display = mock() - servers = [] - - # mock the required daq calls - when(daq).stop_acquisition() - when(daq).cleanup() - - # mock the required display call - when(display).close() - - response = _clean_up_session(display, daq, servers) - self.assertTrue(response) - - verify(daq, times=1).stop_acquisition() - verify(daq, times=1).cleanup() - verify(display, times=1).close() - - def test_clean_up_with_server(self) -> None: - daq = mock() - display = mock() - server = mock() - servers = [server] - - # mock the required daq calls - when(daq).stop_acquisition() - when(daq).cleanup() - - # mock the required display call - 
when(display).close() - - # mock the required server call - when(server).stop() - - response = _clean_up_session(display, daq, servers) - self.assertTrue(response) - - verify(daq, times=1).stop_acquisition() - verify(daq, times=1).cleanup() - verify(display, times=1).close() - verify(server, times=1).stop() - - -class TestExecuteTask(unittest.TestCase): +class TestBCIMain(unittest.TestCase): def setUp(self) -> None: + self.parameters_path = DEFAULT_PARAMETERS_PATH self.parameters = { - 'acq_mode': 'EEG', - 'k_folds': 10, - 'is_txt_stim': True, - 'signal_model_path': '', - 'alert_sound_file': 'test.wav', + 'fake_data': False, + 'parameter_location': False, + 'visualize': False, + 'data_save_loc': 'data/', } - self.save_folder = '/' + self.user = 'test user' + self.experiment = 'default' self.alert = False - self.task = TaskType(1) - self.fake = True - self.display_mock = mock() - self.daq = mock() - self.eeg_client = mock() - when(self.daq).get_client('EEG').thenReturn(self.eeg_client) - self.server = [mock()] + self.visualize = False + self.fake = False + self.logger = mock() + self.logger.info = lambda x: x def tearDown(self) -> None: unstub() - def test_execute_task_fake_data(self) -> None: - response = (self.daq, self.server) - when(main).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake - ).thenReturn(response) - when(main).init_display_window(self.parameters).thenReturn(self.display_mock) - when(main).print_message(self.display_mock, any()) - when(main).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=None, - signal_models=[], - fake=self.fake, - ) - when(main)._clean_up_session(self.display_mock, self.daq, self.server) - - execute_task(self.task, self.parameters, self.save_folder, self.alert, self.fake) - - verify(main, times=1).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake) - verify(main, 
times=1).init_display_window(self.parameters) - verify(main, times=1).print_message(self.display_mock, any()) - verify(main, times=1).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=None, - signal_models=[], - fake=self.fake, - ) - verify(main, times=1)._clean_up_session(self.display_mock, self.daq, self.server) - - def test_execute_task_real_data(self) -> None: - self.fake = False - response = (self.daq, self.server) - when(main).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake - ).thenReturn(response) - when(main).init_display_window(self.parameters).thenReturn(self.display_mock) - when(main).print_message(self.display_mock, any()) - when(main).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=None, - signal_models=[], - fake=self.fake, - ) - when(main)._clean_up_session(self.display_mock, self.daq, self.server) - - execute_task(self.task, self.parameters, self.save_folder, self.alert, self.fake) - - verify(main, times=1).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake) - verify(main, times=1).init_display_window(self.parameters) - verify(main, times=1).print_message(self.display_mock, any()) - verify(main, times=1).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=None, - signal_models=[], - fake=self.fake, - ) - verify(main, times=1)._clean_up_session(self.display_mock, self.daq, self.server) - - def test_execute_task_non_calibration_real_data(self) -> None: - self.fake = False - model_path = "data/mycalib/" - self.parameters['signal_model_path'] = model_path - self.task = TaskType(2) - signal_model = mock() - language_model = mock() - file_name = 'test' - load_model_response = [signal_model] - eeg_response = (self.daq, self.server) - when(main).init_eeg_acquisition( - self.parameters, - self.save_folder, - 
server=self.fake).thenReturn(eeg_response) - when(main).init_display_window(self.parameters).thenReturn(self.display_mock) - when(main).print_message(self.display_mock, any()) - when(main).choose_signal_models(['EEG']).thenReturn(load_model_response) - when(main).init_language_model(self.parameters).thenReturn(language_model) - when(main).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=language_model, - signal_models=[signal_model], - fake=self.fake, - ) - when(main)._clean_up_session(self.display_mock, self.daq, self.server) - - execute_task(self.task, self.parameters, self.save_folder, self.alert, self.fake) - - verify(main, times=1).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake) - verify(main, times=1).init_display_window(self.parameters) - verify(main, times=1).print_message(self.display_mock, any()) - verify(main, times=1).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=language_model, - signal_models=[signal_model], - fake=self.fake, - ) - verify(main, times=1).choose_signal_models(['EEG']) - verify(main, times=1)._clean_up_session(self.display_mock, self.daq, self.server) - - def test_execute_language_model_enabled(self) -> None: - self.fake = False - self.task = TaskType(2) # set to a noncalibration task - - # mock the signal and language models - signal_model = mock() - file_name = 'test' - language_model = mock() - load_model_response = [signal_model] - - # mock the behavior of execute task - eeg_response = (self.daq, self.server) - when(main).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake).thenReturn(eeg_response) - when(main).init_language_model(self.parameters).thenReturn(language_model) - when(main).init_display_window(self.parameters).thenReturn(self.display_mock) - when(main).print_message(self.display_mock, any()) - 
when(main).choose_signal_models(['EEG']).thenReturn(load_model_response) - when(main).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=language_model, - signal_models=[signal_model], - fake=self.fake, - ) - when(main)._clean_up_session(self.display_mock, self.daq, self.server) - - execute_task(self.task, self.parameters, self.save_folder, self.alert, self.fake) - - verify(main, times=1).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake) - verify(main, times=1).init_display_window(self.parameters) - verify(main, times=1).print_message(self.display_mock, any()) - verify(main, times=1).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=language_model, - signal_models=[signal_model], - fake=self.fake, - ) - verify(main, times=1).choose_signal_models(['EEG']) - verify(main, times=1).init_language_model(self.parameters) - verify(main, times=1)._clean_up_session(self.display_mock, self.daq, self.server) - - def test_execute_with_alert_enabled(self): - expected_alert_path = f"{STATIC_AUDIO_PATH}/{self.parameters['alert_sound_file']}" - response = (self.daq, self.server) - when(main).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake, - ).thenReturn(response) - when(main).init_display_window(self.parameters).thenReturn(self.display_mock) - when(main).print_message(self.display_mock, any()) - when(main).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=None, - signal_models=[], - fake=self.fake, - ) - when(main)._clean_up_session(self.display_mock, self.daq, self.server) - when(main).play_sound(expected_alert_path) - - execute_task(self.task, self.parameters, self.save_folder, True, self.fake) - - verify(main, times=1).init_eeg_acquisition( - self.parameters, - self.save_folder, - server=self.fake) - verify(main, 
times=1).init_display_window(self.parameters) - verify(main, times=1).print_message(self.display_mock, any()) - verify(main, times=1).start_task( - self.display_mock, - self.daq, - self.task, - self.parameters, - self.save_folder, - language_model=None, - signal_models=[], - fake=self.fake, - ) - verify(main, times=1)._clean_up_session(self.display_mock, self.daq, self.server) - verify(main, times=1).play_sound(expected_alert_path) + def test_bci_main_fails_without_experiment_or_task(self) -> None: + with self.assertRaises(BciPyCoreException): + bci_main( + parameter_location=self.parameters_path, + user=self.user, + alert=self.alert, + visualize=self.visualize, + fake=self.fake + ) + + def test_bcipy_main_fails_with_invalid_experiment(self) -> None: + when(main).validate_bcipy_session(any(), any()).thenRaise(UnregisteredExperimentException) + with self.assertRaises(UnregisteredExperimentException): + bci_main( + parameter_location=self.parameters_path, + user=self.user, + experiment_id='invalid_experiment', + alert=self.alert, + visualize=self.visualize, + fake=self.fake + ) + + def test_bci_main_runs_with_valid_experiment(self) -> None: + when(main).validate_bcipy_session(any(), any()).thenReturn(True) # Mock the validate_bcipy_session function + when(main).load_json_parameters( + any(), value_cast=any()).thenReturn( + self.parameters) # Mock the load_json_parameters function + when(SessionOrchestrator).get_system_info().thenReturn(None) + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn(None) + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator).initialize_copy_phrases().thenReturn(None) + when(SessionOrchestrator).add_tasks(any()).thenReturn(None) + when(SessionOrchestrator).execute().thenReturn(None) + + bci_main( + parameter_location=self.parameters_path, + user=self.user, + experiment_id=self.experiment, + alert=self.alert, + visualize=self.visualize, + fake=self.fake + ) + 
verify(SessionOrchestrator, times=1).add_tasks(any()) + verify(SessionOrchestrator, times=1).execute() + verify(SessionOrchestrator, times=1).initialize_copy_phrases() + verify(SessionOrchestrator, times=1)._init_orchestrator_logger(any()) + verify(SessionOrchestrator, times=1)._init_orchestrator_save_folder(any()) + verify(main, times=1).load_json_parameters(any(), value_cast=any()) + verify(SessionOrchestrator, times=1).get_system_info() + + def test_bci_main_runs_with_valid_task(self) -> None: + when(main).validate_bcipy_session(any(), any()).thenReturn(True) + when(main).load_json_parameters(any(), value_cast=any()).thenReturn(self.parameters) + when(SessionOrchestrator).get_system_info().thenReturn(None) + when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn(None) + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator).initialize_copy_phrases().thenReturn(None) + when(SessionOrchestrator).add_tasks(any()).thenReturn(None) + when(SessionOrchestrator).execute().thenReturn(None) + + bci_main( + parameter_location=self.parameters_path, + user=self.user, + task='RSVP Calibration', + alert=self.alert, + visualize=self.visualize, + fake=self.fake + ) + + verify(SessionOrchestrator, times=1).add_tasks(any()) + verify(SessionOrchestrator, times=1).execute() + verify(SessionOrchestrator, times=1).initialize_copy_phrases() + verify(SessionOrchestrator, times=1)._init_orchestrator_logger(any()) + verify(SessionOrchestrator, times=1)._init_orchestrator_save_folder(any()) + verify(main, times=1).load_json_parameters(any(), value_cast=any()) + verify(SessionOrchestrator, times=1).get_system_info() + + def test_bci_main_returns_false_with_orchestrator_execute_exception(self): + when(main).validate_bcipy_session(any(), any()).thenReturn(True) + when(main).load_json_parameters(any(), value_cast=any()).thenReturn(self.parameters) + when(SessionOrchestrator).get_system_info().thenReturn(None) + 
when(SessionOrchestrator)._init_orchestrator_save_folder(any()).thenReturn(None) + when(SessionOrchestrator)._init_orchestrator_logger(any()).thenReturn(self.logger) + when(SessionOrchestrator).initialize_copy_phrases().thenReturn(None) + when(SessionOrchestrator).add_tasks(any()).thenReturn(None) + when(SessionOrchestrator).execute().thenRaise(Exception) + + response = bci_main( + parameter_location=self.parameters_path, + user=self.user, + task='RSVP Calibration', + alert=self.alert, + visualize=self.visualize, + fake=self.fake + ) + + self.assertFalse(response) if __name__ == '__main__': diff --git a/requirements.txt b/requirements.txt index f3f9df88e..f4a764fdd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ construct==2.8.14 mne==1.5.0 pyo==1.0.5 pyglet<=1.5.27,>=1.4 -PsychoPy==2023.2.1 +PsychoPy==2024.2.1 openpyxl==3.1.2 numpy==1.24.4 sounddevice==0.4.4 @@ -13,15 +13,15 @@ SoundFile==0.12.1 scipy==1.10.1 scikit-learn==1.2.2 seaborn==0.9.0 -matplotlib==3.7.2 +matplotlib==3.7.5 pylsl==1.16.2 -pandas==1.5.3 +pandas==2.0.3 psutil==5.7.2 Pillow==9.4.0 py-cpuinfo==9.0.0 pyedflib==0.1.34 -PyQt6==6.6.0 -PyQt6-Qt6==6.6.0 +pyopengl==3.1.7 +PyQt6==6.7.1 pywavelets==1.4.1 tqdm==4.62.2 reportlab==4.2.0 diff --git a/scripts/shell/run_gui.sh b/scripts/shell/run_gui.sh deleted file mode 100644 index 26065ada7..000000000 --- a/scripts/shell/run_gui.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -###### RUN BCI. 
####### -# cd to path of bcipy code -# cd bcipy - -# Execute the gui code -python bcipy/gui/BCInterface.py diff --git a/setup.py b/setup.py index 7d0c34f83..76a95b252 100644 --- a/setup.py +++ b/setup.py @@ -107,7 +107,11 @@ def run(self): )), entry_points={ 'console_scripts': - ['bcipy = bcipy.main:bcipy_main', 'bcipy-sim = bcipy.simulator'], + [ + 'bcipy = bcipy.main:bcipy_main', + 'bcipy-erp-viz = bcipy.helpers.visualization:erp', + 'bcipy-sim = bcipy.simulator', + "bcipy-train = bcipy.signal.model.offline_analysis:main"], }, install_requires=REQUIRED, include_package_data=True,