Skip to content

Commit

Permalink
Merge branch 'main' into merging_units
Browse files Browse the repository at this point in the history
  • Loading branch information
yger authored Jun 27, 2024
2 parents f7bec43 + 0d99342 commit 1b46e99
Show file tree
Hide file tree
Showing 4 changed files with 245 additions and 35 deletions.
118 changes: 118 additions & 0 deletions .github/determine_testing_environment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
"""Decide which CI test suites to run based on the files changed in a PR.

Receives the list of changed file paths as positional CLI arguments, maps
each file to the SpikeInterface submodule it belongs to, derives which test
groups must run (plus whether special Plexon dependencies are needed), and
appends the resulting KEY=value flags to the file pointed to by the
``GITHUB_ENV`` variable so later workflow steps see them as env variables.
"""

from pathlib import Path
import argparse
import os


# We get the list of changed files as an input
parser = argparse.ArgumentParser()
parser.add_argument("changed_files_in_the_pull_request", nargs="*", help="List of changed files")
args = parser.parse_args()

changed_files_in_the_pull_request = args.changed_files_in_the_pull_request
changed_files_in_the_pull_request_paths = [Path(file) for file in changed_files_in_the_pull_request]

# We assume nothing has been changed

core_changed = False
pyproject_toml_changed = False
neobaseextractor_changed = False
extractors_changed = False
plexon2_changed = False
preprocessing_changed = False
postprocessing_changed = False
qualitymetrics_changed = False
sorters_changed = False
sorters_external_changed = False
sorters_internal_changed = False
comparison_changed = False
curation_changed = False
widgets_changed = False
exporters_changed = False
sortingcomponents_changed = False
generation_changed = False


for changed_file in changed_files_in_the_pull_request_paths:

    # Guard against empty path arguments: Path("").parts is () and would
    # raise IndexError below.
    if not changed_file.parts:
        continue

    file_is_in_src = changed_file.parts[0] == "src"

    if not file_is_in_src:

        if changed_file.name == "pyproject.toml":
            pyproject_toml_changed = True

    else:
        if changed_file.name == "neobaseextractor.py":
            neobaseextractor_changed = True
        elif changed_file.name == "plexon2.py":
            # Plexon needs extra runtime dependencies installed in CI, so
            # record it explicitly (fixes INSTALL_PLEXON_DEPENDENCIES never
            # being True) in addition to running the extractor tests.
            plexon2_changed = True
            extractors_changed = True
        elif "core" in changed_file.parts:
            # Fix: previously assigned to an unused name
            # (`conditions_changed`), so core changes never ran everything.
            core_changed = True
        elif "extractors" in changed_file.parts:
            extractors_changed = True
        elif "preprocessing" in changed_file.parts:
            preprocessing_changed = True
        elif "postprocessing" in changed_file.parts:
            postprocessing_changed = True
        elif "qualitymetrics" in changed_file.parts:
            qualitymetrics_changed = True
        elif "comparison" in changed_file.parts:
            comparison_changed = True
        elif "curation" in changed_file.parts:
            curation_changed = True
        elif "widgets" in changed_file.parts:
            widgets_changed = True
        elif "exporters" in changed_file.parts:
            exporters_changed = True
        elif "sortingcomponents" in changed_file.parts:
            sortingcomponents_changed = True
        elif "generation" in changed_file.parts:
            generation_changed = True
        elif "sorters" in changed_file.parts:
            if "external" in changed_file.parts:
                sorters_external_changed = True
            elif "internal" in changed_file.parts:
                sorters_internal_changed = True
            else:
                sorters_changed = True


# Changes to core, packaging, or the base Neo extractor affect everything.
run_everything = core_changed or pyproject_toml_changed or neobaseextractor_changed
run_generation_tests = run_everything or generation_changed
run_extractor_tests = run_everything or extractors_changed
run_preprocessing_tests = run_everything or preprocessing_changed
run_postprocessing_tests = run_everything or postprocessing_changed
run_qualitymetrics_tests = run_everything or qualitymetrics_changed
run_curation_tests = run_everything or curation_changed
run_sortingcomponents_tests = run_everything or sortingcomponents_changed

# Downstream modules also run when their upstream dependencies changed.
run_comparison_test = run_everything or run_generation_tests or comparison_changed
run_widgets_test = run_everything or run_qualitymetrics_tests or run_preprocessing_tests or widgets_changed
run_exporters_test = run_everything or run_widgets_test or exporters_changed

run_sorters_test = run_everything or sorters_changed
run_internal_sorters_test = run_everything or run_sortingcomponents_tests or sorters_internal_changed

install_plexon_dependencies = plexon2_changed

environment_variables_to_add = {
    "RUN_EXTRACTORS_TESTS": run_extractor_tests,
    "RUN_PREPROCESSING_TESTS": run_preprocessing_tests,
    "RUN_POSTPROCESSING_TESTS": run_postprocessing_tests,
    "RUN_QUALITYMETRICS_TESTS": run_qualitymetrics_tests,
    "RUN_CURATION_TESTS": run_curation_tests,
    "RUN_SORTINGCOMPONENTS_TESTS": run_sortingcomponents_tests,
    "RUN_GENERATION_TESTS": run_generation_tests,
    "RUN_COMPARISON_TESTS": run_comparison_test,
    "RUN_WIDGETS_TESTS": run_widgets_test,
    "RUN_EXPORTERS_TESTS": run_exporters_test,
    "RUN_SORTERS_TESTS": run_sorters_test,
    "RUN_INTERNAL_SORTERS_TESTS": run_internal_sorters_test,
    "INSTALL_PLEXON_DEPENDENCIES": install_plexon_dependencies,
}

# Write the conditions to the GITHUB_ENV file so subsequent workflow steps
# receive them as environment variables.
env_file = os.getenv("GITHUB_ENV")
if env_file is None:
    # Fail with a clear message instead of the opaque TypeError that
    # open(None) would raise when run outside GitHub Actions.
    raise RuntimeError("GITHUB_ENV is not set; this script must run inside a GitHub Actions job.")
with open(env_file, "a") as f:
    for key, value in environment_variables_to_add.items():
        f.write(f"{key}={value}\n")
150 changes: 116 additions & 34 deletions .github/workflows/all-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,64 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
# cache: 'pip' # caching pip dependencies

- name: Get current hash (SHA) of the ephy_testing_data repo
id: repo_hash
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v41

- name: List all changed files
shell: bash
env:
ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
run: |
for file in ${ALL_CHANGED_FILES}; do
echo "$file was changed"
done
      - name: Set testing environment  # This decides which tests are run and whether to install special dependencies
shell: bash
run: |
changed_files="${{ steps.changed-files.outputs.all_changed_files }}"
python .github/determine_testing_environment.py $changed_files
- name: Display testing environment
shell: bash
run: |
echo "RUN_EXTRACTORS_TESTS=${RUN_EXTRACTORS_TESTS}"
echo "RUN_PREPROCESSING_TESTS=${RUN_PREPROCESSING_TESTS}"
echo "RUN_POSTPROCESSING_TESTS=${RUN_POSTPROCESSING_TESTS}"
echo "RUN_QUALITYMETRICS_TESTS=${RUN_QUALITYMETRICS_TESTS}"
echo "RUN_CURATION_TESTS=${RUN_CURATION_TESTS}"
echo "RUN_SORTINGCOMPONENTS_TESTS=${RUN_SORTINGCOMPONENTS_TESTS}"
echo "RUN_GENERATION_TESTS=${RUN_GENERATION_TESTS}"
echo "RUN_COMPARISON_TESTS=${RUN_COMPARISON_TESTS}"
echo "RUN_WIDGETS_TESTS=${RUN_WIDGETS_TESTS}"
echo "RUN_EXPORTERS_TESTS=${RUN_EXPORTERS_TESTS}"
echo "RUN_SORTERS_TESTS=${RUN_SORTERS_TESTS}"
echo "RUN_INTERNAL_SORTERS_TESTS=${RUN_INTERNAL_SORTERS_TESTS}"
echo "INSTALL_PLEXON_DEPENDENCIES=${INSTALL_PLEXON_DEPENDENCIES}"
- name: Install packages
run: |
echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)"
echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT
pip install -e .[test_core]
shell: bash

- name: Test core
run: pytest -m "core"
shell: bash

- name: Install Other Testing Dependencies
run: |
pip install -e .[test]
pip install tabulate
pip install pandas
shell: bash

- name: Get current hash (SHA) of the ephy_testing_data repo
shell: bash
id: repo_hash
run: echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT

- name: Cache datasets
id: cache-datasets
uses: actions/cache/restore@v4
Expand All @@ -48,82 +98,114 @@ jobs:
key: ${{ runner.os }}-datasets-${{ steps.repo_hash.outputs.dataset_hash }}
restore-keys: ${{ runner.os }}-datasets

- name: Install packages
run: |
git config --global user.email "[email protected]"
git config --global user.name "CI Almighty"
pip install -e .[test,extractors,streaming_extractors,full]
pip install tabulate
- name: Install git-annex
shell: bash

      - name: Install datalad
if: env.RUN_EXTRACTORS_TESTS == 'true'
run: |
pip install datalad-installer
if [ ${{ runner.os }} = 'Linux' ]; then
datalad-installer --sudo ok git-annex --method datalad/packages
wget https://downloads.kitenet.net/git-annex/linux/current/git-annex-standalone-amd64.tar.gz
mkdir /home/runner/work/installation
mv git-annex-standalone-amd64.tar.gz /home/runner/work/installation/
workdir=$(pwd)
cd /home/runner/work/installation
tar xvzf git-annex-standalone-amd64.tar.gz
echo "$(pwd)/git-annex.linux" >> $GITHUB_PATH
cd $workdir
elif [ ${{ runner.os }} = 'macOS' ]; then
datalad-installer --sudo ok git-annex --method brew
elif [ ${{ runner.os }} = 'Windows' ]; then
datalad-installer --sudo ok git-annex --method datalad/git-annex:release
fi
pip install datalad
git config --global filter.annex.process "git-annex filter-process" # recommended for efficiency
shell: bash
- name: Set execute permissions on run_tests.sh
run: chmod +x .github/run_tests.sh
shell: bash
- name: Test core
run: pytest -m "core"
- name: Set execute permissions on run_tests.sh
shell: bash
run: chmod +x .github/run_tests.sh

- name: Test extractors
shell: bash
env:
HDF5_PLUGIN_PATH: ${{ github.workspace }}/hdf5_plugin_path_maxwell
run: pytest -m "extractors"
shell: bash
if: env.RUN_EXTRACTORS_TESTS == 'true'
run: |
pip install -e .[extractors,streaming_extractors]
./.github/run_tests.sh "extractors and not streaming_extractors" --no-virtual-env
- name: Test preprocessing
run: ./.github/run_tests.sh "preprocessing and not deepinterpolation" --no-virtual-env
shell: bash
if: env.RUN_PREPROCESSING_TESTS == 'true'
run: |
pip install -e .[preprocessing]
./.github/run_tests.sh "preprocessing and not deepinterpolation" --no-virtual-env
- name: Test postprocessing
run: ./.github/run_tests.sh postprocessing --no-virtual-env
shell: bash
if: env.RUN_POSTPROCESSING_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh postprocessing --no-virtual-env
- name: Test quality metrics
run: ./.github/run_tests.sh qualitymetrics --no-virtual-env
shell: bash
if: env.RUN_QUALITYMETRICS_TESTS == 'true'
run: |
pip install -e .[qualitymetrics]
./.github/run_tests.sh qualitymetrics --no-virtual-env
- name: Test comparison
run: ./.github/run_tests.sh comparison --no-virtual-env
shell: bash
if: env.RUN_COMPARISON_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh comparison --no-virtual-env
- name: Test core sorters
run: ./.github/run_tests.sh sorters --no-virtual-env
shell: bash
if: env.RUN_SORTERS_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh sorters --no-virtual-env
- name: Test internal sorters
run: ./.github/run_tests.sh sorters_internal --no-virtual-env
shell: bash
if: env.RUN_INTERNAL_SORTERS_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh sorters_internal --no-virtual-env
- name: Test curation
run: ./.github/run_tests.sh curation --no-virtual-env
shell: bash
if: env.RUN_CURATION_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh curation --no-virtual-env
- name: Test widgets
run: ./.github/run_tests.sh widgets --no-virtual-env
shell: bash
if: env.RUN_WIDGETS_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh widgets --no-virtual-env
- name: Test exporters
run: ./.github/run_tests.sh exporters --no-virtual-env
shell: bash
if: env.RUN_EXPORTERS_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh exporters --no-virtual-env
- name: Test sortingcomponents
run: ./.github/run_tests.sh sortingcomponents --no-virtual-env
shell: bash
if: env.RUN_SORTINGCOMPONENTS_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh sortingcomponents --no-virtual-env
- name: Test generation
run: ./.github/run_tests.sh generation --no-virtual-env
shell: bash
if: env.RUN_GENERATION_TESTS == 'true'
run: |
pip install -e .[full]
./.github/run_tests.sh generation --no-virtual-env
5 changes: 5 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,11 @@ streaming_extractors = [
"s3fs"
]

preprocessing = [
"scipy",
]


full = [
"h5py",
"pandas",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,12 @@ def test_compare_multiple_templates():
duration = 60
num_channels = 8

rec, sort = generate_ground_truth_recording(durations=[duration], num_channels=num_channels)
seed = 0
rec, sort = generate_ground_truth_recording(
durations=[duration],
num_channels=num_channels,
seed=seed,
)

# split recording in 3 equal slices
fs = rec.get_sampling_frequency()
Expand Down

0 comments on commit 1b46e99

Please sign in to comment.