Commit 4db85c7

Merge branch 'main' of github.com:spikeinterface/spikeinterface into meta_merging_sc2
yger committed Jun 26, 2024
2 parents 30f3617 + 921ec82 commit 4db85c7
Showing 15 changed files with 401 additions and 124 deletions.
7 changes: 6 additions & 1 deletion .github/run_tests.sh
@@ -1,8 +1,13 @@
 #!/bin/bash

 MARKER=$1
+NOVIRTUALENV=$2
+
+# Check if the second argument is provided and if it is equal to --no-virtual-env
+if [ -z "$NOVIRTUALENV" ] || [ "$NOVIRTUALENV" != "--no-virtual-env" ]; then
+    source $GITHUB_WORKSPACE/test_env/bin/activate
+fi

-source $GITHUB_WORKSPACE/test_env/bin/activate
 pytest -m "$MARKER" -vv -ra --durations=0 --durations-min=0.001 | tee report.txt; test ${PIPESTATUS[0]} -eq 0 || exit 1
 echo "# Timing profile of ${MARKER}" >> $GITHUB_STEP_SUMMARY
 python $GITHUB_WORKSPACE/.github/build_job_summary.py report.txt >> $GITHUB_STEP_SUMMARY
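For context, the script takes a pytest marker as its first argument, and the new optional second argument skips virtual-environment activation; the `test ${PIPESTATUS[0]}` check keeps pytest's exit code from being masked by the pipe into `tee`. A minimal sketch of both call styles, assuming it is run from the repository root with `GITHUB_WORKSPACE` set (`core` is one of the markers used in the workflow below):

```bash
# Activate the cached virtual env, then run the "core"-marked tests (default behavior)
.github/run_tests.sh core

# Skip activation, e.g. when dependencies are installed directly on the runner,
# as the all-tests.yml workflow below does
.github/run_tests.sh core --no-virtual-env
```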
129 changes: 129 additions & 0 deletions .github/workflows/all-tests.yml
@@ -0,0 +1,129 @@
+name: Complete tests
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: "0 12 * * 0"  # Weekly on Sunday at noon UTC
+  pull_request:
+    types: [synchronize, opened, reopened]
+    branches:
+      - main
+
+env:
+  KACHERY_CLOUD_CLIENT_ID: ${{ secrets.KACHERY_CLOUD_CLIENT_ID }}
+  KACHERY_CLOUD_PRIVATE_KEY: ${{ secrets.KACHERY_CLOUD_PRIVATE_KEY }}
+
+concurrency:  # Cancel previous workflows on the same pull request
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  run:
+    name: ${{ matrix.os }} Python ${{ matrix.python-version }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9", "3.12"]  # Lowest and highest versions we support
+        os: [macos-13, windows-latest, ubuntu-latest]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          # cache: 'pip'  # caching pip dependencies
+
+      - name: Get current hash (SHA) of the ephy_testing_data repo
+        id: repo_hash
+        run: |
+          echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)"
+          echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT
+        shell: bash
+      - name: Cache datasets
+        id: cache-datasets
+        uses: actions/cache/restore@v4
+        with:
+          path: ~/spikeinterface_datasets
+          key: ${{ runner.os }}-datasets-${{ steps.repo_hash.outputs.dataset_hash }}
+          restore-keys: ${{ runner.os }}-datasets
+
+      - name: Install packages
+        run: |
+          git config --global user.email "[email protected]"
+          git config --global user.name "CI Almighty"
+          pip install -e .[test,extractors,streaming_extractors,full]
+          pip install tabulate
+        shell: bash
+
+      - name: Install datalad
+        run: |
+          pip install datalad-installer
+          if [ ${{ runner.os }} = 'Linux' ]; then
+            datalad-installer --sudo ok git-annex --method datalad/packages
+          elif [ ${{ runner.os }} = 'macOS' ]; then
+            datalad-installer --sudo ok git-annex --method brew
+          elif [ ${{ runner.os }} = 'Windows' ]; then
+            datalad-installer --sudo ok git-annex --method datalad/git-annex:release
+          fi
+          pip install datalad
+          git config --global filter.annex.process "git-annex filter-process"  # recommended for efficiency
+        shell: bash
+
+      - name: Set execute permissions on run_tests.sh
+        run: chmod +x .github/run_tests.sh
+        shell: bash
+
+      - name: Test core
+        run: pytest -m "core"
+        shell: bash
+
+      - name: Test extractors
+        env:
+          HDF5_PLUGIN_PATH: ${{ github.workspace }}/hdf5_plugin_path_maxwell
+        run: pytest -m "extractors"
+        shell: bash
+
+      - name: Test preprocessing
+        run: ./.github/run_tests.sh "preprocessing and not deepinterpolation" --no-virtual-env
+        shell: bash
+
+      - name: Test postprocessing
+        run: ./.github/run_tests.sh postprocessing --no-virtual-env
+        shell: bash
+
+      - name: Test quality metrics
+        run: ./.github/run_tests.sh qualitymetrics --no-virtual-env
+        shell: bash
+
+      - name: Test comparison
+        run: ./.github/run_tests.sh comparison --no-virtual-env
+        shell: bash
+
+      - name: Test core sorters
+        run: ./.github/run_tests.sh sorters --no-virtual-env
+        shell: bash
+
+      - name: Test internal sorters
+        run: ./.github/run_tests.sh sorters_internal --no-virtual-env
+        shell: bash
+
+      - name: Test curation
+        run: ./.github/run_tests.sh curation --no-virtual-env
+        shell: bash
+
+      - name: Test widgets
+        run: ./.github/run_tests.sh widgets --no-virtual-env
+        shell: bash
+
+      - name: Test exporters
+        run: ./.github/run_tests.sh exporters --no-virtual-env
+        shell: bash
+
+      - name: Test sortingcomponents
+        run: ./.github/run_tests.sh sortingcomponents --no-virtual-env
+        shell: bash
+
+      - name: Test generation
+        run: ./.github/run_tests.sh generation --no-virtual-env
+        shell: bash
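The two `echo` lines in the repo_hash step above first print the dataset hash for the job log and then write it to `GITHUB_OUTPUT` so the cache step can key on it. A sketch of the same computation run locally, using only commands from the workflow (the final key format mirrors the `key:` field; the `Linux` prefix is illustrative):

```bash
# Resolve the current HEAD commit of the GIN data repo; `cut -f1` keeps only the
# SHA column of `git ls-remote` output (columns are tab-separated)
dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)

# The workflow keys the dataset cache as <runner.os>-datasets-<sha>, e.g.:
echo "Linux-datasets-${dataset_hash}"
```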
68 changes: 25 additions & 43 deletions .github/workflows/caches_cron_job.yml
@@ -2,64 +2,35 @@ name: Create caches for gin ecephys data and virtual env

 on:
   workflow_dispatch:
-  push:  # When someting is pushed into main this checks if caches need to re-created
+  push:  # When something is pushed into main this checks if caches need to be re-created
     branches:
       - main
   schedule:
     - cron: "0 12 * * *"  # Daily at noon UTC

 jobs:
-
-
-  create-virtual-env-cache-if-missing:
-    name: Caching virtual env
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
-        with:
-          python-version: '3.10'
-      - name: Get current year-month
-        id: date
-        run: |
-          echo "date=$(date +'%Y-%m')" >> $GITHUB_OUTPUT
-      - name: Get current dependencies hash
-        id: dependencies
-        run: |
-          echo "hash=${{hashFiles('**/pyproject.toml')}}" >> $GITHUB_OUTPUT
-      - uses: actions/cache@v4
-        id: cache-venv
-        with:
-          path: ${{ github.workspace }}/test_env
-          key: ${{ runner.os }}-venv-${{ steps.dependencies.outputs.hash }}-${{ steps.date.outputs.date }}
-          lookup-only: 'true'  # Avoids downloading the data, saving behavior is not affected.
-      - name: Cache found?
-        run: echo "Cache-hit == ${{steps.cache-venv.outputs.cache-hit == 'true'}}"
-      - name: Create the virtual environment to be cached
-        if: steps.cache-venv.outputs.cache-hit != 'true'
-        uses: ./.github/actions/build-test-environment
-
-
   create-gin-data-cache-if-missing:
     name: Caching data env
-    runs-on: "ubuntu-latest"
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - name: Create the directory to store the data
        run: |
-          mkdir --parents --verbose $HOME/spikeinterface_datasets/ephy_testing_data/
-          chmod -R 777 $HOME/spikeinterface_datasets
-          ls -l $HOME/spikeinterface_datasets
+          mkdir -p ~/spikeinterface_datasets/ephy_testing_data/
+          ls -l ~/spikeinterface_datasets
         shell: bash
       - name: Get current hash (SHA) of the ephy_testing_data repo
         id: repo_hash
         run: |
           echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)"
           echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT
         shell: bash
       - uses: actions/cache@v4
         id: cache-datasets
         with:
@@ -68,31 +39,42 @@ jobs:
           lookup-only: 'true'  # Avoids downloading the data, saving behavior is not affected.
       - name: Cache found?
         run: echo "Cache-hit == ${{steps.cache-datasets.outputs.cache-hit == 'true'}}"
+        shell: bash
       - name: Installing datalad and git-annex
         if: steps.cache-datasets.outputs.cache-hit != 'true'
         run: |
           git config --global user.email "[email protected]"
           git config --global user.name "CI Almighty"
           python -m pip install -U pip  # Official recommended way
           pip install datalad-installer
-          datalad-installer --sudo ok git-annex --method datalad/packages
+          if [ ${{ runner.os }} == 'Linux' ]; then
+            datalad-installer --sudo ok git-annex --method datalad/packages
+          elif [ ${{ runner.os }} == 'macOS' ]; then
+            datalad-installer --sudo ok git-annex --method brew
+          elif [ ${{ runner.os }} == 'Windows' ]; then
+            datalad-installer --sudo ok git-annex --method datalad/git-annex:release
+          fi
           pip install datalad
           git config --global filter.annex.process "git-annex filter-process"  # recommended for efficiency
         shell: bash
       - name: Download dataset
         if: steps.cache-datasets.outputs.cache-hit != 'true'
         run: |
           datalad install --recursive --get-data https://gin.g-node.org/NeuralEnsemble/ephy_testing_data
         shell: bash
       - name: Move the downloaded data to the right directory
         if: steps.cache-datasets.outputs.cache-hit != 'true'
         run: |
-          mv --force ./ephy_testing_data $HOME/spikeinterface_datasets/
+          mv ./ephy_testing_data ~/spikeinterface_datasets/
         shell: bash
       - name: Show size of the cache to assert data is downloaded
         run: |
-          cd $HOME
+          cd ~
           pwd
           du -hs spikeinterface_datasets  # Should show the size of ephy_testing_data
           cd spikeinterface_datasets
           pwd
           ls -lh  # Should show ephy_testing_data
           cd ephy_testing_data
           ls -lh
         shell: bash
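Outside CI, the same dataset cache can be assembled by hand. A sketch for a Linux machine, using only commands that appear in the workflow above (the git-annex install method differs per OS, as the branches show):

```bash
pip install datalad-installer
datalad-installer --sudo ok git-annex --method datalad/packages  # Linux branch
pip install datalad
git config --global filter.annex.process "git-annex filter-process"  # recommended for efficiency

mkdir -p ~/spikeinterface_datasets/ephy_testing_data/
datalad install --recursive --get-data https://gin.g-node.org/NeuralEnsemble/ephy_testing_data
mv ./ephy_testing_data ~/spikeinterface_datasets/
du -hs ~/spikeinterface_datasets  # should show the size of ephy_testing_data
```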
7 changes: 3 additions & 4 deletions pyproject.toml
@@ -137,10 +137,9 @@ test = [

     # for sortingview backend
     "sortingview",
-
-    # recent datalad need a too recent version for git-annex
-    # so we use an old one here
-    "datalad==0.16.2",
+    # Download data
+    "pooch>=1.8.2",
+    "datalad>=1.0.2",

     ## install tridesclous for testing ##
     "tridesclous>=1.6.8",
8 changes: 5 additions & 3 deletions src/spikeinterface/comparison/basecomparison.py
@@ -313,9 +313,11 @@ class MixinTemplateComparison:
     """
     Mixin for template comparisons to define:
       * similarity method
-      * sparsity
+      * support
+      * num_shifts
     """

-    def __init__(self, similarity_method="cosine_similarity", sparsity_dict=None):
+    def __init__(self, similarity_method="cosine", support="union", num_shifts=0):
         self.similarity_method = similarity_method
-        self.sparsity_dict = sparsity_dict
+        self.support = support
+        self.num_shifts = num_shifts
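To make the new options concrete, here is a self-contained sketch (not spikeinterface code) of what `similarity_method` and `num_shifts` control when scoring two templates; `support` would additionally restrict the comparison to the union or intersection of the units' channel sparsity, which is omitted here:

```python
import numpy as np

def template_similarity(t1, t2, similarity_method="cosine", num_shifts=0):
    """Best similarity between two (num_samples, num_channels) templates
    over temporal shifts in [-num_shifts, num_shifts]. Illustrative only."""
    best = -np.inf
    num_samples = t1.shape[0]
    for shift in range(-num_shifts, num_shifts + 1):
        # Overlapping windows of the two templates for this shift
        a = t1[max(0, shift) : num_samples + min(0, shift)].ravel()
        b = t2[max(0, -shift) : num_samples + min(0, -shift)].ravel()
        if similarity_method == "cosine":
            score = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
        else:  # treat anything else as an L2-style distance turned into a similarity
            score = 1.0 - np.linalg.norm(a - b) / (np.linalg.norm(a) + np.linalg.norm(b))
        best = max(best, score)
    return best
```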
9 changes: 6 additions & 3 deletions src/spikeinterface/comparison/multicomparisons.py
@@ -333,8 +333,9 @@ def __init__(
         match_score=0.8,
         chance_score=0.3,
         verbose=False,
-        similarity_method="cosine_similarity",
-        sparsity_dict=None,
+        similarity_method="cosine",
+        support="union",
+        num_shifts=0,
         do_matching=True,
     ):
         if name_list is None:
@@ -347,7 +348,9 @@
             chance_score=chance_score,
             verbose=verbose,
         )
-        MixinTemplateComparison.__init__(self, similarity_method=similarity_method, sparsity_dict=sparsity_dict)
+        MixinTemplateComparison.__init__(
+            self, similarity_method=similarity_method, support=support, num_shifts=num_shifts
+        )

         if do_matching:
             self._compute_all()
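Callers pass the template-comparison options through the multi-comparison constructor. A hypothetical call, assuming the `compare_multiple_templates` entry point in `spikeinterface.comparison` forwards these keyword arguments (everything other than the three changed kwargs is illustrative):

```python
from spikeinterface.comparison import compare_multiple_templates

# waveform_list: a list of waveform/template objects prepared elsewhere (hypothetical)
comp = compare_multiple_templates(
    waveform_list,
    similarity_method="cosine",  # was "cosine_similarity" before this change
    support="union",             # replaces the old sparsity_dict argument
    num_shifts=0,                # new: max temporal shift tried when matching templates
)
```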