diff --git a/.circleci/config.yml b/.circleci/config.yml index 41df74b..0734e8b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,7 +31,7 @@ jobs: # Download and cache dependencies - restore_cache: keys: - - py35-dependencies-{{ checksum "requirements.txt" }} + - py35-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt"}} # fallback to using the latest cache if no exact match is found - py35-dependencies- @@ -40,13 +40,13 @@ jobs: command: | python3 -m venv venv . venv/bin/activate - pip install -r requirements.txt + pip install -r requirements-dev.txt pip install git+https://github.com/j-friedrich/OASIS.git - save_cache: paths: - ./venv - key: py35-dependencies-{{ checksum "requirements.txt" }} + key: py35-dependencies-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt"}} # run tests! - run: @@ -86,7 +86,7 @@ jobs: command: | virtualenv venv . venv/bin/activate - pip install -r requirements.txt + pip install -r requirements-dev.txt pip install git+https://github.com/j-friedrich/OASIS.git - save_cache: diff --git a/.gitignore b/.gitignore index 1960928..3acd760 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,8 @@ instance/ # Sphinx documentation docs/_build/ +docs/generated/ +docs/gallery/ # PyBuilder target/ @@ -105,4 +107,4 @@ ENV/ .mypy_cache/ # Visual Studio Code -.vscode/ \ No newline at end of file +.vscode/ diff --git a/AUTHORS.rst b/AUTHORS.rst index 5c5d86f..48c8298 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -5,9 +5,11 @@ Credits Development Lead ---------------- -* Justin Kiggens +* Justin Kiggins Contributors ------------ * Nicholas Cain +* Michael Oliver +* Sahar Manavi diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 5650a77..e0c70dc 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -79,7 +79,7 @@ Ready to contribute? Here's how to set up `neuroglia` for local development. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 neuroglia tests - $ python setup.py test or py.test + $ py.test $ tox To get flake8 and tox, just pip install them into your conda env. @@ -101,8 +101,8 @@ Before you submit a pull request, check that it meets these guidelines: 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. -3. The pull request should work for Python 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check - https://travis-ci.org/AllenInstitute/neuroglia/pull_requests +3. The pull request should work for Python 2.7 and 3.5. Check + https://circleci.com/gh/AllenInstitute/neuroglia and make sure that the tests pass for all supported Python versions. Tips diff --git a/HISTORY.rst b/HISTORY.rst deleted file mode 100644 index 1e1ab73..0000000 --- a/HISTORY.rst +++ /dev/null @@ -1,8 +0,0 @@ -======= -History -======= - -0.1.0 (2017-08-08) ------------------- - -* Integration into cookiecutter-pypackage template form diff --git a/README.rst b/README.rst index 5984efc..c9d877e 100644 --- a/README.rst +++ b/README.rst @@ -3,20 +3,21 @@ neuroglia ========= +more than just glue. scikit-learn compatible transformers for neural data science + .. image:: https://circleci.com/gh/AllenInstitute/neuroglia.svg?style=svg&circle-token=d0a164bbf19524a24f0d6bc42535aab9c89f8c13 :target: https://circleci.com/gh/AllenInstitute/neuroglia + What is neuroglia? 
------------------ -scikit-learn compatible transformers for neurophysiology data - Things you can do: - make a PSTH -- extract events from a calcium signal -- synthesize a calcium signal - infer spikes from a calcium signal +- smooth a spiketrain +- chain these things together into scikit-learn pipelines Installation ------------ @@ -30,12 +31,17 @@ Requirements - scikit-learn - xarray -## Level of Support -We are planning on occasional updating this tool with no fixed schedule. Community involvement is encouraged through both issues and pull requests. Please make pull requests against the dev branch, as we will test changes there before merging into master. +Level of Support +------------ +We are planning on occasional updating this tool with no fixed schedule. Community involvement is encouraged through both issues and pull requests. + +License +------- -## License +TBD -## Authors +Authors +-------- .. include:: ../AUTHORS.rst diff --git a/docs/Makefile b/docs/Makefile index 9381d64..dda7c8c 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -54,6 +54,11 @@ html: @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." +html-noplot: + $(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo diff --git a/docs/api.rst b/docs/api.rst new file mode 100644 index 0000000..cd7ff05 --- /dev/null +++ b/docs/api.rst @@ -0,0 +1,56 @@ +.. _api_ref: + +.. currentmodule:: neuroglia + +API reference +============= + +.. _spike_api: + +Spike transformers +------------------ + +.. autosummary:: + :toctree: generated/ + + spike.Binner + spike.Smoother + nwb.SpikeTablizer + +.. _trace_api: + +Trace transformers +------------------ + +.. autosummary:: + :toctree: generated/ + + trace.Binarizer + trace.EdgeDetector + trace.WhenTrueFinder + calcium.OASISInferer + calcium.MedianFilterDetrend + calcium.SavGolFilterDetrend + calcium.EventRescale + + +.. _event_api: + +Event transformers +------------------ + +.. autosummary:: + :toctree: generated/ + + event.PeriEventSpikeSampler + event.PeriEventTraceSampler + +.. _tensor_api: + +Tensor transformers +------------------- + +.. autosummary:: + :toctree: generated/ + + tensor.ResponseReducer diff --git a/docs/authors.rst b/docs/authors.rst deleted file mode 100644 index e122f91..0000000 --- a/docs/authors.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../AUTHORS.rst diff --git a/docs/conf.py b/docs/conf.py index 9db544f..72fe4b8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -32,6 +32,7 @@ sys.path.insert(0, project_root) import neuroglia +import sphinx_bootstrap_theme # -- General configuration --------------------------------------------- @@ -40,7 +41,14 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.viewcode', + 'sphinx.ext.autosummary', + # 'numpydoc', + 'sphinx.ext.napoleon', + 'sphinx_gallery.gen_gallery', + ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -56,7 +64,7 @@ # General information about the project. 
project = u'neuroglia' -copyright = u"2017, Nicholas Cain" +copyright = u"2017, Allen Institute for Brain Science" # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout @@ -106,17 +114,50 @@ # documents. #keep_warnings = False +sphinx_gallery_conf = { + # path to your examples scripts + 'examples_dirs': '../examples', + # path where to save gallery generated examples + 'gallery_dirs': 'gallery', + # #directory where function granular galleries are stored + 'backreferences_dir': 'generated', + # + # # Modules for which function level galleries are created. In + # # this case sphinx_gallery and numpy in a tuple of strings. + # 'doc_module': ('neuroglia',), + 'download_section_examples': False, + 'default_thumb_file': '/local1/astrocytes-jsnyder.jpg', + 'min_reported_time': 10, + } + # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'default' +# html_theme = 'alabaster' +html_theme = 'bootstrap' +html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} + +html_theme_options = { + 'source_link_position': "footer", + 'bootswatch_theme': "yeti", # https://bootswatch.com/ + 'navbar_sidebarrel': False, + 'bootstrap_version': "3", + 'navbar_links': [ + ("Introduction", "introduction"), + ("Examples",'examples'), + ("API", "api"), + ], + + } + + # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] @@ -153,7 +194,7 @@ #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names # to template names. @@ -190,6 +231,7 @@ # Output file base name for HTML help builder. htmlhelp_basename = 'neurogliadoc' +autosummary_generate = True # -- Options for LaTeX output ------------------------------------------ @@ -210,7 +252,7 @@ latex_documents = [ ('index', 'neuroglia.tex', u'neuroglia Documentation', - u'Nicholas Cain', 'manual'), + u'Justin Kiggins', 'manual'), ] # The name of an image file (relative to this directory) to place at @@ -241,7 +283,7 @@ man_pages = [ ('index', 'neuroglia', u'neuroglia Documentation', - [u'Nicholas Cain'], 1) + [u'Justin Kiggins'], 1) ] # If true, show URL addresses after external links. @@ -256,7 +298,7 @@ texinfo_documents = [ ('index', 'neuroglia', u'neuroglia Documentation', - u'Nicholas Cain', + u'Justin Kiggins', 'neuroglia', 'One line description of project.', 'Miscellaneous'), diff --git a/docs/contributing.rst b/docs/contributing.rst deleted file mode 100644 index e582053..0000000 --- a/docs/contributing.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../CONTRIBUTING.rst diff --git a/docs/examples.rst b/docs/examples.rst new file mode 100644 index 0000000..091c316 --- /dev/null +++ b/docs/examples.rst @@ -0,0 +1,3 @@ +.. _examples: + +.. include:: gallery/index.rst diff --git a/docs/history.rst b/docs/history.rst deleted file mode 100644 index 2506499..0000000 --- a/docs/history.rst +++ /dev/null @@ -1 +0,0 @@ -.. 
include:: ../HISTORY.rst
diff --git a/docs/index.rst b/docs/index.rst
index 9a16ecd..e26f95b 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,22 +1,56 @@
-Welcome to neuroglia's documentation!
+
+
+neuroglia: more than just brain glue
 ======================================
-Contents:
+.. raw:: html
+
+
+
+
+
+Neuroglia is a Python machine learning library for neurophysiology data. It
+provides scikit-learn compatible transformers for extracting features from
+extracellular electrophysiology & optical physiology data for machine learning
+pipelines.
+
+For a brief introduction to the ideas behind the package, you can read the
+:ref:`introductory notes <introduction>`. If you want to get started, see the
+:ref:`installation page <installing>`, then check out the
+:ref:`API reference <api_ref>` to learn how to use the package.
+
+To see the code or report a bug, please visit the `GitHub repository
+<https://github.com/AllenInstitute/neuroglia>`_.
+
+.. raw:: html
+
+
+
+   Documentation
+
 .. toctree::
-   :maxdepth: 2
-
-   readme
-   installation
-   usage
-   modules
-   contributing
-   authors
-   history
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+   :maxdepth: 1
+
+   introduction
+   installing
+   examples
+   api
+
+.. raw:: html
+
+
+
+   Features
+
+* Spike transformers: :ref:`API <spike_api>`
+* Trace transformers: :ref:`API <trace_api>`
+* Event transformers: :ref:`API <event_api>`
+* Tensor transformers: :ref:`API <tensor_api>`
+
+.. raw:: html
+
+
+
diff --git a/docs/installation.rst b/docs/installing.rst similarity index 71% rename from docs/installation.rst rename to docs/installing.rst index 27f6551..e3446cc 100644 --- a/docs/installation.rst +++ b/docs/installing.rst @@ -1,3 +1,5 @@ +.. _installing: + .. highlight:: shell ============ @@ -14,7 +16,7 @@ To install neuroglia, run this command in your terminal: $ pip install neuroglia -This is the preferred method to install neuroglia, as it will always install the most recent stable release. +This is the preferred method to install neuroglia, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. @@ -32,13 +34,13 @@ You can either clone the public repository: .. code-block:: console - $ git clone git://github.com/nicain/neuroglia + $ git clone git://github.com/AllenInstitute/neuroglia Or download the `tarball`_: .. code-block:: console - $ curl -OL https://github.com/nicain/neuroglia/tarball/master + $ curl -OL https://github.com/AllenInstitute/neuroglia/tarball/master Once you have a copy of the source, you can install it with: @@ -47,5 +49,5 @@ Once you have a copy of the source, you can install it with: $ python setup.py install -.. _Github repo: https://github.com/nicain/neuroglia -.. _tarball: https://github.com/nicain/neuroglia/tarball/master +.. _Github repo: https://github.com/AllenInstitute/neuroglia +.. _tarball: https://github.com/AllenInstitute/neuroglia/tarball/master diff --git a/docs/introduction.rst b/docs/introduction.rst new file mode 100644 index 0000000..660e09f --- /dev/null +++ b/docs/introduction.rst @@ -0,0 +1,95 @@ +.. _introduction: + +Introduction +============ + +Neuroglia is a suite of scikit-learn transformers to facilitate converting between the canonical data structures used in ephys & ophys: + +- Spike times: a list of timestamps labelled with the neuron that elicited the spike +- Traces: a 2D array of neurons x time. Aka a “time series”. E.g. calcium traces from 2P. binned spike times. Gaussian smoothed spike times, etc. +- Tensor: a 3D array of traces aligned to events (events x neurons x time) + +scikit-learn transformers +------------------------- + +Transformations between these representations are implemented as scikit-learn transformers. This means that they all are defined as objects with “fit” and “transform” methods so that, for example, applying a Gaussian smoothing to a population of spiking data means transforming from a “spike times” structure to a “traces” structure like so: +:: + + smoother = neuroglia.spike.Smoother( + sample_times=np.arange(0,MAX_TIME,0.001), # <- this is the time base that the smoothed traces will be cast onto + kernel=’gaussian’, # <- this is the kernel that will be used + tau=0.005, # <- this is the width of the kernel in whatever time base the spike times are in + ) + + smoothed_traces = smoother.fit_transform(SPIKES) + +Conforming to the syntax that is expected by the scikit learn API turns these transformers into building blocks that can plug into a scikit learn pipeline. For example, let’s say you wanted to do some dimensionality reduction on the smoothed traces. 
+::
+
+    from sklearn.decomposition import NMF
+
+    nmf = NMF(n_components=10)
+    reduced_traces = nmf.fit_transform(smoothed_traces)
+
+machine learning pipelines
+--------------------------
+
+You could also chain these steps together like so
+::
+
+    from sklearn.pipeline import Pipeline
+
+    pipeline = Pipeline([
+        ('smooth', smoother),
+        ('reduce', nmf),
+    ])
+
+    reduced_traces = pipeline.fit_transform(SPIKES)
+
+And if you wanted to change an analysis step, it just becomes a matter of replacing that piece of the pipeline
+::
+
+    from sklearn.decomposition import PCA
+
+    pipeline = Pipeline([
+        ('smooth', smoother),
+        ('reduce', PCA(n_components=10)),
+    ])
+
+    reduced_traces = pipeline.fit_transform(SPIKES)
+
+
+event-aligned responses
+-----------------------
+
+I've also implemented transformers for annotating events with event-aligned responses, so you can build an entire decoding pipeline that decodes which stimulus was presented to a population from (for example) the peak response in any 10 ms bin within a 250 ms window after the stimulus onset:
+::
+
+    from neuroglia.event import PeriEventSpikeSampler
+    from neuroglia.tensor import ResponseReducer
+    from sklearn.neighbors import KNeighborsClassifier
+
+    pipeline = Pipeline([
+        ('sample', PeriEventSpikeSampler(
+            spikes=SPIKES,
+            sample_times=np.arange(0.0,0.25,0.01),
+            tracizer=Binner,
+        )),
+        ('reduce', ResponseReducer(method='max')),
+        ('classify', KNeighborsClassifier()),
+    ])
+
+cross validation of an entire pipeline
+--------------------------------------
+
+Then, once this pipeline has been defined, we can take advantage of scikit-learn's cross-validation infrastructure to do a 4-fold cross validation across stimulus presentations
+::
+
+    from sklearn.model_selection import cross_val_score
+
+    X = EVENTS['times']
+    y = EVENTS['image_id']
+
+    scores = cross_val_score(pipeline, X, y, cv=4)
+
+These examples illustrate the major features of the package and how the API works.
diff --git a/docs/readme.rst b/docs/readme.rst
deleted file mode 100644
index 72a3355..0000000
--- a/docs/readme.rst
+++ /dev/null
@@ -1 +0,0 @@
-..
include:: ../README.rst diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..f239316 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,5 @@ +numpy +scipy +pandas +xarray +scikit-learn diff --git a/docs/usage.rst b/docs/usage.rst deleted file mode 100644 index bb69e61..0000000 --- a/docs/usage.rst +++ /dev/null @@ -1,7 +0,0 @@ -===== -Usage -===== - -To use neuroglia in a project:: - - import neuroglia diff --git a/examples/README.txt b/examples/README.txt new file mode 100644 index 0000000..bac945d --- /dev/null +++ b/examples/README.txt @@ -0,0 +1,2 @@ +Examples +======== diff --git a/examples/plot_neuropixel.py b/examples/plot_neuropixel.py new file mode 100644 index 0000000..29cd9b7 --- /dev/null +++ b/examples/plot_neuropixel.py @@ -0,0 +1,80 @@ +""" +natural scene decoding from V1 +============================== + +This is an example of how to decode natural images from spikes recorded in V1 + +""" + +from __future__ import print_function + +#################################### +# first, we need to load the data + +data_path = '/allen/aibs/mat/RamIyer/frm_Dan/NWBFilesSev/V1_NI_pkl_data/' + +import pandas as pd +ephys_data = pd.read_pickle(data_path+'M15_ni_data.pkl') + +#################################### +# Let's get the dataframe of image presentations and rename the columns + +events = ephys_data['stim_table'].rename( + columns={ + 'Start':'time', + 'Frame':'image_id', + }, +) +print(events.head()) + +#################################### +# Next, let's reformat the spike times into a single table + + +from neuroglia.nwb import SpikeTablizer +spikes = SpikeTablizer().fit_transform(ephys_data['spiketimes']).reset_index() +print(spikes.head()) + +#################################### +# Now, we'll sample spikes near each event & build this into a xarray 3D tensor + +from neuroglia.event import PeriEventSpikeSampler +from neuroglia.spike import Binner +import numpy as np +spike_sampler = PeriEventSpikeSampler( + spikes=spikes, + sample_times=np.arange(0.1,0.35,0.01), + tracizer=Binner, +) +tensor = spike_sampler.fit_transform(events) +print(tensor) + +#################################### +# We can get the average elicited spike count with the `ResponseReducer` + +from neuroglia.tensor import ResponseReducer +reducer = ResponseReducer(method='mean') +means = reducer.fit_transform(tensor) +print(means) + +#################################### +# Let's use the scikit-learn pipeline to chain these steps into a single +# decoding pipeline + +from sklearn.pipeline import Pipeline +from sklearn.neighbors import KNeighborsClassifier + +pipeline = Pipeline([ + ('spike_sampler',PeriEventSpikeSampler(spikes=spikes,sample_times=np.arange(0.1,0.35,0.01),tracizer=Binner)), + ('extract', ResponseReducer(method=np.mean)), + ('classify', KNeighborsClassifier()), +]) + +#################################### +# Finally, we can do a 3-fold cross validation on the entire pipeline with +# `cross_val_score` +# :: +# from sklearn.model_selection import cross_val_score +# scores = cross_val_score(pipeline, spikes, events['image_id'], cv=3) +# n_images = len(events['image_id'].unique()) +# print(scores*n_images) diff --git a/neuroglia/spike.py b/neuroglia/spike.py index 164e6b1..542ee93 100644 --- a/neuroglia/spike.py +++ b/neuroglia/spike.py @@ -13,11 +13,55 @@ def get_neuron(neuron_spikes): class Binner(BaseEstimator,TransformerMixin): """Bin a population of spike events into an array of spike counts. 
+    This transformer converts a table of spike times into a series of spike
+    counts. Spikes are binned according to the sample_times argument.
+
+    Parameters
+    ----------
+
+    sample_times : array-like
+        The sample times that will be used to bin spikes.
+
+    Attributes
+    ----------
+
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> import pandas as pd
+    >>> from neuroglia.spike import Binner
+    >>> binner = Binner(np.arange(0,1.0,0.001))
+    >>> spikes = pd.DataFrame({'time': np.random.rand(10), 'neuron': np.random.randint(3, size=10)})
+    >>> X = binner.fit_transform(spikes)
+
+    See also
+    --------
+
+    neuroglia.spike.Smoother
+    neuroglia.nwb.SpikeTablizer
+
     """
     def __init__(self,sample_times):
         self.sample_times = sample_times

     def fit(self, X, y=None):
+        """ Do nothing and return the estimator unchanged.
+
+        This method is just there to implement the usual API and hence work in pipelines.
+
+        Parameters
+        ----------
+
+        X : pandas DataFrame with columns ['time','neuron']
+        y : (ignored)
+
+        Returns
+        -------
+
+        self
+        """
         return self

     def __make_trace(self,neuron_spikes):
@@ -30,6 +74,20 @@ def __make_trace(self,neuron_spikes):
         return pd.Series(data=trace,index=self.sample_times[:-1],name=neuron)

     def transform(self, X):
+        """ Bin each neuron's spikes into a trace of spike counts.
+
+        Parameters
+        ----------
+        X : pandas DataFrame with columns ['time','neuron']
+            spike times that will be binned
+        y : (ignored)
+
+        Returns
+        -------
+        Xt : pandas DataFrame of spike counts
+            Columns are neuron labels and the index is the left edge of the
+            sample times.
+        """
         traces = X.groupby('neuron').apply(self.__make_trace).T
         return traces

@@ -42,7 +100,38 @@ def transform(self, X):
 DEFAULT_TAU = 0.005

 class Smoother(BaseEstimator,TransformerMixin):
-    """docstring for Smoother."""
+    """Smooth a population of spike events into an array of traces.
+
+    This transformer converts a table of spike times into a trace of smoothed
+    spike values. Spikes are smoothed onto the time base given by the
+    sample_times argument, with the kernel and tau arguments controlling the
+    shape and width of the smoothing.
+
+    Parameters
+    ----------
+
+    sample_times : array-like
+        The sample times onto which the smoothed traces will be cast.
+    kernel : string, optional (default='gaussian')
+        The kernel that will be used to smooth each spike.
+    tau : float, optional (default=0.005)
+        The width of the kernel, in the same time base as the spike times.
+
+    Attributes
+    ----------
+
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> import pandas as pd
+    >>> from neuroglia.spike import Smoother
+    >>> smoother = Smoother(np.arange(0,1.0,0.001))
+    >>> spikes = pd.DataFrame({'time': np.random.rand(10), 'neuron': np.random.randint(3, size=10)})
+    >>> X = smoother.fit_transform(spikes)
+
+    See also
+    --------
+
+    neuroglia.spike.Binner
+    neuroglia.nwb.SpikeTablizer
+
+    """

     def __init__(self,sample_times,kernel='gaussian',tau=DEFAULT_TAU):
         self.sample_times = sample_times
@@ -51,6 +140,21 @@ def __init__(self,sample_times,kernel='gaussian',tau=DEFAULT_TAU):
         self.tau = tau

     def fit(self, X, y=None):
+        """ Do nothing and return the estimator unchanged.
+
+        This method is just there to implement the usual API and hence work in pipelines.
+
+        Parameters
+        ----------
+
+        X : pandas DataFrame with columns ['time','neuron']
+        y : (ignored)
+
+        Returns
+        -------
+
+        self
+        """
         return self

     def __make_trace(self,neuron_spikes):
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..2cf30d2
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,4 @@
+-r requirements.txt
+cython
+pytest
+pytest-cov
diff --git a/requirements-doc.txt b/requirements-doc.txt
new file mode 100644
index 0000000..d923270
--- /dev/null
+++ b/requirements-doc.txt
@@ -0,0 +1,5 @@
+-r requirements.txt
+sphinx
+sphinx-gallery
+sphinx_bootstrap_theme
+seaborn
diff --git a/requirements.txt b/requirements.txt
index 24c84bd..f239316 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,5 @@
 numpy
+scipy
 pandas
 xarray
-pytest
-pytest-cov
-scipy
 scikit-learn
-cython
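
Taken together, the pieces this patch adds are meant to compose as in the sketch below. This is a minimal, hypothetical example based only on the docstrings and introduction added above: it assumes a spike table with 'time' and 'neuron' columns and the Smoother signature shown in the new spike.py docstrings, and it substitutes synthetic data for a real recording.
::

    import numpy as np
    import pandas as pd
    from sklearn.decomposition import NMF
    from sklearn.pipeline import Pipeline

    from neuroglia.spike import Smoother

    # synthetic spike table: one row per spike, labelled with the neuron that fired it
    rng = np.random.RandomState(0)
    spikes = pd.DataFrame({
        'time': rng.uniform(0, 10.0, size=2000),
        'neuron': rng.randint(0, 20, size=2000),
    })

    # smooth spikes onto a common time base, then reduce dimensionality,
    # following the pattern described in the new introduction.rst
    pipeline = Pipeline([
        ('smooth', Smoother(sample_times=np.arange(0, 10.0, 0.01),
                            kernel='gaussian', tau=0.005)),
        ('reduce', NMF(n_components=5)),
    ])

    reduced_traces = pipeline.fit_transform(spikes)  # one row per sample time, 5 columns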