Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Optimized Memristive Reservoir using CuPy #49

Open
wants to merge 38 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
0ab7620
Update reservoir.py
Ten-Sharp May 14, 2024
7174673
SNN, Lyapunov exponents, and debugging
fmilisav Sep 15, 2023
f8481fa
took out gym
fmilisav Sep 15, 2023
e576340
LE debugging
fmilisav Oct 18, 2023
f7ed89b
Working base conn2res
Ten-Sharp May 21, 2024
d78feb4
added test file just to have on laptop
Ten-Sharp May 21, 2024
1a9e479
Optimization using CuPy
Ten-Sharp May 22, 2024
913bf19
CuPy Implementation for Memristive Reservoir
Ten-Sharp May 23, 2024
e3a85dc
Merge branch 'master' of https://github.com/Ten-Sharp/conn2res
Ten-Sharp May 23, 2024
ae49e71
Finished some overhead simplifications for CuPy
Ten-Sharp May 23, 2024
de267fa
Finished Optimized MSSNetwork optimization
Ten-Sharp May 27, 2024
6abe154
tracing print statements
Ten-Sharp May 28, 2024
bc40bf2
added flag to allow for only internal states returned from simulate, …
Ten-Sharp May 28, 2024
6ce3518
Updated new Cupy reservoir classes documentation
Ten-Sharp May 29, 2024
ca09159
Updated example5 such that it is more general and clearer
Ten-Sharp May 29, 2024
9846079
small bug fix
Ten-Sharp May 29, 2024
2a0a3b3
Merge branch 'master' of https://github.com/Ten-Sharp/conn2res
Ten-Sharp May 29, 2024
9417399
fixed bugs created by merge (weird duplicate lines???)
Ten-Sharp May 29, 2024
1ac1c34
Found that Reservoir was copied into Performance at the merge??? Fixe…
Ten-Sharp May 30, 2024
0e3355e
More Bug fixes and trying to implement dG with normal dist not binomi…
Ten-Sharp Jun 4, 2024
471b510
reverted back to binomial since more accurate
Ten-Sharp Jun 4, 2024
8356d95
Fixed issue with getV which only returned positive voltages. Also imp…
Ten-Sharp Jun 10, 2024
ad6c636
test to add NLT task to Conn2Res tasks
Ten-Sharp Jun 11, 2024
e2369e6
cleaned up implementation of NLT into Conn2Res tasks (thanks filip fo…
Ten-Sharp Jun 11, 2024
904ee68
Fixed bugs with NLT task
Ten-Sharp Jun 11, 2024
d0274e7
Fixed tasks database, modified memristor structure so Nb is uniformly…
Ten-Sharp Jun 13, 2024
eca8485
Quick change to MMSNmultitask init
Ten-Sharp Jun 14, 2024
0426c5d
changes to reservoir
Ten-Sharp Jun 19, 2024
40c2a8e
testing changes to add sum to r2_score as multioutput
Ten-Sharp Jun 21, 2024
3097114
revert changes...
Ten-Sharp Jun 21, 2024
ef11d41
bug with updating code?
Ten-Sharp Jun 21, 2024
82f58ee
testing hwy code won't update
Ten-Sharp Jun 21, 2024
5cb19bd
nothing is updating
Ten-Sharp Jun 21, 2024
38dd2ea
Revert "nothing is updating"
Ten-Sharp Jun 24, 2024
942ec10
Added tracking of energy dissipated between time steps of MSSNetwork.…
Ten-Sharp Jun 26, 2024
0431db0
added energy dissipation calculation, made it possible to collect raw…
Ten-Sharp Jul 16, 2024
0b5f385
removed print statements
Ten-Sharp Jul 22, 2024
1909321
small changes and documentation
Ten-Sharp Aug 14, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions CuPy Install steps
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
Need CUDA installed through their website:
--- Lab computers run on Linux - x86_64 - Ubuntu - 20.04 - deb(local) *DOUBLE CHECK* ---

https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=20.04&target_type=deb_local

Make sure to install the drivers after finishing above steps with:

sudo apt-get install -y cuda-drivers

Note: if apt reports that the public key for the CUDA repository is invalid or unavailable:

RUN: sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub

Then try re-running: sudo apt-get update
(This public-key error typically first appears when apt-get update is run; the apt-key command above resolves it.)


Then to install CuPy:
check cuda version with: nvidia-smi (should be in the top right)

then run either:
for cuda version 11.xx: pip install cupy-cuda11x
for cuda version 12.xx: pip install cupy-cuda12x

Note: Check whether you should be using pip or pip3 depending on your python environment
12 changes: 8 additions & 4 deletions conn2res/connectivity.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ class Conn:
"""

def __init__(self, w=None, filename=None, subj_id=0, modules=None,
density=None):
density=None, subset=True):
"""
Constructor method for class Conn
"""
Expand All @@ -83,7 +83,6 @@ def __init__(self, w=None, filename=None, subj_id=0, modules=None,

# remove inf and nan
self.w[np.logical_or(np.isinf(self.w), np.isnan(self.w))] = 0

# make sure weights are float
self.w = self.w.astype(float)

Expand Down Expand Up @@ -115,7 +114,8 @@ def __init__(self, w=None, filename=None, subj_id=0, modules=None,
self.idx_node = np.full(self.n_nodes, True)

# make sure that all nodes are connected to the rest of the network
self.subset_nodes()
if (subset == True):
self.subset_nodes()

# assign modules
self.modules = modules
Expand Down Expand Up @@ -306,6 +306,10 @@ def get_nodes(self, node_set, nodes_from=None, nodes_without=None,
"""
Get a set of nodes from the connectivity matrix

    Note: all the input arrays contain node indices to include or exclude.
    For example, with nodes_from = [0, 3, 4, 5, 8], the returned subset is
    drawn only from nodes 0, 3, 4, 5, 8 (e.g., [3, 8]).

Parameters
----------
node_set : str
Expand Down Expand Up @@ -454,7 +458,7 @@ def _update_attributes(self, idx_node):
boolean indexing should be used for nodes
"""

if isinstance(idx_node, np.ndarray) and idx_node.dtype == bool:
if isinstance(idx_node, np.ndarray) and idx_node.dtype == np.bool_:
# update node attributes
self.n_nodes = sum(idx_node)
self.idx_node[self.idx_node] = idx_node
Expand Down
132 changes: 132 additions & 0 deletions conn2res/datasetsConn2Res.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
import os

import numpy as np
from scipy import signal

def memory_capacity(n_trials=None, horizon_max=-20, win=30,
                    low=-1, high=1, input_gain=None, add_bias=False,
                    seed=None, **kwargs):
    """
    Fetch data for MemoryCapacity, which is defined as a multi-output
    task using a uniformly distributed input signal and multiple
    delayed output signals.

    Parameters
    ----------
    n_trials : int, optional
        number of time steps in input and output, by default None
        (must be provided by the caller; the signal length cannot be
        computed otherwise)
    horizon_max : int, optional
        maximum shift between input and output, i.e., negative number
        for memory capacity task, by default -20
        note that an array of horizons is generated from -1 down to
        and including horizon_max using a step of -1, which defines
        memory capacity as a multi-output task, i.e., one task per
        horizon
    win : int, optional
        initial window of the input signal that is sliced off the front
        of x and returned separately as z, by default 30
        note that abs(horizon_max) must not exceed this window,
        otherwise ValueError is thrown
    low : float, optional
        lower boundary of the output interval of numpy.uniform(),
        by default -1
    high : float, optional
        upper boundary of the output interval of numpy.uniform(),
        by default 1
    input_gain : float, optional
        gain on the input signal, i.e., scalar multiplier, by default None
    add_bias : bool, optional
        decides whether a constant bias column is prepended to the
        input signal, by default False
    seed : int, array_like[ints], SeedSequence, BitGenerator, Generator, optional
        seed to initialize the random number generator, by default None
        for details, see numpy.random.default_rng()

    Returns
    -------
    x : numpy.ndarray of shape (n_trials, 1), or (n_trials, 2) with bias
        input training data
    y : numpy.ndarray of shape (n_trials, abs(horizon_max))
        output training data; one delayed copy of the input per horizon
    z : numpy.ndarray of shape (win, 1)
        the initial window of the raw input signal that was sliced off
        the front of x (e.g., usable for warming up a reservoir)

    Raises
    ------
    ValueError
        if maximum horizon exceeds win (in absolute value)
    """

    # generate horizons as an array inclusive of horizon_max,
    # e.g. horizon_max=-3 -> [-1, -2, -3]
    sign_ = np.sign(horizon_max)
    horizon = np.arange(
        sign_,
        sign_ + horizon_max,
        sign_,
    )

    # calculate absolute maximum horizon
    abs_horizon_max = np.abs(horizon_max)
    if win < abs_horizon_max:
        raise ValueError("Absolute maximum horizon should be within window")

    # use random number generator for reproducibility
    rng = np.random.default_rng(seed=seed)

    # generate input data as a column vector; extra samples are drawn at
    # both ends so that every delayed copy has full length n_trials
    x = rng.uniform(
        low=low, high=high, size=(n_trials + win + abs_horizon_max + 1)
    )[:, np.newaxis]

    # output data: one delayed copy of the input per horizon
    y = np.hstack([x[win + h: -abs_horizon_max + h - 1] for h in horizon])

    # keep the initial window that is about to be sliced off the front
    z = x[:win]

    # update input data so that len(x) == n_trials
    x = x[win: -abs_horizon_max - 1]

    # reshape data if needed
    if x.ndim == 1:
        x = x[:, np.newaxis]
    if y.ndim == 1:
        y = y[:, np.newaxis]

    # scale input data
    if input_gain is not None:
        x *= input_gain

    # add bias to input data if needed; len(x) (== n_trials) is used so
    # the column shapes always agree with x
    if add_bias:
        x = np.hstack((np.ones((len(x), 1)), x))

    return x, y, z


def non_linear_transformation(n_trials=500, n_cycles=10, waveform='square',
                              input_gain=None, add_bias=False, **kwargs):
    """
    Fetch data for a non-linear transformation (NLT) task: the input is
    a sinusoidal time series and the target is a non-linear waveform
    (square or sawtooth) with the same period.

    Parameters
    ----------
    n_trials : int, optional
        number of time steps in input and output, by default 500
    n_cycles : int, optional
        number of full input cycles spanning the n_trials time steps,
        by default 10
    waveform : str, optional
        target waveform, either 'square' or 'sawtooth',
        by default 'square'
    input_gain : float, optional
        gain applied to both input and output signals, i.e., scalar
        multiplier, by default None
    add_bias : bool, optional
        decides whether a constant bias column is prepended to the
        input signal, by default False

    Returns
    -------
    x, y, z : numpy.ndarray, numpy.ndarray, None
        input (x) and output (y) training data; the third element is
        always None (kept for interface parity with other dataset
        fetchers that return an initial window)
    """

    # duration (in time steps) of one full input cycle
    cycle_duration = n_trials / n_cycles
    t = np.arange(n_trials)

    # input: sinusoidal voltage time series as a column vector
    x = np.sin(2 * np.pi * t / cycle_duration)[:, np.newaxis]

    # target: non-linear transformation of the input
    if waveform == 'sawtooth':
        y = signal.sawtooth(2 * np.pi * t / cycle_duration)[:, np.newaxis]
    else:
        # signal.square(x) evaluates to +1 where sin >= 0 and -1
        # otherwise, i.e. a square wave in phase with the input
        y = signal.square(x)

    # apply gain to both branches (previously the sawtooth branch
    # returned early and silently skipped input_gain and add_bias)
    if input_gain is not None:
        x *= input_gain
        y *= input_gain

    # add bias to input data if needed
    if add_bias:
        x = np.hstack((np.ones((n_trials, 1)), x))

    return x, y, None


4 changes: 4 additions & 0 deletions conn2res/performance.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,11 @@ def r2_score(
A floating point value or an array of floating
point values, one for each individual target.
"""

func = getattr(metrics, 'r2_score')
if(multioutput=='sum'):
results = func(y_true, y_pred, sample_weight=sample_weight, multioutput='raw_values')
return np.sum(results)
return func(y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput)


Expand Down
12 changes: 7 additions & 5 deletions conn2res/readout.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,6 @@ def train(self, X, y, sample_weight=None):
)

# TODO: define sample_weight

# train model
self._model.fit(X=X, y=y, sample_weight=sample_weight)

Expand Down Expand Up @@ -182,10 +181,12 @@ def test(self, X, y, sample_weight=None, metric=None, **kwargs):

# predict values
y_pred = self._model.predict(X)

# estimate score
scores[m] = func(
y, y_pred, sample_weight=sample_weight, **kwargs)
result = func(y, y_pred, sample_weight=sample_weight, **kwargs)
# print("TYPE: ",type(result), result.shape)
# print(result)
result = [result]
scores[m] = result

return scores

Expand Down Expand Up @@ -258,6 +259,7 @@ def run_task(

# train and test model
if readout_nodes is None:

self.train(
x_train, y_train, sample_weight_train
)
Expand Down Expand Up @@ -861,4 +863,4 @@ def _get_sample_weight_old(inputs, labels=None, sample_block=None):
np.hstack([np.tile(1/e, e) for e in nc[np.argsort(ia)]]))


return sample_weight
return sample_weight
Loading