Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add smoothness metric #72

Draft
wants to merge 8 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions nigsp/cli/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,18 @@ def _get_parser():
),
default=None,
)
opt_metrics.add_argument(
"-smooth",
"--smoothness",
dest="comp_metric",
action="append_const",
const="smoothness",
help=(
"Compute the signal smoothness "
"(see Shuman et al, 2013, IEEE Signal Proc. Mag.)"
),
default=None,
)

opt_proc = parser.add_argument_group("Optional Arguments for data processing")
opt_proc.add_argument(
Expand Down
7 changes: 7 additions & 0 deletions nigsp/objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ def __init__(
surr_split=None,
sdi=None,
gsdi=None,
smoothness=None,
fc=None,
fc_split=None,
):
Expand Down Expand Up @@ -82,6 +83,7 @@ def __init__(
self.surr_split = deepcopy(surr_split)
self.sdi = deepcopy(sdi)
self.gsdi = deepcopy(gsdi)
self.smoothness = deepcopy(smoothness)
self.fc = deepcopy(fc)
self.fc_split = deepcopy(fc_split)

Expand Down Expand Up @@ -168,6 +170,11 @@ def compute_gsdi(self, mean=False, keys=None): # pragma: no cover
self.gsdi = operations.gsdi(self.ts_split, mean, keys)
return self

def compute_smoothness(self, signal):  # pragma: no cover
    """Implement metrics.smoothness as class method.

    Delegates to ``operations.smoothness`` using this object's Laplacian
    (``self.lapl_mtx``), stores the result in ``self.smoothness``, and
    returns ``self`` so calls can be chained.
    """
    result = operations.smoothness(self.lapl_mtx, signal)
    self.smoothness = result
    return self

def create_surrogates(self, sc_type="informed", n_surr=1000, seed=None):
"""Implement surrogates.sc_informed and sc_uninformed as class method."""
sc_args = {"timeseries": self.timeseries, "n_surr": n_surr}
Expand Down
2 changes: 1 addition & 1 deletion nigsp/operations/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
# Import all operations.
from .graph import nodestrength, zerocross
from .laplacian import decomposition, symmetric_normalised_laplacian
from .metrics import functional_connectivity, gsdi, sdi
from .metrics import functional_connectivity, gsdi, sdi, smoothness
from .nifti import apply_atlas, apply_mask, mat_to_vol, unfold_atlas, unmask, vol_to_mat
from .surrogates import random_sign, sc_informed, sc_uninformed, test_significance
from .timeseries import (
Expand Down
53 changes: 52 additions & 1 deletion nigsp/operations/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

LGR = logging.getLogger(__name__)

SUPPORTED_METRICS = ["sdi", "dfc", "fc"]
SUPPORTED_METRICS = ["sdi", "dfc", "fc", "smoothness"]


@due.dcite(references.PRETI_2019)
Expand Down Expand Up @@ -246,6 +246,57 @@ def _fc(timeseries, mean=False):
return fc


@due.dcite(references.SHUMAN_2013)
def smoothness(laplacian, signal):
    """Compute the smoothness of a signal over the graph of a given Laplacian.

    Implements the graph-smoothness measure of [1], i.e. the Laplacian
    quadratic form ``signal.T @ laplacian @ signal``.

    Parameters
    ----------
    laplacian : numpy.ndarray
        Graph Laplacian to use; must be a square (nodes, nodes) matrix.
    signal : numpy.ndarray
        A signal defined with one value per node, shaped (nodes,) or
        (nodes, observations). A 1D vector is promoted to a column vector;
        an (observations, nodes) array is transposed with a warning.

    Returns
    -------
    numpy.ndarray
        The (observations, observations) array
        ``signal.T @ laplacian @ signal``; for a single observation this is
        a (1, 1) array containing the smoothness of the signal.

    Raises
    ------
    ValueError
        If ``signal`` has more than two dimensions, if ``laplacian`` is not
        square, or if the node dimensions of ``signal`` and ``laplacian``
        do not match.

    References
    ----------
    .. [1] D. I. Shuman, S. K. Narang, P. Frossard, A. Ortega and P. Vandergheynst,
       "The emerging field of signal processing on graphs: Extending high-dimensional
       data analysis to networks and other irregular domains," in IEEE Signal
       Processing Magazine, vol. 30, no. 3, pp. 83-98, May 2013
    """
    LGR.info("Compute signal smoothness.")

    # Promote a 1D vector to a (nodes, 1) column so the matrix products work.
    if signal.ndim == 1:
        LGR.warning(
            "2D array required, but signal is a vector. Adding empty dimension."
        )
        signal = np.expand_dims(signal, -1)
    elif signal.ndim > 2:
        raise ValueError("Signal should be a 2D array.")

    # The Laplacian must be a square (nodes, nodes) matrix.
    if laplacian.shape[0] != laplacian.shape[1]:
        raise ValueError("Laplacian should be a square matrix.")

    # Align the node axis of the signal with the Laplacian, transposing when
    # the signal appears to be (observations, nodes) instead.
    # NOTE(review): when signal is square this heuristic cannot detect a
    # transposed input — callers should pass (nodes, observations).
    if signal.shape[0] != laplacian.shape[0]:
        if signal.shape[1] == laplacian.shape[0]:
            LGR.warning(
                "It seems that the signal needs to be transposed to the correct shape."
            )
            signal = np.swapaxes(signal, 0, 1)
        else:
            raise ValueError("The dimensions of the signal and Laplacian don't match.")

    # Laplacian quadratic form: signal.T @ laplacian @ signal.
    return np.matmul(signal.T, np.matmul(laplacian, signal))
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeaaaah... Do you remember when your function was one line? Now you understand the pain of the Sdev...

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes definitely, thanks for the guidance and regular feedback!



"""
Copyright 2022, Stefano Moia.

Expand Down
2 changes: 2 additions & 0 deletions nigsp/references.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
"""References to be imported and injected throughout the package."""
from nigsp.due import Doi

SHUMAN_2013 = Doi("10.1109/MSP.2012.2235192")

PRETI_2019 = Doi("10.1038/s41467-019-12765-7")

GRIFFA_2022 = Doi("10.1016/j.neuroimage.2022.118970")
Expand Down
47 changes: 46 additions & 1 deletion nigsp/tests/test_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

import numpy as np
from numpy.random import rand
from pytest import raises
from pytest import raises, warns

from nigsp.operations import metrics
from nigsp.utils import prepare_ndim_iteration
Expand Down Expand Up @@ -72,6 +72,25 @@ def test_gsdi():
assert (gsdi_out["beta_over_alpha"] == gsdi_in).all()


def test_smoothness():
    """Check metrics.smoothness against hand-computed quadratic forms.

    Covers a 1D vector, a correctly-shaped (nodes, observations) array,
    and a transposed (observations, nodes) array.
    """
    vec_signal = rand(10)
    tall_signal = rand(10, 2)
    wide_signal = rand(2, 10)
    laplacian = rand(10, 10)

    # Expected values: the Laplacian quadratic form s.T @ L @ s.
    # The wide signal must be transposed by hand for the dot products to work.
    expected_vec = np.dot(vec_signal.T, np.dot(laplacian, vec_signal))
    expected_tall = np.dot(tall_signal.T, np.dot(laplacian, tall_signal))
    expected_wide = np.dot(wide_signal, np.dot(laplacian, wide_signal.T))

    assert (metrics.smoothness(laplacian, vec_signal) == expected_vec).all()
    assert (metrics.smoothness(laplacian, tall_signal) == expected_tall).all()
    assert (metrics.smoothness(laplacian, wide_signal) == expected_wide).all()

Copy link
Collaborator

@smoia smoia Aug 22, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can add an assert here with signal = rand(10) and expected_smoothness computed on signal[..., np.newaxis]. And another one with signal = rand(2,10) and expected smoothness on signal.T.

Or use chatGPT 🤣


# ### Break tests
def test_break_sdi():
ts1 = np.arange(1, 3)[..., np.newaxis]
Expand Down Expand Up @@ -124,3 +143,29 @@ def test_functional_connectivity():

for k in fcd.keys():
assert (fcd[k] == np.corrcoef(tsd[k])).all()


def test_break_smoothness():
    """Check that metrics.smoothness rejects malformed inputs."""
    # A 3D signal must be rejected.
    with raises(ValueError) as errorinfo:
        metrics.smoothness(rand(3, 3), rand(3, 3, 3))
    assert "should be a 2D" in str(errorinfo.value)

    signal = rand(10, 2)

    # A non-square laplacian must be rejected.
    with raises(ValueError) as errorinfo:
        metrics.smoothness(rand(10, 9), signal)
    assert "a square matrix" in str(errorinfo.value)

    # Incompatible signal/laplacian dimensions must be rejected.
    with raises(ValueError) as errorinfo:
        metrics.smoothness(rand(9, 9), signal)
    assert "don't match" in str(errorinfo.value)