Add complex hypervector types (#81)
* WIP implement complex hypervectors

* Implement complex level and circular hypervector sets

* Add complex tests

* Simplify plotting utility

* Use unbind function

* Add unbind to docs
mikeheddes authored Jun 7, 2022
1 parent d8b3660 commit 1f7e9db
Showing 12 changed files with 345 additions and 207 deletions.
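
The central algebraic fact behind this commit is that `unbind` inverts `bind`. Below is a minimal sketch of that relationship, assuming MAP-style elementwise binding; the actual implementation lives in torchhd/functional.py, whose diff is not rendered on this page.

```python
import torch

def bind(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Elementwise binding: works for bipolar (+1/-1) vectors and
    # for unit-magnitude complex phasors alike.
    return a * b

def unbind(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Inverse of bind: multiply by the elementwise inverse of b.
    # For bipolar b the inverse is b itself (b * b == 1); for a unit
    # phasor the inverse is its complex conjugate.
    return a * b.conj() if b.is_complex() else a * b
```

For bipolar vectors bind is its own inverse, which is why the data structures below previously called `bind` to retrieve values; complex phasors break that symmetry, which is what motivates the dedicated `unbind` operation.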
1 change: 1 addition & 0 deletions docs/functional.rst
@@ -28,6 +28,7 @@ Operations
:template: function.rst

bind
+ unbind
bundle
permute
cleanup
2 changes: 2 additions & 0 deletions torchhd/__init__.py
@@ -10,6 +10,7 @@
level_hv,
circular_hv,
bind,
+ unbind,
bundle,
permute,
)
@@ -27,6 +28,7 @@
"level_hv",
"circular_hv",
"bind",
"unbind",
"bundle",
"permute",
]
310 changes: 238 additions & 72 deletions torchhd/functional.py

Large diffs are not rendered by default.
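
Since this diff is not rendered, here is a minimal sketch of how complex random hypervectors can be generated so that every element has unit magnitude, as the new tests assert. The function name and the uniform-phase sampling scheme are assumptions for illustration, not the committed code:

```python
import math
import torch

def random_complex_hv(num: int, dim: int, dtype=torch.complex64,
                      generator=None) -> torch.Tensor:
    # One uniform phase per element, placed on the unit circle so that
    # |hv[i, j]| == 1 everywhere, matching the magnitude tests below.
    angles = 2 * math.pi * torch.rand(num, dim, generator=generator)
    return torch.polar(torch.ones(num, dim), angles).to(dtype)
```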

28 changes: 14 additions & 14 deletions torchhd/structures.py
@@ -212,10 +212,10 @@ def contains(self, input: Tensor) -> Tensor:
Examples::
>>> M.contains(letters_hv[0])
- tensor([0.4575])
+ tensor(0.4575)
"""
- return functional.cosine_similarity(input, self.value.unsqueeze(0))
+ return functional.cosine_similarity(input, self.value)

def __len__(self) -> int:
"""Returns the size of the multiset.
@@ -363,7 +363,7 @@ def get(self, key: Tensor) -> Tensor:
tensor([ 1., -1., 1., ..., -1., 1., -1.])
"""
- return functional.bind(self.value, key)
+ return functional.unbind(self.value, key)

def replace(self, key: Tensor, old: Tensor, new: Tensor) -> None:
"""Replace the value from key-value pair in the hash table.
@@ -711,7 +711,7 @@ def pop(self, input: Tensor) -> None:
"""
self.size -= 1
- self.value = functional.bind(self.value, input)
+ self.value = functional.unbind(self.value, input)
self.value = functional.permute(self.value, shifts=-1)

def popleft(self, input: Tensor) -> None:
@@ -727,7 +727,7 @@
"""
self.size -= 1
rotated_input = functional.permute(input, shifts=len(self))
- self.value = functional.bind(self.value, rotated_input)
+ self.value = functional.unbind(self.value, rotated_input)

def replace(self, index: int, old: Tensor, new: Tensor) -> None:
"""Replace the old hypervector value from the given index, for the new hypervector value.
@@ -744,7 +744,7 @@ def replace(self, index: int, old: Tensor, new: Tensor) -> None:
"""
rotated_old = functional.permute(old, shifts=self.size - index - 1)
- self.value = functional.bind(self.value, rotated_old)
+ self.value = functional.unbind(self.value, rotated_old)

rotated_new = functional.permute(new, shifts=self.size - index - 1)
self.value = functional.bind(self.value, rotated_new)
@@ -880,13 +880,13 @@ def node_neighbors(self, input: Tensor, outgoing=True) -> Tensor:
"""
if self.is_directed:
if outgoing:
- permuted_neighbors = functional.bind(self.value, input)
+ permuted_neighbors = functional.unbind(self.value, input)
return functional.permute(permuted_neighbors, shifts=-1)
else:
permuted_node = functional.permute(input, shifts=1)
- return functional.bind(self.value, permuted_node)
+ return functional.unbind(self.value, permuted_node)
else:
- return functional.bind(self.value, input)
+ return functional.unbind(self.value, input)

def contains(self, input: Tensor) -> Tensor:
"""Returns the cosine similarity of the input vector against the graph.
@@ -898,9 +898,9 @@ def contains(self, input: Tensor) -> Tensor:
>>> e = G.encode_edge(letters_hv[0], letters_hv[1])
>>> G.contains(e)
- tensor([1.])
+ tensor(1.)
"""
- return functional.cosine_similarity(input, self.value.unsqueeze(0))
+ return functional.cosine_similarity(input, self.value)

def clear(self) -> None:
"""Empties the graph.
@@ -1012,7 +1012,7 @@ def get_leaf(self, path: List[str]) -> Tensor:
hv_path, functional.permute(self.right, shifts=idx)
)

- return functional.bind(hv_path, self.value)
+ return functional.unbind(self.value, hv_path)

def clear(self) -> None:
"""Empties the tree.
@@ -1084,8 +1084,8 @@ def transition(self, state: Tensor, action: Tensor) -> Tensor:
tensor([ 1., 1., -1., ..., -1., -1., 1.])
"""
- next_state = functional.bind(self.value, state)
- next_state = functional.bind(next_state, action)
+ next_state = functional.unbind(self.value, state)
+ next_state = functional.unbind(next_state, action)
return functional.permute(next_state, shifts=-1)

def clear(self) -> None:
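
A short usage sketch of why `HashTable.get` and the other structures now call `unbind`: a key-value binding is inverted by unbinding with the key, and unlike `bind`, this stays correct for complex hypervectors where binding is not self-inverse. The variable names below are illustrative:

```python
from torchhd import functional

keys = functional.random_hv(2, 10000)
values = functional.random_hv(2, 10000)

# Superpose two bound key-value pairs, as the HashTable does internally.
table = functional.bundle(
    functional.bind(keys[0], values[0]),
    functional.bind(keys[1], values[1]),
)

# Unbinding with a key yields a noisy copy of the associated value.
noisy = functional.unbind(table, keys[0])
print(functional.cosine_similarity(noisy, values[0]))  # high similarity
```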
45 changes: 30 additions & 15 deletions torchhd/tests/basis_hv/test_circular_hv.py
@@ -47,30 +47,51 @@ def test_value(self, dtype):
assert torch.all(
(hv == True) | (hv == False)
).item(), "values are either 1 or 0"
+ elif dtype in torch_complex_dtypes:
+     magnitudes = hv.abs()
+     assert torch.allclose(magnitudes, torch.tensor(1.0, dtype=magnitudes.dtype)), "magnitude must be 1"
else:
assert torch.all(
(hv == -1) | (hv == 1)
).item(), "values are either -1 or +1"


hv = functional.circular_hv(8, 1000000, generator=generator, dtype=dtype)
- sims = functional.hamming_similarity(hv[0], hv).float() / 1000000
- sims_diff = sims[:-1] - sims[1:]
-
- assert torch.all(
-     sims_diff.sign() == torch.tensor([1, 1, 1, 1, -1, -1, -1])
- ), "second half must get more similar"
-
- abs_sims_diff = sims_diff.abs()
- assert torch.all(
-     (0.124 < abs_sims_diff) & (abs_sims_diff < 0.126)
- ).item(), "similarity changes linearly"
+ if dtype in torch_complex_dtypes:
+     sims = functional.cosine_similarity(hv[0], hv)
+     sims_diff = sims[:-1] - sims[1:]
+
+     assert torch.all(
+         sims_diff.sign() == torch.tensor([1, 1, 1, 1, -1, -1, -1])
+     ), "second half must get more similar"
+
+     abs_sims_diff = sims_diff.abs()
+     assert torch.all(
+         (0.248 < abs_sims_diff) & (abs_sims_diff < 0.252)
+     ).item(), "similarity changes linearly"
+ else:
+     sims = functional.hamming_similarity(hv[0], hv).float() / 1000000
+     sims_diff = sims[:-1] - sims[1:]
+
+     assert torch.all(
+         sims_diff.sign() == torch.tensor([1, 1, 1, 1, -1, -1, -1])
+     ), "second half must get more similar"
+
+     abs_sims_diff = sims_diff.abs()
+     assert torch.all(
+         (0.124 < abs_sims_diff) & (abs_sims_diff < 0.126)
+     ).item(), "similarity changes linearly"

@pytest.mark.parametrize("sparsity", [0.0, 0.1, 0.756, 1.0])
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_sparsity(self, sparsity, dtype):
if not supported_dtype(dtype):
return

+ if dtype in torch_complex_dtypes:
+     # Complex hypervectors don't support sparsity.
+     return

generator = torch.Generator()
generator.manual_seed(seed)

@@ -96,12 +117,6 @@ def test_device(self, dtype):

@pytest.mark.parametrize("dtype", torch_dtypes)
def test_dtype(self, dtype):
- if dtype in torch_complex_dtypes:
-     with pytest.raises(NotImplementedError):
-         functional.circular_hv(3, 26, dtype=dtype)
-
-     return

if dtype == torch.uint8:
with pytest.raises(ValueError):
functional.circular_hv(3, 26, dtype=dtype)
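
The widened thresholds in the complex branch (0.248-0.252 versus 0.124-0.126) follow from how the two similarity measures scale: for bipolar vectors that differ in a fraction p of their components, normalized hamming similarity is 1 - p while cosine similarity is 1 - 2p, so one step between adjacent levels of an 8-level circular set (p = 1/8) moves hamming similarity by 0.125 but cosine similarity by 0.25. A quick numeric check of that relationship (bipolar case; the complex thresholds are assumed to be tuned to match):

```python
import torch

d, p = 10000, 1.0 / 8.0
a = torch.randint(0, 2, (d,)).float() * 2 - 1  # random bipolar vector
b = a.clone()
flip = torch.randperm(d)[: int(p * d)]
b[flip] = -b[flip]  # flip a fraction p of the components

hamming = (a == b).float().mean()  # ~= 1 - p = 0.875
cosine = torch.dot(a, b) / d       # ~= 1 - 2p = 0.75
print(hamming.item(), cosine.item())
```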
6 changes: 0 additions & 6 deletions torchhd/tests/basis_hv/test_identity_hv.py
@@ -47,12 +47,6 @@ def test_device(self, dtype):

@pytest.mark.parametrize("dtype", torch_dtypes)
def test_dtype(self, dtype):
- if dtype in torch_complex_dtypes:
-     with pytest.raises(NotImplementedError):
-         functional.identity_hv(3, 26, dtype=dtype)
-
-     return

if dtype == torch.uint8:
with pytest.raises(ValueError):
functional.identity_hv(3, 26, dtype=dtype)
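
The identity tests drop the NotImplementedError path because an identity element exists naturally for complex dtypes: with multiply-based binding the identity hypervector is all ones, and 1+0j plays the same role for phasors. A hedged sketch, with the signature reduced for illustration:

```python
import torch

def identity_hv(num: int, dim: int, dtype=torch.complex64) -> torch.Tensor:
    # bind(x, identity) == x elementwise, for bipolar and phasor x alike.
    return torch.ones(num, dim, dtype=dtype)
```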
43 changes: 28 additions & 15 deletions torchhd/tests/basis_hv/test_level_hv.py
@@ -47,29 +47,48 @@ def test_value(self, dtype):
assert torch.all(
(hv == True) | (hv == False)
).item(), "values are either 1 or 0"
+ elif dtype in torch_complex_dtypes:
+     magnitudes = hv.abs()
+     assert torch.allclose(magnitudes, torch.tensor(1.0, dtype=magnitudes.dtype)), "magnitude must be 1"
else:
assert torch.all(
(hv == -1) | (hv == 1)
).item(), "values are either -1 or +1"

# look at the similarity profile w.r.t. the first hypervector
- sims = functional.hamming_similarity(hv[0], hv).float() / 10000
- sims_diff = sims[:-1] - sims[1:]
- assert torch.all(sims_diff > 0).item(), "similarity must be decreasing"
-
- hv = functional.level_hv(5, 1000000, generator=generator, dtype=dtype)
- sims = functional.hamming_similarity(hv[0], hv).float() / 1000000
- sims_diff = sims[:-1] - sims[1:]
- assert torch.all(
-     (0.124 < sims_diff) & (sims_diff < 0.126)
- ).item(), "similarity decreases linearly"
+ if dtype in torch_complex_dtypes:
+     sims = functional.cosine_similarity(hv[0], hv)
+     sims_diff = sims[:-1] - sims[1:]
+     assert torch.all(sims_diff > 0).item(), "similarity must be decreasing"
+
+     hv = functional.level_hv(5, 1000000, generator=generator, dtype=dtype)
+     sims = functional.cosine_similarity(hv[0], hv)
+     sims_diff = sims[:-1] - sims[1:]
+     assert torch.all(
+         (0.248 < sims_diff) & (sims_diff < 0.252)
+     ).item(), "similarity decreases linearly"
+ else:
+     sims = functional.hamming_similarity(hv[0], hv).float() / 10000
+     sims_diff = sims[:-1] - sims[1:]
+     assert torch.all(sims_diff > 0).item(), "similarity must be decreasing"
+
+     hv = functional.level_hv(5, 1000000, generator=generator, dtype=dtype)
+     sims = functional.hamming_similarity(hv[0], hv).float() / 1000000
+     sims_diff = sims[:-1] - sims[1:]
+     assert torch.all(
+         (0.124 < sims_diff) & (sims_diff < 0.126)
+     ).item(), "similarity decreases linearly"

@pytest.mark.parametrize("sparsity", [0.0, 0.1, 0.756, 1.0])
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_sparsity(self, sparsity, dtype):
if not supported_dtype(dtype):
return

+ if dtype in torch_complex_dtypes:
+     # Complex hypervectors don't support sparsity.
+     return

generator = torch.Generator()
generator.manual_seed(seed)

@@ -95,12 +114,6 @@ def test_device(self, dtype):

@pytest.mark.parametrize("dtype", torch_dtypes)
def test_dtype(self, dtype):
- if dtype in torch_complex_dtypes:
-     with pytest.raises(NotImplementedError):
-         functional.level_hv(3, 26, dtype=dtype)
-
-     return

if dtype == torch.uint8:
with pytest.raises(ValueError):
functional.level_hv(3, 26, dtype=dtype)
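
The level and circular tests switch from hamming to cosine similarity for complex dtypes because hamming matching is not meaningful for continuous phases. A plausible definition of cosine similarity that covers complex inputs, assuming the real part of the normalized Hermitian inner product is taken (for real inputs this reduces to the usual formula):

```python
import torch

def cosine_similarity(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Hermitian inner product over the last dimension, then normalize
    # by the vector magnitudes and keep the real part.
    dot = (a.conj() * b).sum(dim=-1)
    norm = a.norm(dim=-1) * b.norm(dim=-1)
    return (dot / norm).real
```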
49 changes: 29 additions & 20 deletions torchhd/tests/basis_hv/test_random_hv.py
@@ -42,21 +42,26 @@ def test_value(self, dtype):
generator = torch.Generator()
generator.manual_seed(seed)

+ hv = functional.random_hv(100, 10000, dtype=dtype, generator=generator)
+
if dtype == torch.bool:
-     hv = functional.random_hv(100, 10000, dtype=dtype, generator=generator)
      assert torch.all((hv == False) | (hv == True)).item()
-
-     return
-
- hv = functional.random_hv(100, 10000, dtype=dtype, generator=generator)
- assert torch.all((hv == -1) | (hv == 1)).item()
+ elif dtype in torch_complex_dtypes:
+     magnitudes = hv.abs()
+     assert torch.allclose(magnitudes, torch.tensor(1.0, dtype=magnitudes.dtype)), "magnitude must be 1"
+ else:
+     assert torch.all((hv == -1) | (hv == 1)).item()

@pytest.mark.parametrize("sparsity", [0.0, 0.1, 0.756, 1.0])
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_sparsity(self, sparsity, dtype):
if not supported_dtype(dtype):
return

+ if dtype in torch_complex_dtypes:
+     # Complex hypervectors don't support sparsity.
+     return

generator = torch.Generator()
generator.manual_seed(seed)

@@ -83,14 +88,24 @@ def test_orthogonality(self, dtype):
generator = torch.Generator()
generator.manual_seed(seed)

- sims = [None] * 100
- for i in range(100):
-     hv = functional.random_hv(2, 10000, dtype=dtype, generator=generator)
-     sims[i] = functional.hamming_similarity(hv[0], hv[1].unsqueeze(0))
-
- sims = torch.cat(sims).float() / 10000
- assert within(sims.mean().item(), 0.5, 0.001)
- assert sims.std().item() < 0.01
+ if dtype in torch_complex_dtypes:
+     sims = [None] * 100
+     for i in range(100):
+         hv = functional.random_hv(2, 10000, dtype=dtype, generator=generator)
+         sims[i] = functional.cosine_similarity(hv[0], hv[1])
+
+     # Cosine similarity is already normalized; no division by the dimension.
+     sims = torch.stack(sims).float()
+     assert within(sims.mean().item(), 0.0, 0.001)
+     assert sims.std().item() < 0.01
+ else:
+     sims = [None] * 100
+     for i in range(100):
+         hv = functional.random_hv(2, 10000, dtype=dtype, generator=generator)
+         sims[i] = functional.hamming_similarity(hv[0], hv[1].unsqueeze(0))
+
+     sims = torch.stack(sims).float() / 10000
+     assert within(sims.mean().item(), 0.5, 0.001)
+     assert sims.std().item() < 0.01

@pytest.mark.parametrize("dtype", torch_dtypes)
def test_device(self, dtype):
@@ -103,12 +118,6 @@ def test_device(self, dtype):

@pytest.mark.parametrize("dtype", torch_dtypes)
def test_dtype(self, dtype):
- if dtype in torch_complex_dtypes:
-     with pytest.raises(NotImplementedError):
-         functional.random_hv(3, 26, dtype=dtype)
-
-     return

if dtype == torch.uint8:
with pytest.raises(ValueError):
functional.random_hv(3, 26, dtype=dtype)
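
The orthogonality thresholds in the complex branch are consistent with a simple model: each element of conj(x) * y is an independent unit phasor with uniform phase, so the cosine similarity has mean 0 and standard deviation about 1/sqrt(2d), roughly 0.007 at d = 10000, comfortably below the asserted 0.01. A quick Monte-Carlo check under that assumption:

```python
import math
import torch

d = 10000
angles = torch.rand(2, d) * 2 * math.pi
hv = torch.polar(torch.ones(2, d), angles)  # two random unit phasors

# Cosine similarity: norms are exactly sqrt(d) for unit-magnitude phasors.
sim = (hv[0].conj() * hv[1]).sum().real / d
print(sim.item(), 1 / math.sqrt(2 * d))  # both near 0 and ~0.007
```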