Skip to content

Commit

Permalink
Adjust ISeeCube to use the new FourierEncoder.
Browse files Browse the repository at this point in the history
  • Loading branch information
mobra7 committed Nov 6, 2024
1 parent a10e118 commit 547976b
Showing 1 changed file with 5 additions and 3 deletions.
8 changes: 5 additions & 3 deletions src/graphnet/models/transformer/iseecube.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def __init__(
max_rel_pos: int = 256,
num_register_tokens: int = 3,
scaled_emb: bool = False,
n_features: int = 6,
fourier_mapping: list = [0, 1, 2, 3, 4, 5],
):
"""Construct `ISeeCube`.
Expand All @@ -46,15 +46,17 @@ def __init__(
max_rel_pos: Maximum relative position for relative position bias.
num_register_tokens: The number of register tokens.
scaled_emb: Whether to scale the sinusoidal positional embeddings.
n_features: The number of features in the input data.
fourier_mapping: Mapping of the data to [x,y,z,time,charge,
auxiliary] for the FourierEncoder. Use None for missing
features.
"""
super().__init__(seq_length, hidden_dim)
self.fourier_ext = FourierEncoder(
seq_length=seq_length,
mlp_dim=mlp_dim,
output_dim=hidden_dim,
scaled=scaled_emb,
n_features=n_features,
mapping=fourier_mapping,
)
self.pos_embedding = nn.Parameter(
torch.empty(1, seq_length, hidden_dim).normal_(std=0.02),
Expand Down

0 comments on commit 547976b

Please sign in to comment.