Merge pull request #174 from macrocosm-os/SN9-95-sunset-7b-competition
Added activation block to sunset 7b
cryptal-mc authored Sep 11, 2024
2 parents 66345d6 + f082680 commit 9d4ab4a
Showing 2 changed files with 25 additions and 81 deletions.
31 changes: 22 additions & 9 deletions constants/__init__.py
@@ -37,7 +37,7 @@
# ---------------------------------

# Release
__version__ = "4.3.0"
__version__ = "4.4.0"

# Validator schema version
__validator_version__ = "3.1.0"
@@ -284,6 +284,27 @@
),
],
),
(
3_849_722,
[
Competition(
CompetitionId.M772_MODEL,
MODEL_CONSTRAINTS_BY_COMPETITION_ID_LINEAR_DECAY[CompetitionId.M772_MODEL],
0.14,
),
Competition(
CompetitionId.B3_MODEL,
MODEL_CONSTRAINTS_BY_COMPETITION_ID_LINEAR_DECAY[CompetitionId.B3_MODEL],
0.29,
),
Competition(
CompetitionId.B14_MODEL,
MODEL_CONSTRAINTS_BY_COMPETITION_ID_LINEAR_DECAY[CompetitionId.B14_MODEL],
0.57,
),
],
),

]

for block_and_competitions in COMPETITION_SCHEDULE_BY_BLOCK:
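The entry added above activates at block 3_849_722 and drops the 7B competition entirely, splitting reward across M772 (0.14), B3 (0.29), and B14 (0.57), which sum to 1.0. As a minimal, hypothetical sketch (not a helper from this repository) of how a validator could resolve which entry of COMPETITION_SCHEDULE_BY_BLOCK is in force at a given block, assuming the schedule is sorted ascending by activation block:

# Hypothetical sketch, not repository code: pick the competition list in force
# at `cur_block`, assuming the schedule is sorted ascending by activation block.
# Blocks earlier than the first activation block fall back to the earliest entry.
from typing import List, Tuple

def schedule_for_block(cur_block: int, schedule: List[Tuple[int, list]]) -> list:
    active = schedule[0][1]
    for activation_block, competitions in schedule:
        if cur_block >= activation_block:
            active = competitions
        else:
            break
    return active

# Toy usage with placeholder entries (blocks other than 3_849_722 are made up):
toy_schedule = [(3_700_000, ["earlier split"]), (3_849_722, ["M772/B3/B14 split"])]
assert schedule_for_block(3_800_000, toy_schedule) == ["earlier split"]
assert schedule_for_block(3_849_722, toy_schedule) == ["M772/B3/B14 split"]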
@@ -308,18 +329,10 @@
# 0.01 gives ~96% to best model with only ~3 receiving any weights.
temperature = 0.01

# block to activate sample unpacking
sample_unpack_block = BLOCK_3B_7BSTAR_UNPACK

# validators number of pages to eval over miners on each step.
pages_per_eval_unpack = 5 # With sample unpacking
pages_per_eval_pack = 18

timestamp_epsilon_experiment_start_block = BLOCK_3B_7BSTAR_UNPACK
timestamp_epsilon_experiment_end_block = 3_750_683
timestamp_epsilon_experiment = 0.001
timestamp_epsilon_experiment_weight_percent = 0.123

# validator eval batch size.
batch_size = 1
# validator eval batch min to keep for next loop.
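As an aside on the `temperature = 0.01` constant retained above: dividing win rates by a temperature this small before the softmax concentrates nearly all of a competition's weight on the best model, consistent with the "~96% to best model with only ~3 receiving any weights" comment. A self-contained illustration with made-up win rates:

# Illustration only (win rates are made up): a 0.01 temperature concentrates the
# softmax on the top model, leaving only a couple of runners-up with any weight.
import torch

win_rates = torch.tensor([0.90, 0.87, 0.85, 0.60, 0.40])
weights = torch.softmax(win_rates / 0.01, dim=0)
print(weights)  # approximately [0.947, 0.047, 0.006, 0.000, 0.000]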
75 changes: 3 additions & 72 deletions neurons/validator.py
@@ -796,12 +796,9 @@ async def run_step(self):
competition.constraints, cache_dir=self.config.model_dir
)

if cur_block >= constants.sample_unpack_block:
pack_samples = False
pages_per_eval = constants.pages_per_eval_unpack
else:
pack_samples = True
pages_per_eval = constants.pages_per_eval_pack
pack_samples = False
pages_per_eval = constants.pages_per_eval_unpack


# If the option is set in the config, override
pages_per_eval = (
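The hunk above is truncated where a config option can override `pages_per_eval`; with sample unpacking now always active, the default is simply `constants.pages_per_eval_unpack` (5 pages). A hypothetical, standalone sketch of that precedence (names and signature are assumptions, not confirmed by this diff):

# Hypothetical sketch of the override precedence (not code from this diff):
# an explicitly configured value wins over the unpacked-eval default of 5 pages.
from typing import Optional

def resolve_pages_per_eval(configured: Optional[int], default: int = 5) -> int:
    return configured if configured is not None else default

assert resolve_pages_per_eval(None) == 5   # falls back to pages_per_eval_unpack
assert resolve_pages_per_eval(11) == 11    # config value takes precedence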
@@ -928,72 +925,6 @@ async def run_step(self):
)
step_weights = torch.softmax(model_weights / constants.temperature, dim=0)

# If we are running the epsilon experiment for competition 7B then also try the experiment epsilon.
if (
competition.id == CompetitionId.B7_MODEL
and cur_block >= constants.timestamp_epsilon_experiment_start_block
and cur_block < constants.timestamp_epsilon_experiment_end_block
):
epsilon_experiment_func = FixedEpsilon(0.001)
wins_epsilon_experiment, win_rate_epsilon_experiment = (
pt.validation.compute_wins(
uids,
losses_per_uid,
batches,
uid_to_block,
epsilon_experiment_func,
cur_block,
)
)

# Compute softmaxed weights based on win rate.
model_weights_epsilon_experiment = torch.tensor(
[win_rate_epsilon_experiment[uid] for uid in uids], dtype=torch.float32
)
step_weights_epsilon_experiment = torch.softmax(
model_weights_epsilon_experiment / constants.temperature, dim=0
)

# Overwrite step weights using a ratio between regular and experiment model weights.
# We do this after the original softmax and temperature division so we still get two distinct '1st places'.
regular_weight = 1 - constants.timestamp_epsilon_experiment_weight_percent
experiment_weight = constants.timestamp_epsilon_experiment_weight_percent
step_weights = (
step_weights * regular_weight
+ step_weights_epsilon_experiment * experiment_weight
)

# Since we have different win rates for this experimental competition, we need to log it separately.
# Update the uids to competition ids map to replace B7_MODEL with B7_MODEL_LOWER_EPSILON for logging.
# Note that mapping uids to competition ids uses raw ints from the metadata.
# Competition Names could be used with handling in the conversion and a larger table column.
uids_to_competition_ids_epsilon_experiment = {
k: (
CompetitionId.B7_MODEL_LOWER_EPSILON.value
if v == CompetitionId.B7_MODEL
else v
)
for k, v in self._get_uids_to_competition_ids().items()
}

bt.logging.info(
"Logging step for Epsilon Experiment. Weights are not final."
)
self.log_step(
CompetitionId.B7_MODEL_LOWER_EPSILON,
epsilon_experiment_func,
cur_block,
uids,
uid_to_state,
uids_to_competition_ids_epsilon_experiment,
pages,
model_weights_epsilon_experiment,
wins_epsilon_experiment,
win_rate_epsilon_experiment,
load_model_perf,
compute_loss_perf,
)

# Fill in metagraph sized tensor with the step weights of the evaluated models.
with self.metagraph_lock:
competition_weights = torch.zeros_like(self.metagraph.S)
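For readers tracing what was deleted: the removed block computed a second set of softmaxed weights for the 7B competition under a fixed epsilon of 0.001 and blended it with the regular weights, giving the experiment 12.3% of the competition weight (timestamp_epsilon_experiment_weight_percent = 0.123). A self-contained sketch of that blend with toy tensors (not the validator's actual values):

# Toy illustration of the removed blending step: 87.7% of the weight follows the
# regular softmax, 12.3% the experimental-epsilon softmax; the result still sums to 1.
import torch

experiment_weight = 0.123                                   # was timestamp_epsilon_experiment_weight_percent
step_weights = torch.tensor([0.95, 0.04, 0.01])             # regular softmax output (toy)
step_weights_experiment = torch.tensor([0.10, 0.88, 0.02])  # experiment softmax output (toy)

blended = step_weights * (1 - experiment_weight) + step_weights_experiment * experiment_weight
print(blended)  # approximately [0.845, 0.143, 0.011]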
